repo_name | path | copies | size | content | license
---|---|---|---|---|---
cccfran/sympy | doc/ext/docscrape_sphinx.py | 52 | 7983 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
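# NOTE (editorial): the unconditional return below appears to deliberately
# disable signature output; the if/else that follows it is unreachable.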
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
## Lines that are commented out are used to make the
## autosummary:: table. Since SymPy does not use the
## autosummary:: functionality, it is easiest to just comment it
## out.
#autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
#if not self._obj or hasattr(self._obj, param):
# autosum += [" %s%s" % (prefix, param)]
#else:
others.append((param, param_type, desc))
#if autosum:
# out += ['.. autosummary::', ' :toctree:', '']
# out += autosum
if others:
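# build a reST "simple table": the header/footer rules are runs of '=' sized
# to the widest name and type, with each description indented underneath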
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
mlyundin/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit the model, with sample weights and, for reference, without them
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
adammenges/statsmodels | statsmodels/base/tests/test_generic_methods.py | 25 | 16558 | # -*- coding: utf-8 -*-
"""Tests that use cross-checks for generic methods
Should be easy to check consistency across models
Does not cover tsa
Initial cases copied from test_shrink_pickle
Created on Wed Oct 30 14:01:27 2013
Author: Josef Perktold
"""
from statsmodels.compat.python import range
import numpy as np
import statsmodels.api as sm
from statsmodels.compat.scipy import NumpyVersion
from numpy.testing import assert_, assert_allclose, assert_equal
from nose import SkipTest
import platform
iswin = platform.system() == 'Windows'
npversionless15 = NumpyVersion(np.__version__) < '1.5.0'
winoldnp = iswin & npversionless15
class CheckGenericMixin(object):
def __init__(self):
self.predict_kwds = {}
@classmethod
def setup_class(self):
nobs = 500
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x)
self.exog = x
self.xf = 0.25 * np.ones((2, 4))
def test_ttest_tvalues(self):
# test that t_test has the same results as params, bse, tvalues, ...
res = self.results
mat = np.eye(len(res.params))
tt = res.t_test(mat)
assert_allclose(tt.effect, res.params, rtol=1e-12)
# TODO: tt.sd and tt.tvalue are 2d also for single regressor, squeeze
assert_allclose(np.squeeze(tt.sd), res.bse, rtol=1e-10)
assert_allclose(np.squeeze(tt.tvalue), res.tvalues, rtol=1e-12)
assert_allclose(tt.pvalue, res.pvalues, rtol=5e-10)
assert_allclose(tt.conf_int(), res.conf_int(), rtol=1e-10)
# test params table frame returned by t_test
table_res = np.column_stack((res.params, res.bse, res.tvalues,
res.pvalues, res.conf_int()))
table1 = np.column_stack((tt.effect, tt.sd, tt.tvalue, tt.pvalue,
tt.conf_int()))
table2 = tt.summary_frame().values
assert_allclose(table2, table_res, rtol=1e-12)
# move this to test_attributes ?
assert_(hasattr(res, 'use_t'))
tt = res.t_test(mat[0])
tt.summary() # smoke test for #1323
assert_allclose(tt.pvalue, res.pvalues[0], rtol=5e-10)
def test_ftest_pvalues(self):
res = self.results
use_t = res.use_t
k_vars = len(res.params)
# check default use_t
pvals = [res.wald_test(np.eye(k_vars)[k], use_f=use_t).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# automatic use_f based on results class use_t
pvals = [res.wald_test(np.eye(k_vars)[k]).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# label for pvalues in summary
string_use_t = 'P>|z|' if use_t is False else 'P>|t|'
summ = str(res.summary())
assert_(string_use_t in summ)
# try except for models that don't have summary2
try:
summ2 = str(res.summary2())
except AttributeError:
summ2 = None
if summ2 is not None:
assert_(string_use_t in summ2)
# TODO The following is not (yet) guaranteed across models
#@knownfailureif(True)
def test_fitted(self):
# ignore wrapper for isinstance check
from statsmodels.genmod.generalized_linear_model import GLMResults
from statsmodels.discrete.discrete_model import DiscreteResults
# FIXME: work around GEE has no wrapper
if hasattr(self.results, '_results'):
results = self.results._results
else:
results = self.results
if (isinstance(results, GLMResults) or
isinstance(results, DiscreteResults)):
raise SkipTest
res = self.results
fitted = res.fittedvalues
assert_allclose(res.model.endog - fitted, res.resid, rtol=1e-12)
assert_allclose(fitted, res.predict(), rtol=1e-12)
def test_predict_types(self):
res = self.results
# squeeze to make 1d for single regressor test case
p_exog = np.squeeze(np.asarray(res.model.exog[:2]))
# ignore wrapper for isinstance check
from statsmodels.genmod.generalized_linear_model import GLMResults
from statsmodels.discrete.discrete_model import DiscreteResults
# FIXME: work around GEE has no wrapper
if hasattr(self.results, '_results'):
results = self.results._results
else:
results = self.results
if (isinstance(results, GLMResults) or
isinstance(results, DiscreteResults)):
# SMOKE test only TODO
res.predict(p_exog)
res.predict(p_exog.tolist())
res.predict(p_exog[0].tolist())
else:
fitted = res.fittedvalues[:2]
assert_allclose(fitted, res.predict(p_exog), rtol=1e-12)
# this needs reshape to column-vector:
assert_allclose(fitted, res.predict(np.squeeze(p_exog).tolist()),
rtol=1e-12)
# only one prediction:
assert_allclose(fitted[:1], res.predict(p_exog[0].tolist()),
rtol=1e-12)
assert_allclose(fitted[:1], res.predict(p_exog[0]),
rtol=1e-12)
# predict doesn't preserve DataFrame, e.g. dot converts to ndarray
# import pandas
# predicted = res.predict(pandas.DataFrame(p_exog))
# assert_(isinstance(predicted, pandas.DataFrame))
# assert_allclose(predicted, fitted, rtol=1e-12)
######### subclasses for individual models, unchanged from test_shrink_pickle
# TODO: check if setup_class is faster than setup
class TestGenericOLS(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.OLS(y, self.exog).fit()
class TestGenericOLSOneExog(CheckGenericMixin):
# check with single regressor (no constant)
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog[:, 1]
np.random.seed(987689)
y = x + np.random.randn(x.shape[0])
self.results = sm.OLS(y, x).fit()
class TestGenericWLS(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.WLS(y, self.exog, weights=np.ones(len(y))).fit()
class TestGenericPoisson(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
model = sm.Poisson(y_count, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([0.75334818, 0.99425553, 1.00494724, 1.00247112])
self.results = model.fit(start_params=start_params, method='bfgs',
disp=0)
#TODO: temporary, fixed in master
self.predict_kwds = dict(exposure=1, offset=0)
class TestGenericNegativeBinomial(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
np.random.seed(987689)
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
mod = sm.NegativeBinomial(data.endog, data.exog)
start_params = np.array([-0.0565406 , -0.21213599, 0.08783076,
-0.02991835, 0.22901974, 0.0621026,
0.06799283, 0.08406688, 0.18530969,
1.36645452])
self.results = mod.fit(start_params=start_params, disp=0)
class TestGenericLogit(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
nobs = x.shape[0]
np.random.seed(987689)
y_bin = (np.random.rand(nobs) < 1.0 / (1 + np.exp(x.sum(1) - x.mean()))).astype(int)
model = sm.Logit(y_bin, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([-0.73403806, -1.00901514, -0.97754543, -0.95648212])
self.results = model.fit(start_params=start_params, method='bfgs', disp=0)
class TestGenericRLM(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.RLM(y, self.exog).fit()
class TestGenericGLM(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.GLM(y, self.exog).fit()
class TestGenericGEEPoisson(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
vi = sm.cov_struct.Independence()
family = sm.families.Poisson()
self.results = sm.GEE(y_count, self.exog, groups, family=family,
cov_struct=vi).fit(start_params=start_params)
class TestGenericGEEPoissonNaive(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
#y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
y_count = np.random.poisson(np.exp(x.sum(1) - x.sum(1).mean(0)))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
vi = sm.cov_struct.Independence()
family = sm.families.Poisson()
self.results = sm.GEE(y_count, self.exog, groups, family=family,
cov_struct=vi).fit(start_params=start_params,
cov_type='naive')
class TestGenericGEEPoissonBC(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
#y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
y_count = np.random.poisson(np.exp(x.sum(1) - x.sum(1).mean(0)))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
# params_est = np.array([-0.0063238 , 0.99463752, 1.02790201, 0.98080081])
vi = sm.cov_struct.Independence()
family = sm.families.Poisson()
mod = sm.GEE(y_count, self.exog, groups, family=family, cov_struct=vi)
self.results = mod.fit(start_params=start_params,
cov_type='bias_reduced')
# Other test classes
class CheckAnovaMixin(object):
@classmethod
def setup_class(cls):
import statsmodels.stats.tests.test_anova as ttmod
test = ttmod.TestAnova3()
test.setupClass()
cls.data = test.data.drop([0,1,2])
cls.initialize()
def test_combined(self):
res = self.res
wa = res.wald_test_terms(skip_single=False, combine_terms=['Duration', 'Weight'])
eye = np.eye(len(res.params))
c_const = eye[0]
c_w = eye[[2,3]]
c_d = eye[1]
c_dw = eye[[4,5]]
c_weight = eye[2:6]
c_duration = eye[[1, 4, 5]]
compare_waldres(res, wa, [c_const, c_d, c_w, c_dw, c_duration, c_weight])
def test_categories(self):
# test only multicolumn terms
res = self.res
wa = res.wald_test_terms(skip_single=True)
eye = np.eye(len(res.params))
c_w = eye[[2,3]]
c_dw = eye[[4,5]]
compare_waldres(res, wa, [c_w, c_dw])
def compare_waldres(res, wa, contrasts):
for i, c in enumerate(contrasts):
wt = res.wald_test(c)
assert_allclose(wa.table.values[i, 0], wt.statistic)
assert_allclose(wa.table.values[i, 1], wt.pvalue)
df = c.shape[0] if c.ndim == 2 else 1
assert_equal(wa.table.values[i, 2], df)
# attributes
assert_allclose(wa.statistic[i], wt.statistic)
assert_allclose(wa.pvalues[i], wt.pvalue)
assert_equal(wa.df_constraints[i], df)
if res.use_t:
assert_equal(wa.df_denom[i], res.df_resid)
col_names = wa.col_names
if res.use_t:
assert_equal(wa.distribution, 'F')
assert_equal(col_names[0], 'F')
assert_equal(col_names[1], 'P>F')
else:
assert_equal(wa.distribution, 'chi2')
assert_equal(col_names[0], 'chi2')
assert_equal(col_names[1], 'P>chi2')
# SMOKETEST
wa.summary_frame()
class TestWaldAnovaOLS(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit(use_t=False)
def test_noformula(self):
endog = self.res.model.endog
exog = self.res.model.data.orig_exog
del exog.design_info
res = sm.OLS(endog, exog).fit()
wa = res.wald_test_terms(skip_single=True,
combine_terms=['Duration', 'Weight'])
eye = np.eye(len(res.params))
c_weight = eye[2:6]
c_duration = eye[[1, 4, 5]]
compare_waldres(res, wa, [c_duration, c_weight])
class TestWaldAnovaOLSF(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit() # default use_t=True
class TestWaldAnovaGLM(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = glm("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit(use_t=False)
class TestWaldAnovaPoisson(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.discrete.discrete_model import Poisson
mod = Poisson.from_formula("Days ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit(cov_type='HC0')
class TestWaldAnovaNegBin(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.discrete.discrete_model import NegativeBinomial
formula = "Days ~ C(Duration, Sum)*C(Weight, Sum)"
mod = NegativeBinomial.from_formula(formula, cls.data,
loglike_method='nb2')
cls.res = mod.fit()
class TestWaldAnovaNegBin1(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.discrete.discrete_model import NegativeBinomial
formula = "Days ~ C(Duration, Sum)*C(Weight, Sum)"
mod = NegativeBinomial.from_formula(formula, cls.data,
loglike_method='nb1')
cls.res = mod.fit(cov_type='HC0')
class T_estWaldAnovaOLSNoFormula(object):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit() # default use_t=True
if __name__ == '__main__':
pass
| bsd-3-clause |
edhuckle/statsmodels | statsmodels/examples/ex_feasible_gls_het.py | 34 | 4267 | # -*- coding: utf-8 -*-
"""Examples for linear model with heteroscedasticity estimated by feasible GLS
These are examples to check the results during development.
The assumptions:
We have a linear model y = X*beta where the variance of an observation depends
on some explanatory variable Z (`exog_var`).
linear_model.WLS estimates the model for a given weight matrix;
here we also want to estimate the weight matrix, by two-step or iterative WLS.
Created on Wed Dec 21 12:28:17 2011
Author: Josef Perktold
There might be something fishy with the example, but I don't see it.
Or maybe it's supposed to be this way because in the first case I don't
include a constant and in the second case I include some of the same
regressors as in the main equation.
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.regression.linear_model import OLS, WLS, GLS
from statsmodels.regression.feasible_gls import GLSHet, GLSHet2
examples = ['ex1']
if 'ex1' in examples:
#from tut_ols_wls
nsample = 1000
sig = 0.5
x1 = np.linspace(0, 20, nsample)
X = np.c_[x1, (x1-5)**2, np.ones(nsample)]
np.random.seed(0)#9876789) #9876543)
beta = [0.5, -0.015, 1.]
y_true2 = np.dot(X, beta)
w = np.ones(nsample)
w[nsample*6//10:] = 4 #Note this is the squared value
#y2[:nsample*6/10] = y_true2[:nsample*6/10] + sig*1. * np.random.normal(size=nsample*6/10)
#y2[nsample*6/10:] = y_true2[nsample*6/10:] + sig*4. * np.random.normal(size=nsample*4/10)
y2 = y_true2 + sig*np.sqrt(w)* np.random.normal(size=nsample)
X2 = X[:,[0,2]]
X2 = X
res_ols = OLS(y2, X2).fit()
print('OLS beta estimates')
print(res_ols.params)
print('OLS stddev of beta')
print(res_ols.bse)
print('\nWLS')
mod0 = GLSHet2(y2, X2, exog_var=w)
res0 = mod0.fit()
print('new version')
mod1 = GLSHet(y2, X2, exog_var=w)
res1 = mod1.iterative_fit(2)
print('WLS beta estimates')
print(res1.params)
print(res0.params)
print('WLS stddev of beta')
print(res1.bse)
#compare with previous version GLSHet2, refactoring check
#assert_almost_equal(res1.params, np.array([ 0.37642521, 1.51447662]))
#this fails ??? more iterations? different starting weights?
print(res1.model.weights/res1.model.weights.max())
#why is the error so small in the estimated weights ?
assert_almost_equal(res1.model.weights/res1.model.weights.max(), 1./w, 14)
print('residual regression params')
print(res1.results_residual_regression.params)
print('scale of model ?')
print(res1.scale)
print('unweighted residual variance, note unweighted mean is not zero')
print(res1.resid.var())
#Note weighted mean is zero:
#(res1.model.weights * res1.resid).mean()
doplots = False
if doplots:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, 'r-', label='fwls')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
#z = (w[:,None] == [1,4]).astype(float) #dummy variable
z = (w[:,None] == np.unique(w)).astype(float) #dummy variable
mod2 = GLSHet(y2, X2, exog_var=z)
res2 = mod2.iterative_fit(2)
print(res2.params)
import statsmodels.api as sm
z = sm.add_constant(w)
mod3 = GLSHet(y2, X2, exog_var=z)
res3 = mod3.iterative_fit(8)
print(res3.params)
print("np.array(res3.model.history['ols_params'])")
print(np.array(res3.model.history['ols_params']))
print("np.array(res3.model.history['self_params'])")
print(np.array(res3.model.history['self_params']))
print(np.unique(res2.model.weights)) #for discrete z only, only a few uniques
print(np.unique(res3.model.weights))
if doplots:
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, '-', label='fwls1')
plt.plot(x1, res2.fittedvalues, '-', label='fwls2')
plt.plot(x1, res3.fittedvalues, '-', label='fwls3')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
plt.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.18/_downloads/02105ee74aef45a8882996e7ea2860fe/plot_temporal_whitening.py | 15 | 1840 | """
================================
Temporal whitening with AR model
================================
Here we fit an AR model to the data and use it
to temporally whiten the signals.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import fit_iir_model_raw
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_ecg-proj.fif'
raw = mne.io.read_raw_fif(raw_fname)
proj = mne.read_proj(proj_fname)
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
# Set up pick list: Gradiometers - bad channels
picks = mne.pick_types(raw.info, meg='grad', exclude='bads')
order = 5 # define model order
picks = picks[:1]
# Estimate AR models on raw data
b, a = fit_iir_model_raw(raw, order=order, picks=picks, tmin=60, tmax=180)
d, times = raw[0, 10000:20000] # look at one channel from now on
d = d.ravel() # make flat vector
innovation = signal.convolve(d, a, 'valid')
d_ = signal.lfilter(b, a, innovation) # regenerate the signal
d_ = np.r_[d_[0] * np.ones(order), d_] # dummy samples to keep signal length
###############################################################################
# Plot the different time series and PSDs
plt.close('all')
plt.figure()
plt.plot(d[:100], label='signal')
plt.plot(d_[:100], label='regenerated signal')
plt.legend()
plt.figure()
plt.psd(d, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(innovation, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(d_, Fs=raw.info['sfreq'], NFFT=2048, linestyle='--')
plt.legend(('Signal', 'Innovation', 'Regenerated signal'))
plt.show()
| bsd-3-clause |
jblackburne/scikit-learn | sklearn/tests/test_pipeline.py | 7 | 24571 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import clone, BaseEstimator
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class NoFit(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class NoTrans(NoFit):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class NoInvTransf(NoTrans):
def transform(self, X, y=None):
return X
class Transf(NoInvTransf):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class Mult(BaseEstimator):
def __init__(self, mult=1):
self.mult = mult
def fit(self, X, y):
return self
def transform(self, X):
return np.asarray(X) * self.mult
def inverse_transform(self, X):
return np.asarray(X) / self.mult
def predict(self, X):
return (np.asarray(X) * self.mult).sum(axis=1)
predict_proba = predict_log_proba = decision_function = predict
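# the aliases above let Mult stand in for estimators with any prediction-style
# interface in the pipeline and duck-typing tests below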
def score(self, X, y=None):
return np.sum(X)
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
assert_raises_regex(TypeError,
'Last step of Pipeline should implement fit. '
'.*NoFit.*',
Pipeline, [('clf', NoFit())])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't instantiate with non-transformers on the way
# Note that NoTrans implements fit, but not transform
assert_raises_regex(TypeError,
'All intermediate steps should be transformers'
'.*\\bNoTrans\\b.*',
Pipeline, [('t', NoTrans()), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
# test error if some elements do not support transform
assert_raises_regex(TypeError,
'All estimators should implement fit and '
'transform.*\\bNoTrans\\b',
FeatureUnion,
[("transform", Transf()), ("no_transform", NoTrans())])
def test_make_union():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transf"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transf = Transf()
pipeline = Pipeline([('mock', transf)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transf.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_set_pipeline_steps():
transf1 = Transf()
transf2 = Transf()
pipeline = Pipeline([('mock', transf1)])
assert_true(pipeline.named_steps['mock'] is transf1)
# Directly setting attr
pipeline.steps = [('mock2', transf2)]
assert_true('mock' not in pipeline.named_steps)
assert_true(pipeline.named_steps['mock2'] is transf2)
assert_equal([('mock2', transf2)], pipeline.steps)
# Using set_params
pipeline.set_params(steps=[('mock', transf1)])
assert_equal([('mock', transf1)], pipeline.steps)
# Using set_params to replace single step
pipeline.set_params(mock=transf2)
assert_equal([('mock', transf2)], pipeline.steps)
# With invalid data
pipeline.set_params(steps=[('junk', ())])
assert_raises(TypeError, pipeline.fit, [[1]], [1])
assert_raises(TypeError, pipeline.fit_transform, [[1]], [1])
def test_set_pipeline_step_none():
# Test setting Pipeline steps to None
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
def make():
return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])
pipeline = make()
exp = 2 * 3 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline.set_params(m3=None)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_dict_equal(pipeline.get_params(deep=True),
{'steps': pipeline.steps,
'm2': mult2,
'm3': None,
'last': mult5,
'm2__mult': 2,
'last__mult': 5,
})
pipeline.set_params(m2=None)
exp = 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
# for other methods, ensure no AttributeErrors on None:
other_methods = ['predict_proba', 'predict_log_proba',
'decision_function', 'transform', 'score']
for method in other_methods:
getattr(pipeline, method)(X)
pipeline.set_params(m2=mult2)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline = make()
pipeline.set_params(last=None)
# mult2 and mult3 are active
exp = 6
assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_raise_message(AttributeError,
"'NoneType' object has no attribute 'predict'",
getattr, pipeline, 'predict')
# Check None step at construction time
exp = 2 * 5
pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)])
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(None)
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf(), NoInvTransf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
pipeline = make_pipeline(NoInvTransf(), Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
def test_make_pipeline():
t1 = Transf()
t2 = Transf()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs works for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
ft = FeatureUnion([("tr1", Transf())]).fit([[1]])
assert_raise_message(AttributeError,
'Transformer tr1 (type Transf) does not provide '
'get_feature_names', ft.get_feature_names)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
transformer = Transf()
pipeline = make_pipeline(transformer)
X = np.ones(10)
msg = "1d X will not be reshaped in pipeline.inverse_transform"
assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
def test_set_feature_union_steps():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
mult5 = Mult(5)
mult5.get_feature_names = lambda: ['x5']
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]])))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
# Directly setting attr
ft.transformer_list = [('m5', mult5)]
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['m5__x5'], ft.get_feature_names())
# Using set_params
ft.set_params(transformer_list=[('mock', mult3)])
assert_array_equal([[3]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x3'], ft.get_feature_names())
# Using set_params to replace single step
ft.set_params(mock=mult5)
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x5'], ft.get_feature_names())
def test_set_feature_union_step_none():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
X = np.asarray([[1]])
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.fit(X).transform(X))
assert_array_equal([[2, 3]], ft.fit_transform(X))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
ft.set_params(m2=None)
assert_array_equal([[3]], ft.fit(X).transform(X))
assert_array_equal([[3]], ft.fit_transform(X))
assert_equal(['m3__x3'], ft.get_feature_names())
ft.set_params(m3=None)
assert_array_equal([[]], ft.fit(X).transform(X))
assert_array_equal([[]], ft.fit_transform(X))
assert_equal([], ft.get_feature_names())
# check we can change back
ft.set_params(m3=mult3)
assert_array_equal([[3]], ft.fit(X).transform(X))
def test_step_name_validation():
bad_steps1 = [('a__q', Mult(2)), ('b', Mult(3))]
bad_steps2 = [('a', Mult(2)), ('a', Mult(3))]
for cls, param in [(Pipeline, 'steps'),
(FeatureUnion, 'transformer_list')]:
# we validate in construction (despite scikit-learn convention)
bad_steps3 = [('a', Mult(2)), (param, Mult(3))]
for bad_steps, message in [
(bad_steps1, "Step names must not contain __: got ['a__q']"),
(bad_steps2, "Names provided are not unique: ['a', 'a']"),
(bad_steps3, "Step names conflict with constructor "
"arguments: ['%s']" % param),
]:
# three ways to make invalid:
# - construction
assert_raise_message(ValueError, message, cls,
**{param: bad_steps})
# - setattr
est = cls(**{param: [('a', Mult(1))]})
setattr(est, param, bad_steps)
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
# - set_params
est = cls(**{param: [('a', Mult(1))]})
est.set_params(**{param: bad_steps})
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
| bsd-3-clause |
pradyu1993/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 1 | 6638 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012
# License: BSD-like
import warnings
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.cluster import Ward, WardAgglomeration, ward_tree
from sklearn.cluster.hierarchical import _hc_cut
from sklearn.feature_extraction.image import grid_to_graph
def test_structured_ward_tree():
"""
Check that we obtain the correct solution for structured ward tree.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_components, n_leaves, parent = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
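# a binary merge tree over n leaves has n - 1 internal merge nodes,
# i.e. 2 * n - 1 nodes in total, which the assertion below checks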
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError, ward_tree, X.T, np.ones((4, 4)))
def test_unstructured_ward_tree():
"""
Check that we obtain the correct solution for unstructured ward tree.
"""
rnd = np.random.RandomState(0)
X = rnd.randn(50, 100)
for this_X in (X, X[0]):
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", UserWarning)
# With a specified number of clusters just for the sake of
# raising a warning and testing the warning code
children, n_nodes, n_leaves, parent = ward_tree(this_X.T,
n_clusters=10)
assert_equal(len(warning_list), 1)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_ward_tree():
"""
Check that the height of ward tree is sorted.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_nodes, n_leaves, parent = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_ward_clustering():
"""
Check that we obtain the correct number of clusters with Ward clustering.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(100, 50)
connectivity = grid_to_graph(*mask.shape)
clustering = Ward(n_clusters=10, connectivity=connectivity)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
np.testing.assert_array_equal(clustering.labels_, labels)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = Ward(n_clusters=10,
connectivity=connectivity.todense())
assert_raises(TypeError, clustering.fit, X)
clustering = Ward(n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.todense()[:10, :10]))
assert_raises(ValueError, clustering.fit, X)
def test_ward_agglomeration():
"""
Check that we obtain the correct solution in a simplistic case
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
ward = WardAgglomeration(n_clusters=5, connectivity=connectivity)
ward.fit(X)
assert_true(np.size(np.unique(ward.labels_)) == 5)
Xred = ward.transform(X)
assert_true(Xred.shape[1] == 5)
Xfull = ward.inverse_transform(Xred)
assert_true(np.unique(Xfull[0]).size == 5)
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
"""Test scikit ward with full connectivity (i.e. unstructured) vs scipy
"""
from scipy.sparse import lil_matrix
n, p, k = 10, 5, 3
rnd = np.random.RandomState(0)
connectivity = lil_matrix(np.ones((n, n)))
for i in range(5):
X = .1 * rnd.normal(size=(n, p))
X -= 4 * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = ward_tree(X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_popagation():
"""
Check that connectivity in the ward tree is propagated correctly during
merging.
"""
from sklearn.neighbors import NearestNeighbors
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144),
])
nn = NearestNeighbors(n_neighbors=10, warn_on_equidistant=False).fit(X)
connectivity = nn.kneighbors_graph(X)
ward = Ward(n_clusters=4, connectivity=connectivity)
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_connectivity_fixing_non_lil():
"""
Check non-regression of a bug when a connectivity matrix that does not
support item assignment is provided with more than one component.
"""
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = Ward(connectivity=c)
with warnings.catch_warnings(record=True):
w.fit(x)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
IPGP/DSM-Kernel | util/vrac/multiplekernel_write_vtk.py | 1 | 3343 | #!/usr/bin/env python
#!/Users/fujinobuaki/anaconda/bin/python
"""
This is a very simple plot script that reads a 3D DSM-Kernel sensitivity kernel
file and make a plot of it.
"""
import matplotlib.pyplot as plt
import numpy as np
from pyevtk.hl import gridToVTK
#from scipy.io import FortranFile
head=("head","<i")
tail=("tail","<i")
def read_fortran_record(binfile, count, dtype, filetype):
"""reads a sequential fortran binary file record"""
if filetype=='sequential':
rec_start = np.fromfile(binfile, count=1, dtype=np.int32)
content = np.fromfile(binfile, count=count, dtype=dtype)
rec_end = np.fromfile(binfile, count=1, dtype=np.int32)
assert rec_start == rec_end, 'record incomplete. wrong count or datatype'
else:
content = np.fromfile(binfile, count=count, dtype=dtype)
return content
#fname_kernel = 'output/STA90.Explosion.some.Z.100s30s.kernel'
#fname_kernel = 'output/STA.Explosion.some.Z.100s10s.kernel'
#fname_kernel='outputvideo/STA90.Explosion.some.Z.100s30s.0000007.video'
fname_grid = 'tmp2/STA91.Explosion.some1.Z.grid'
#fname_grid = 'tmp2/STA89.Explosion.some.Z.grid'
fname_plot = 'kernel.png'
for itime in range (1,102):
num_snap=str(itime).zfill(7)
fname_kernel='tmp2/STA91.Explosion.some1.Z.100s30s.'+num_snap+'.video'
fname_vtk='ker'+num_snap
kernelfile = open(fname_kernel, 'rb')
gridfile = open(fname_grid, 'rb')
# read grid info:
nr, nphi, ntheta, nktype = read_fortran_record(gridfile, count=4, dtype=np.int32,filetype='sequential')
nktype += 1 # starts counting from zero (should be changed in the code?)
radii = read_fortran_record(gridfile, count=nr, dtype=np.float32,filetype='sequential')
phis = read_fortran_record(gridfile, count=nphi * ntheta, dtype=np.float32,filetype='sequential')
thetas = read_fortran_record(gridfile, count=nphi * ntheta, dtype=np.float32,filetype='sequential')
gridfile.close()
# read kernel
npoints = nr * nphi * ntheta
kernel = read_fortran_record(kernelfile, count=npoints * nktype, dtype=np.float32,filetype='direct')
kernel = kernel.reshape(nktype, ntheta, nphi, nr)
kernelfile.close()
# write vtk file
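# the (radius, theta, phi) spherical grid is converted to Cartesian x/y/z so
# that gridToVTK can write it as a structured grid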
xgrid = np.outer(np.sin(np.radians(thetas)) * np.cos(np.radians(phis)), radii)
ygrid = np.outer(np.sin(np.radians(thetas)) * np.sin(np.radians(phis)), radii)
zgrid = np.outer(np.cos(np.radians(thetas)), radii)
xgrid = xgrid.reshape(ntheta, nphi, nr)
ygrid = ygrid.reshape(ntheta, nphi, nr)
zgrid = zgrid.reshape(ntheta, nphi, nr)
point_data = {'{:d}'.format(name): data for name, data in zip(range(nktype), kernel)}
gridToVTK(fname_vtk, xgrid, ygrid, zgrid, pointData=point_data)
    phis = phis.reshape(ntheta, nphi)
    thetas = thetas.reshape(ntheta, nphi)
# write some output information
r_min, r_max = radii[0], radii[-1]
phi_min, phi_max = phis[0, 0], phis[0, -1]
theta_min, theta_max = thetas[0, 0], thetas[-1, 0]
    print('kernel dimensions (nr={}, nphi={}, ntheta={})'.format(nr, nphi, ntheta))
    print('kernel types: {}'.format(nktype))
    print('radius range = {} -> {}'.format(r_min, r_max))
    print('phis range = {} -> {}'.format(phi_min, phi_max))
    print('thetas range = {} -> {}'.format(theta_min, theta_max))
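# Minimal helper sketch (added for clarity; not called by the script): the
# Cartesian grid built above uses the usual spherical convention, with theta
# the colatitude and phi the longitude, both given in degrees.
def _spherical_to_cartesian(r, theta_deg, phi_deg):
    theta = np.radians(theta_deg)
    phi = np.radians(phi_deg)
    return (r * np.sin(theta) * np.cos(phi),
            r * np.sin(theta) * np.sin(phi),
            r * np.cos(theta))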
| gpl-3.0 |
maxhutch/rti-split-scales | process.py | 1 | 3928 | """
Plot planes from joint analysis files.
Usage:
    process.py join <base_path>
    process.py plot <files>... [--output=<dir>]
Options:
--output=<dir> Output directory [default: ./frames]
"""
import h5py
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpi4py import MPI
MPI_RANK = MPI.COMM_WORLD.rank
from dedalus2.extras import plot_tools
even_scale = True
def main(filename, start, count, output):
# Layout
nrows, ncols = 4, 1
image = plot_tools.Box(4, 1.1)
pad = plot_tools.Frame(0.2, 0.1, 0.1, 0.1)
margin = plot_tools.Frame(0.2, 0.1, 0.1, 0.1)
scale = 3.
# Plot settings
dpi = 100
# Create multifigure
mfig = plot_tools.MultiFigure(nrows, ncols, image, pad, margin, scale)
with h5py.File(filename, mode='r') as file:
for index in range(start, start+count):
print(MPI_RANK, filename, start, index, start+count)
# Plot datasets
#taskkey = lambda taskname: write[taskname].attrs['task_number']
#for k, taskname in enumerate(sorted(write, key=taskkey)):
for k, task in enumerate(file['tasks']):
dset = file['tasks'][task]
pcolormesh(mfig, k, task, dset, index)
# Title
title = 't = %g' %file['scales/sim_time'][index]
title_height = 1 - 0.5 * mfig.margin.top / mfig.fig.y
mfig.figure.suptitle(title, y=title_height)
# Save
write = file['scales']['write_number'][index]
            savename = lambda write: 'write_%06i.png' % write
fig_path = output.joinpath(savename(write))
mfig.figure.savefig(str(fig_path), dpi=dpi)
mfig.figure.clear()
plt.close(mfig.figure)
def pcolormesh(mfig, k, taskname, dset, index):
# Pick data axes for x and y plot axes
# Note: we use the full time-space data array, so remember that axis 0 is time
xi, yi = (1, 2)
# Slices for data.
# First is time axis, here sliced by the "index" argument.
# The "xi" and "yi" entries should be "slice(None)",
# Others (for >2 spatial dimensions) should be an integer.
datslices = (index, slice(None), slice(None))
# Create axes
i, j = divmod(k, mfig.ncols)
paxes = mfig.add_axes(i, j, [0., 0., 1., 0.91])
caxes = mfig.add_axes(i, j, [0., 0.93, 1., 0.05])
# Get vertices
xmesh, ymesh, data = plot_tools.get_plane(dset, xi, yi, datslices)
# Colormap
cmap = matplotlib.cm.get_cmap('RdBu_r')
cmap.set_bad('0.7')
# Plot
plot = paxes.pcolormesh(xmesh, ymesh, data, cmap=cmap, zorder=1)
paxes.axis(plot_tools.pad_limits(xmesh, ymesh, ypad=0.0, square=False))
paxes.tick_params(length=0, width=0)
if even_scale:
lim = max(abs(data.min()), abs(data.max()))
plot.set_clim(-lim, lim)
# Colorbar
cbar = mfig.figure.colorbar(plot, cax=caxes, orientation='horizontal',
ticks=ticker.MaxNLocator(nbins=5))
cbar.outline.set_visible(False)
caxes.xaxis.set_ticks_position('top')
# Labels
caxes.set_xlabel(taskname)
caxes.xaxis.set_label_position('top')
paxes.set_ylabel(dset.dims[yi].label)
paxes.set_xlabel(dset.dims[xi].label)
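# Small sketch of the colour-limit rule applied in pcolormesh above when
# even_scale is True: the limits are symmetric about zero, so the diverging
# 'RdBu_r' colormap keeps white at zero. (Helper added for illustration only.)
def _symmetric_clim(data):
    lim = max(abs(data.min()), abs(data.max()))
    return -lim, lim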
if __name__ == "__main__":
import pathlib
from docopt import docopt
from dedalus2.tools import logging
from dedalus2.tools import post
from dedalus2.tools.parallel import Sync
args = docopt(__doc__)
if args['join']:
post.merge_analysis(args['<base_path>'])
elif args['plot']:
output_path = pathlib.Path(args['--output']).absolute()
# Create output directory if needed
with Sync() as sync:
if sync.comm.rank == 0:
if not output_path.exists():
output_path.mkdir()
post.visit(args['<files>'], main, output=output_path)
| gpl-3.0 |
reychil/project-alpha-1 | code/utils/tests/test_noise_correction.py | 1 | 1882 | """ Tests for noise_correction functions in noise_correction.py
These tests exercise the noise-correction helpers (mean_underlying_noise and
fourier_predict_underlying_noise) on small arrays whose expected output is
known in advance.
Run at the project directory with:
nosetests code/utils/tests/test_noise_correction.py
"""
# Loading modules.
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
import nibabel as nib
import pandas as pd # new
import sys
import os
import scipy.stats
from scipy.stats import gamma
from numpy.testing import assert_almost_equal, assert_array_equal
from nose.tools import assert_not_equals
# Path to the subject 009 fMRI data used in class.
location_of_data="data/ds009/"
location_of_subject001=location_of_data+"sub001/"
location_to_class_data="data/ds114/"
# Add path to functions to the system path.
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
# Load our noise correction functions.
from noise_correction import mean_underlying_noise,fourier_creation,fourier_predict_underlying_noise
# Load GLM functions.
from glm import glm, glm_diagnostics, glm_multiple
def test_noise_correction():
# tests mean_underlying_noise
# Case where there is noise.
test=np.arange(256)
test=test.reshape((4,4,4,4))
val=np.mean(np.arange(0,256,4))
y_mean = mean_underlying_noise(test)
assert(all(y_mean==(np.tile(val,4)+np.array([0,1,2,3])) ))
# Case where there is no noise.
test_2=np.ones(256)
test_2=test_2.reshape((4,4,4,4))
y_mean2 = mean_underlying_noise(test_2)
assert(all(y_mean2==np.ones(4)))
# Test predicting noise with Fourier series.
fourier_X, fourier_MRSS, fourier_fitted, fourier_residuals = fourier_predict_underlying_noise(y_mean, 10)
naive_resid = y_mean-y_mean.mean()
assert_not_equals(naive_resid[0], fourier_residuals[0])
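# Reference sketch (an inference from the assertions above, NOT the actual
# implementation in noise_correction.py): mean_underlying_noise appears to
# average over the three spatial axes and return one value per time point.
def _naive_mean_underlying_noise(data_4d):
    return data_4d.reshape(-1, data_4d.shape[-1]).mean(axis=0)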
| bsd-3-clause |
endolith/scipy | scipy/stats/_multivariate.py | 5 | 153946 | #
# Author: Joris Vankerschaver 2013
#
import math
import numpy as np
from numpy import asarray_chkfinite, asarray
import scipy.linalg
from scipy._lib import doccer
from scipy.special import gammaln, psi, multigammaln, xlogy, entr, betaln
from scipy._lib._util import check_random_state
from scipy.linalg.blas import drot
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
from ._discrete_distns import binom
from . import mvn
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'multinomial',
'special_ortho_group',
'ortho_group',
'random_correlation',
'unitary_group',
'multivariate_t',
'multivariate_hypergeom']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
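# Illustrative sketch (the helper name is introduced here for documentation
# only): how the two functions above cooperate. Eigenvalues at or below the
# machine-precision based cutoff are treated as exactly zero when inverting.
def _demo_pinv_spectrum():
    spectrum = np.array([2.0, 1.0, 1e-18])   # last eigenvalue is numerically zero
    eps = _eigvalsh_to_eps(spectrum)         # cutoff ~ 1e6 * eps(float64) * 2.0
    return _pinv_1d(spectrum, eps)           # -> array([0.5, 1. , 0. ])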
class _PSD:
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
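# Minimal usage sketch for the internal _PSD helper (added for illustration):
# a single eigendecomposition yields a mutually consistent pseudo-inverse,
# log pseudo-determinant and rank, even for a singular PSD matrix.
def _demo_psd():
    M = np.diag([2.0, 1.0, 0.0])             # rank-2 positive semidefinite matrix
    psd = _PSD(M, allow_singular=True)
    # psd.rank == 2, psd.log_pdet == log(2.0 * 1.0),
    # psd.pinv agrees with scipy.linalg.pinvh(M) == diag([0.5, 1.0, 0.0])
    return psd.rank, psd.log_pdet, psd.pinv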
class multi_rv_generic:
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super().__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the Generator object for generating random variates.
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen:
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
Cumulative distribution function.
``logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
Log of the cumulative distribution function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _process_parameters(self, dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be "
"a scalar.")
# Check input sizes and return full arrays for mean and cov if
# necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." %
dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""Log of the multivariate normal probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def _cdf(self, x, mean, cov, maxpts, abseps, releps):
"""Log of the multivariate normal cumulative distribution function.
Parameters
----------
x : ndarray
Points at which to evaluate the cumulative distribution function.
mean : ndarray
Mean of the distribution
cov : array_like
Covariance matrix of the distribution
maxpts : integer
The maximum number of points to use for integration
abseps : float
Absolute error tolerance
releps : float
Relative error tolerance
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'cdf' instead.
.. versionadded:: 1.0.0
"""
lower = np.full(mean.shape, -np.inf)
# mvnun expects 1-d arguments, so process points sequentially
func1d = lambda x_slice: mvn.mvnun(lower, x_slice, mean, cov,
maxpts, abseps, releps)[0]
out = np.apply_along_axis(func1d, -1, x)
return _squeeze_output(out)
def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5):
"""Log of the multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts : integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps : float, optional
Absolute error tolerance (default 1e-5)
releps : float, optional
Relative error tolerance (default 1e-5)
Returns
-------
cdf : ndarray or scalar
Log of the cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
# Use _PSD to check covariance matrix
_PSD(cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * dim
out = np.log(self._cdf(x, mean, cov, maxpts, abseps, releps))
return out
def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5):
"""Multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts : integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps : float, optional
Absolute error tolerance (default 1e-5)
releps : float, optional
Relative error tolerance (default 1e-5)
Returns
-------
cdf : ndarray or scalar
Cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
# Use _PSD to check covariance matrix
_PSD(cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * dim
out = self._cdf(x, mean, cov, maxpts, abseps, releps)
return out
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
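# Short sketch (added illustration, not part of SciPy's documentation): since
# the determinant and inverse of `cov` are computed as pseudo-determinant and
# pseudo-inverse, a rank-deficient covariance is accepted with
# allow_singular=True, as long as the point lies in the support.
def _demo_singular_cov_pdf():
    cov = [[1.0, 1.0],
           [1.0, 1.0]]                       # rank-1 covariance
    x = [0.5, 0.5]                           # lies on the support (the line y = x)
    return multivariate_normal.pdf(x, mean=[0.0, 0.0], cov=cov,
                                   allow_singular=True)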
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None,
maxpts=None, abseps=1e-5, releps=1e-5):
"""Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
maxpts : integer, optional
The maximum number of points to use for integration of the
cumulative distribution function (default `1000000*dim`)
abseps : float, optional
Absolute error tolerance for the cumulative distribution function
(default 1e-5)
releps : float, optional
Relative error tolerance for the cumulative distribution function
(default 1e-5)
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self._dist = multivariate_normal_gen(seed)
self.dim, self.mean, self.cov = self._dist._process_parameters(
None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * self.dim
self.maxpts = maxpts
self.abseps = abseps
self.releps = releps
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def logcdf(self, x):
return np.log(self.cdf(x))
def cdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._cdf(x, self.mean, self.cov, self.maxpts, self.abseps,
self.releps)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
mvn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
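# Consistency sketch (illustration only): the frozen distribution's entropy,
# 0.5 * (rank * (log(2*pi) + 1) + log_pdet), reduces to the generic
# 0.5 * logdet(2*pi*e*cov) whenever the covariance has full rank.
def _demo_entropy_consistency():
    cov = np.array([[2.0, 0.3],
                    [0.3, 0.5]])
    frozen = multivariate_normal(mean=[0.0, 0.0], cov=cov)
    return frozen.entropy(), multivariate_normal.entropy([0.0, 0.0], cov)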
_matnorm_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
"""
_matnorm_doc_callparams_note = \
"""If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `rowcov` and
`colcov`, if these are provided, or set to `1` if ambiguous.
`rowcov` and `colcov` can be two-dimensional array_likes specifying the
covariance matrices directly. Alternatively, a one-dimensional array will
be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matnorm_doc_frozen_callparams = ""
_matnorm_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
matnorm_docdict_params = {
'_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
matnorm_docdict_noparams = {
'_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
r"""A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
among-row covariance matrix. The 'colcov' keyword specifies the
among-column covariance matrix.
Methods
-------
``pdf(X, mean=None, rowcov=1, colcov=1)``
Probability density function.
``logpdf(X, mean=None, rowcov=1, colcov=1)``
Log of the probability density function.
``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
Draw random samples.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
rv = matrix_normal(mean=None, rowcov=1, colcov=1)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""Create a frozen matrix normal distribution.
See `matrix_normal_frozen` for more information.
"""
return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
if np.any(meanshape == 0):
raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the "
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the "
"same number of columns.")
else:
mean = np.zeros((numrows, numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
Adjust quantiles array so that last two axes labels the components of
each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
col_prec_rt, log_det_colcov):
"""Log of the matrix normal probability density function.
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
row_prec_rt : ndarray
A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
is the inverse of the among-row covariance matrix
log_det_rowcov : float
Logarithm of the determinant of the among-row covariance matrix
col_prec_rt : ndarray
A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
is the inverse of the among-column covariance matrix
log_det_colcov : float
Logarithm of the determinant of the among-column covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
numrows, numcols = dims
roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
scale_dev = np.tensordot(col_prec_rt.T,
np.dot(roll_dev, row_prec_rt), 1)
maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+ numrows*log_det_colcov + maha)
def logpdf(self, X, mean=None, rowcov=1, colcov=1):
"""Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
"""Draw random samples from a matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
size = int(size)
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
rowchol = scipy.linalg.cholesky(rowcov, lower=True)
colchol = scipy.linalg.cholesky(colcov, lower=True)
random_state = self._get_random_state(random_state)
std_norm = random_state.standard_normal(size=(dims[1], size, dims[0]))
roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis, :, :]
if size == 1:
out = out.reshape(mean.shape)
return out
matrix_normal = matrix_normal_gen()
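# Sketch (illustration; not the SciPy implementation itself): a matrix-normal
# draw can be written as X = M + A Z B.T with A A.T = rowcov, B B.T = colcov
# and Z a matrix of i.i.d. standard normals, which is the construction the
# `rvs` method above applies in vectorised form.
def _demo_matrix_normal_draw(seed=0):
    rng = np.random.RandomState(seed)
    M = np.zeros((3, 2))
    U = np.diag([1.0, 2.0, 3.0])             # among-row covariance
    V = 0.3 * np.identity(2)                 # among-column covariance
    A = scipy.linalg.cholesky(U, lower=True)
    B = scipy.linalg.cholesky(V, lower=True)
    Z = rng.standard_normal((3, 2))
    return M + A @ Z @ B.T                   # distributed as matrix_normal(M, U, V)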
class matrix_normal_frozen(multi_rv_frozen):
"""Create a frozen matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import matrix_normal
>>> distn = matrix_normal(mean=np.zeros((3,3)))
>>> X = distn.rvs(); X
array([[-0.02976962, 0.93339138, -0.09663178],
[ 0.67405524, 0.28250467, -0.93308929],
[-0.31144782, 0.74535536, 1.30412916]])
>>> distn.pdf(X)
2.5160642368346784e-05
>>> distn.logpdf(X)
-10.590229595124615
"""
def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
self._dist = matrix_normal_gen(seed)
self.dims, self.mean, self.rowcov, self.colcov = \
self._dist._process_parameters(mean, rowcov, colcov)
self.rowpsd = _PSD(self.rowcov, allow_singular=False)
self.colpsd = _PSD(self.colcov, allow_singular=False)
def logpdf(self, X):
X = self._dist._process_quantiles(X, self.dims)
out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
self.rowpsd.log_pdet, self.colpsd.U,
self.colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X):
return np.exp(self.logpdf(X))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = matrix_normal_gen.__dict__[name]
method_frozen = matrix_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
matnorm_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) < 0:
raise ValueError("Each entry in 'x' must be greater than or equal "
"to zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
# Check x_i > 0 or alpha_i > 1
xeq0 = (x == 0)
alphalt1 = (alpha < 1)
if x.shape != alpha.shape:
alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape)
chk = np.logical_and(xeq0, alphalt1)
if np.sum(chk):
raise ValueError("Each entry in 'x' must be greater than zero if its "
"alpha is less than one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
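# Sketch (illustration of the behaviour above): quantiles may be passed with
# one entry fewer than `alpha`; the final component is then filled in as
# 1 - sum(x) before the remaining checks run.
def _demo_dirichlet_input():
    alpha = np.array([0.4, 5.0, 15.0])
    x = np.array([0.2, 0.2])                 # third entry implied: 0.6
    return _dirichlet_check_input(alpha, x)  # -> array([0.2, 0.2, 0.6])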
def _lnB(alpha):
r"""Internal helper function to compute the log of the useful quotient.
.. math::
B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)}
{\Gamma\left(\sum_{i=1}^{K} \alpha_i \right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
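# Sanity sketch (illustration): for K = 2 the normalising constant is the
# ordinary Beta function, so _lnB agrees with scipy.special.betaln.
def _demo_lnB():
    a, b = 2.5, 4.0
    return _lnB(np.array([a, b])), betaln(a, b)   # both equal log B(a, b)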
class dirichlet_gen(multi_rv_generic):
r"""A Dirichlet random variable.
The ``alpha`` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
    Each :math:`\alpha` entry must be positive. The distribution is
    supported only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i = 1
where :math:`0 < x_i < 1`.
If the quantiles don't lie within the simplex, a ValueError is raised.
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
Note that the dirichlet interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
Examples
--------
>>> from scipy.stats import dirichlet
Generate a dirichlet random variable
>>> quantiles = np.array([0.2, 0.2, 0.6]) # specify quantiles
>>> alpha = np.array([0.4, 5, 15]) # specify concentration parameters
>>> dirichlet.pdf(quantiles, alpha)
0.2843831684937255
The same PDF but following a log scale
>>> dirichlet.logpdf(quantiles, alpha)
-1.2574327653159187
Once we specify the dirichlet distribution
we can then calculate quantities of interest
>>> dirichlet.mean(alpha) # get the mean of the distribution
array([0.01960784, 0.24509804, 0.73529412])
>>> dirichlet.var(alpha) # get variance
array([0.00089829, 0.00864603, 0.00909517])
>>> dirichlet.entropy(alpha) # calculate the differential entropy
-4.3280162474082715
We can also return random samples from the distribution
>>> dirichlet.rvs(alpha, size=1, random_state=1)
array([[0.00766178, 0.24670518, 0.74563305]])
>>> dirichlet.rvs(alpha, size=2, random_state=2)
array([[0.01639427, 0.1292273 , 0.85437844],
[0.00156917, 0.19033695, 0.80809388]])
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""Log of the Dirichlet probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0)
def logpdf(self, x, alpha):
"""Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : ndarray or scalar
Mean of the Dirichlet distribution.
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : ndarray or scalar
Variance of the Dirichlet distribution.
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return _squeeze_output(out)
def entropy(self, alpha):
"""Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix). These arguments must satisfy the relationship
``df > scale.ndim - 1``, but see notes on using the `rvs` method with
``df < scale.ndim``.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
The algorithm [2]_ implemented by the `rvs` method may
produce numerically singular matrices with :math:`p - 1 < \nu < p`; the
user may wish to check for this condition and generate replacement samples
as necessary.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis, np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df <= dim - 1:
raise ValueError("Degrees of freedom must be greater than the "
"dimension of scale matrix minus 1.")
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
' Got size.ndim = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""Log of the Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
        # components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.empty(x.shape[-1])
scale_inv_x = np.empty(x.shape)
tr_scale_inv_x = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:, :, i])
scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i])
tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
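        Examples
        --------
        A minimal illustrative check (the quantile and degrees of freedom
        below are arbitrary): for a 1x1 scale matrix equal to one, the
        Wishart log-density reduces to the chi-square log-density with
        `df` degrees of freedom.

        >>> import numpy as np
        >>> from scipy.stats import wishart, chi2
        >>> x, df = 3.5, 4  # arbitrary quantile and degrees of freedom
        >>> bool(np.isclose(wishart.logpdf(x, df, 1), chi2.logpdf(x, df)))
        True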
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
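        Examples
        --------
        A small illustrative sketch (parameter values chosen arbitrarily);
        the mean is simply ``df * scale``:

        >>> import numpy as np
        >>> from scipy.stats import wishart
        >>> wishart.mean(df=5, scale=np.eye(2))  # arbitrary df and scale
        array([[5., 0.],
               [0., 5.]])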
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""Mode of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) +
shape[::-1]).T)
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None, None, None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from a Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
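        Examples
        --------
        A shape-oriented sketch (parameter values chosen arbitrarily):
        drawing ``size`` variates with a ``dim x dim`` scale matrix gives an
        array of shape ``(size, dim, dim)``.

        >>> import numpy as np
        >>> from scipy.stats import wishart
        >>> samples = wishart.rvs(df=5, scale=np.eye(3), size=4,
        ...                       random_state=123)  # arbitrary values
        >>> samples.shape
        (4, 3, 3)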
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
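        Examples
        --------
        An illustrative consistency check (arbitrary degrees of freedom):
        with a 1x1 scale matrix equal to one, the Wishart entropy matches
        the chi-square entropy.

        >>> import numpy as np
        >>> from scipy.stats import wishart, chi2
        >>> bool(np.isclose(wishart.entropy(df=4, scale=1), chi2.entropy(4)))
        True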
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
class wishart_frozen(multi_rv_frozen):
"""Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
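    Examples
    --------
    A minimal illustrative sketch (parameter values chosen arbitrarily):

    >>> import numpy as np
    >>> from scipy.stats import wishart
    >>> rv = wishart(df=5, scale=np.eye(2))  # freezes df and scale
    >>> rv.rvs(size=3, random_state=0).shape
    (3, 2, 2)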
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See Also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
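    Examples
    --------
    A minimal illustrative sketch (private helper, assumed to be in scope;
    the diagonal input matrices are arbitrary):

    >>> import numpy as np
    >>> a = np.array([2.0 * np.eye(2), 4.0 * np.eye(2)])  # two SPD matrices
    >>> inv = _cho_inv_batch(a)  # overwrites and returns `a`
    >>> bool(np.allclose(inv, [np.eye(2) / 2.0, np.eye(2) / 4.0]))
    True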
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf', 'potri'), (a1,))
triu_rows, triu_cols = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
            raise ValueError('illegal value in %d-th argument of internal'
                             ' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_rows, triu_cols] = a1[index][triu_cols, triu_rows]
return a1
class invwishart_gen(wishart_gen):
r"""An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
    If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
    inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
    and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications
in Statistics - Simulation and Computation, vol. 14.2, pp.511-514,
1985.
Examples
--------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.empty(x.shape[-1])
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:, :, i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
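        Examples
        --------
        An illustrative check (the values below are arbitrary): with a 1x1
        scale matrix equal to one, the inverse Wishart log-density reduces
        to the inverse gamma log-density with shape ``df/2`` and scale
        ``1/2``.

        >>> import numpy as np
        >>> from scipy.stats import invwishart, invgamma
        >>> x, df = 0.7, 6  # arbitrary quantile and degrees of freedom
        >>> bool(np.isclose(invwishart.logpdf(x, df, 1),
        ...                 invgamma.logpdf(x, df / 2., scale=1. / 2)))
        True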
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""Mean of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
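        Examples
        --------
        A small illustrative sketch (parameter values chosen arbitrarily);
        the mean is ``scale / (df - dim - 1)`` when it exists:

        >>> import numpy as np
        >>> from scipy.stats import invwishart
        >>> invwishart.mean(df=5, scale=np.eye(2))  # arbitrary df and scale
        array([[0.5, 0. ],
               [0. , 0.5]])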
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""Variance of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""Variance of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super()._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is None the `numpy.random.Generator` singleton is used.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
_multinomial_doc_default_callparams = """\
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
"""
_multinomial_doc_callparams_note = \
"""`n` should be a positive integer. Each element of `p` should be in the
interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
1, the last element of the `p` array is not used and is replaced with the
remaining probability left over from the earlier elements.
"""
_multinomial_doc_frozen_callparams = ""
_multinomial_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
multinomial_docdict_params = {
'_doc_default_callparams': _multinomial_doc_default_callparams,
'_doc_callparams_note': _multinomial_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
multinomial_docdict_noparams = {
'_doc_default_callparams': _multinomial_doc_frozen_callparams,
'_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multinomial_gen(multi_rv_generic):
r"""A multinomial random variable.
Methods
-------
``pmf(x, n, p)``
Probability mass function.
``logpmf(x, n, p)``
Log of the probability mass function.
``rvs(n, p, size=1, random_state=None)``
Draw random samples from a multinomial distribution.
``entropy(n, p)``
Compute the entropy of the multinomial distribution.
``cov(n, p)``
Compute the covariance matrix of the multinomial distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
Alternatively, the object may be called (as a function) to fix the `n` and
`p` parameters, returning a "frozen" multinomial random variable:
The probability mass function for `multinomial` is
.. math::
f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
nonnegative integer and their sum is :math:`n`.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.stats import multinomial
>>> rv = multinomial(8, [0.3, 0.2, 0.5])
>>> rv.pmf([1, 3, 4])
0.042000000000000072
The multinomial distribution for :math:`k=2` is identical to the
corresponding binomial distribution (tiny numerical differences
notwithstanding):
>>> from scipy.stats import binom
>>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
0.29030399999999973
>>> binom.pmf(3, 7, 0.4)
0.29030400000000012
The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
broadcasting, under the convention that the vector parameters (``x`` and
``p``) are interpreted as if each row along the last axis is a single
object. For instance:
>>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
array([0.2268945, 0.25412184])
Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
but following the rules mentioned above they behave as if the rows
``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
``p.shape = ()``. To obtain the individual elements without broadcasting,
we would do this:
>>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
0.2268945
>>> multinomial.pmf([3, 5], 8, p=[.3, .7])
0.25412184
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``p.shape[-1]``. For example:
>>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
array([[[ 0.84, -0.84],
[-0.84, 0.84]],
[[ 1.2 , -1.2 ],
[-1.2 , 1.2 ]]])
In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
following the rules above, these broadcast as if ``p.shape == (2,)``.
Thus the result should also be of shape ``(2,)``, but since each output is
a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
See also
--------
scipy.stats.binom : The binomial distribution.
numpy.random.Generator.multinomial : Sampling from the multinomial distribution.
scipy.stats.multivariate_hypergeom :
The multivariate hypergeometric distribution.
""" # noqa: E501
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = \
doccer.docformat(self.__doc__, multinomial_docdict_params)
def __call__(self, n, p, seed=None):
"""Create a frozen multinomial distribution.
See `multinomial_frozen` for more information.
"""
return multinomial_frozen(n, p, seed)
def _process_parameters(self, n, p):
"""Returns: n_, p_, npcond.
n_ and p_ are arrays of the correct shape; npcond is a boolean array
flagging values out of the domain.
"""
p = np.array(p, dtype=np.float64, copy=True)
p[..., -1] = 1. - p[..., :-1].sum(axis=-1)
# true for bad p
pcond = np.any(p < 0, axis=-1)
pcond |= np.any(p > 1, axis=-1)
n = np.array(n, dtype=np.int_, copy=True)
# true for bad n
ncond = n <= 0
return n, p, ncond | pcond
def _process_quantiles(self, x, n, p):
"""Returns: x_, xcond.
x_ is an int array; xcond is a boolean array flagging values out of the
domain.
"""
xx = np.asarray(x, dtype=np.int_)
if xx.ndim == 0:
raise ValueError("x must be an array.")
if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
raise ValueError("Size of each quantile should be size of p: "
"received %d, but expected %d." %
(xx.shape[-1], p.shape[-1]))
# true for x out of the domain
cond = np.any(xx != x, axis=-1)
cond |= np.any(xx < 0, axis=-1)
cond = cond | (np.sum(xx, axis=-1) != n)
return xx, cond
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
if result.ndim == 0:
return bad_value
result[...] = bad_value
return result
def _logpmf(self, x, n, p):
return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
def logpmf(self, x, n, p):
"""Log of the Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x, xcond = self._process_quantiles(x, n, p)
result = self._logpmf(x, n, p)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
        result = self._checkresult(result, xcond_, -np.inf)
        # replace values bad for n or p; broadcast npcond to the right shape
        npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
        return self._checkresult(result, npcond_, np.nan)
def pmf(self, x, n, p):
"""Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpmf(x, n, p))
def mean(self, n, p):
"""Mean of the Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
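        Examples
        --------
        A minimal illustrative sketch (arbitrary parameters); the mean is
        ``n * p``:

        >>> from scipy.stats import multinomial
        >>> multinomial.mean(8, [0.3, 0.2, 0.5])  # arbitrary n and p
        array([2.4, 1.6, 4. ])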
"""
n, p, npcond = self._process_parameters(n, p)
result = n[..., np.newaxis]*p
        return self._checkresult(result, npcond, np.nan)
def cov(self, n, p):
"""Covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : ndarray
The covariance matrix of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
nn = n[..., np.newaxis, np.newaxis]
result = nn * np.einsum('...j,...k->...jk', -p, p)
# change the diagonal
for i in range(p.shape[-1]):
result[..., i, i] += n*p[..., i]
return self._checkresult(result, npcond, np.nan)
def entropy(self, n, p):
r"""Compute the entropy of the multinomial distribution.
The entropy is computed using this expression:
.. math::
            h = -\log n! - n\sum_{i=1}^k p_i \log p_i +
                \sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Multinomial distribution
Notes
-----
%(_doc_callparams_note)s
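        Examples
        --------
        An illustrative consistency check (arbitrary parameters): with two
        categories, the multinomial entropy matches the binomial entropy.

        >>> import numpy as np
        >>> from scipy.stats import multinomial, binom
        >>> bool(np.isclose(multinomial.entropy(7, [0.4, 0.6]),
        ...                 binom.entropy(7, 0.4)))
        True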
"""
n, p, npcond = self._process_parameters(n, p)
x = np.r_[1:np.max(n)+1]
term1 = n*np.sum(entr(p), axis=-1)
term1 -= gammaln(n+1)
n = n[..., np.newaxis]
new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
x.shape += (1,)*new_axes_needed
term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
axis=(-1, -1-new_axes_needed))
return self._checkresult(term1 + term2, npcond, np.nan)
def rvs(self, n, p, size=None, random_state=None):
"""Draw random samples from a Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
        size : integer or iterable of integers, optional
            Number of samples to draw. Default is ``None``, in which case a
            single variate is returned.
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of shape (`size`, `len(p)`)
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
random_state = self._get_random_state(random_state)
return random_state.multinomial(n, p, size)
multinomial = multinomial_gen()
class multinomial_frozen(multi_rv_frozen):
r"""Create a frozen Multinomial distribution.
Parameters
----------
    n : int
        Number of trials
    p : array_like
        Probability of a trial falling into each category; should sum to 1
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def __init__(self, n, p, seed=None):
self._dist = multinomial_gen(seed)
self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
# monkey patch self._dist
def _process_parameters(n, p):
return self.n, self.p, self.npcond
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.n, self.p)
def pmf(self, x):
return self._dist.pmf(x, self.n, self.p)
def mean(self):
return self._dist.mean(self.n, self.p)
def cov(self):
return self._dist.cov(self.n, self.p)
def entropy(self):
return self._dist.entropy(self.n, self.p)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.n, self.p, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multinomial and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'cov', 'entropy', 'rvs']:
method = multinomial_gen.__dict__[name]
method_frozen = multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, multinomial_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
multinomial_docdict_params)
class special_ortho_group_gen(multi_rv_generic):
r"""A matrix-valued SO(N) random variable.
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from SO(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is wrapping the random_rot code from the MDP Toolkit,
https://github.com/mdp-toolkit/mdp-toolkit
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The algorithm is described in the paper
Stewart, G.W., "The efficient generation of random orthogonal
matrices with an application to condition estimators", SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
See also the similar `ortho_group`. For a random rotation in three
dimensions, see `scipy.spatial.transform.Rotation.random`.
Examples
--------
    >>> import numpy as np
    >>> from scipy.stats import special_ortho_group
>>> x = special_ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> scipy.linalg.det(x)
1.0
This generates one random matrix from SO(3). It is orthogonal and
has a determinant of 1.
See Also
--------
ortho_group, scipy.spatial.transform.Rotation.random
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""Create a frozen SO(N) distribution.
See `special_ortho_group_frozen` for more information.
"""
return special_ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("""Dimension of rotation must be specified,
and must be a scalar greater than 1.""")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
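        Examples
        --------
        A shape-oriented sketch (the dimension, size, and seed are
        arbitrary): several samples are stacked along the first axis.

        >>> from scipy.stats import special_ortho_group
        >>> special_ortho_group.rvs(3, size=2, random_state=1).shape
        (2, 3, 3)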
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
H = np.eye(dim)
D = np.empty((dim,))
for n in range(dim-1):
x = random_state.normal(size=(dim-n,))
norm2 = np.dot(x, x)
x0 = x[0].item()
D[n] = np.sign(x[0]) if x[0] != 0 else 1
x[0] += D[n]*np.sqrt(norm2)
x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)
# Householder transformation
H[:, n:] -= np.outer(np.dot(H[:, n:], x), x)
D[-1] = (-1)**(dim-1)*D[:-1].prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return H
special_ortho_group = special_ortho_group_gen()
class special_ortho_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""Create a frozen SO(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> g = special_ortho_group(5)
>>> x = g.rvs()
"""
self._dist = special_ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
r"""A matrix-valued O(N) random variable.
Return a random orthogonal matrix, drawn from the O(N) Haar
distribution (the only uniform distribution on O(N)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from O(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is closely related to `special_ortho_group`.
Some care is taken to avoid numerical error, as per the paper by Mezzadri.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
    >>> import numpy as np
    >>> from scipy.stats import ortho_group
>>> x = ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> np.fabs(scipy.linalg.det(x))
1.0
This generates one random matrix from O(3). It is orthogonal and
has a determinant of +1 or -1.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
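        Examples
        --------
        An illustrative sanity check (arbitrary dimension and seed): each
        draw is orthogonal up to floating-point error.

        >>> import numpy as np
        >>> from scipy.stats import ortho_group
        >>> x = ortho_group.rvs(4, random_state=7)
        >>> bool(np.allclose(x @ x.T, np.eye(4)))
        True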
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
H = np.eye(dim)
for n in range(dim):
x = random_state.normal(size=(dim-n,))
norm2 = np.dot(x, x)
x0 = x[0].item()
# random sign, 50/50, but chosen carefully to avoid roundoff error
D = np.sign(x[0]) if x[0] != 0 else 1
x[0] += D * np.sqrt(norm2)
x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)
# Householder transformation
H[:, n:] = -D * (H[:, n:] - np.outer(np.dot(H[:, n:], x), x))
return H
ortho_group = ortho_group_gen()
class random_correlation_gen(multi_rv_generic):
r"""A random correlation matrix.
Return a random correlation matrix, given a vector of eigenvalues.
The `eigs` keyword specifies the eigenvalues of the correlation matrix,
and implies the dimension.
Methods
-------
``rvs(eigs=None, random_state=None)``
Draw random correlation matrices, all with eigenvalues eigs.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix.
Notes
-----
Generates a random correlation matrix following a numerically stable
algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
similarity transformation to construct a symmetric positive semi-definite
matrix, and applies a series of Givens rotations to scale it to have ones
on the diagonal.
References
----------
.. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
of correlation matrices and their factors", BIT 2000, Vol. 40,
           No. 4, pp. 640-651.
Examples
--------
    >>> import numpy as np
    >>> from scipy.stats import random_correlation
>>> rng = np.random.default_rng()
>>> x = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=rng)
>>> x
array([[ 1. , -0.07198934, -0.20411041, -0.24385796],
[-0.07198934, 1. , 0.12968613, -0.29471382],
[-0.20411041, 0.12968613, 1. , 0.2828693 ],
[-0.24385796, -0.29471382, 0.2828693 , 1. ]])
>>> import scipy.linalg
>>> e, v = scipy.linalg.eigh(x)
>>> e
array([ 0.5, 0.8, 1.2, 1.5])
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, eigs, tol):
eigs = np.asarray(eigs, dtype=float)
dim = eigs.size
if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
raise ValueError("Array 'eigs' must be a vector of length "
"greater than 1.")
if np.fabs(np.sum(eigs) - dim) > tol:
raise ValueError("Sum of eigenvalues must equal dimensionality.")
for x in eigs:
if x < -tol:
raise ValueError("All eigenvalues must be non-negative.")
return dim, eigs
def _givens_to_1(self, aii, ajj, aij):
"""Computes a 2x2 Givens matrix to put 1's on the diagonal.
The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
        The output matrix g is a 2x2 Givens rotation matrix of the form
        [ c s ; -s c ]; the elements c and s are returned.
Applying the output matrix to the input matrix (as b=g.T M g)
results in a matrix with bii=1, provided tr(M) - det(M) >= 1
and floating point issues do not occur. Otherwise, some other
valid rotation is returned. When tr(M)==2, also bjj=1.
"""
aiid = aii - 1.
ajjd = ajj - 1.
if ajjd == 0:
# ajj==1, so swap aii and ajj to avoid division by zero
return 0., 1.
dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
        # t is chosen to avoid cancellation [1]
t = (aij + math.copysign(dd, aij)) / ajjd
c = 1. / math.sqrt(1. + t*t)
if c == 0:
# Underflow
s = 1.0
else:
s = c*t
return c, s
def _to_corr(self, m):
"""
        Given a PSD matrix m, apply rotations to put ones on the diagonal,
        turning it into a correlation matrix. This requires that the trace of
        m equal its dimensionality. Note: modifies the input matrix in place.
"""
# Check requirements for in-place Givens
if not (m.flags.c_contiguous and m.dtype == np.float64 and
m.shape[0] == m.shape[1]):
            raise ValueError("Matrix must be a square, C-contiguous float64"
                             " array.")
d = m.shape[0]
for i in range(d-1):
if m[i, i] == 1:
continue
elif m[i, i] > 1:
for j in range(i+1, d):
if m[j, j] < 1:
break
else:
for j in range(i+1, d):
if m[j, j] > 1:
break
c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j])
# Use BLAS to apply Givens rotations in-place. Equivalent to:
# g = np.eye(d)
# g[i, i] = g[j,j] = c
# g[j, i] = -s; g[i, j] = s
# m = np.dot(g.T, np.dot(m, g))
mv = m.ravel()
drot(mv, mv, c, -s, n=d,
offx=i*d, incx=1, offy=j*d, incy=1,
overwrite_x=True, overwrite_y=True)
drot(mv, mv, c, -s, n=d,
offx=i, incx=d, offy=j, incy=d,
overwrite_x=True, overwrite_y=True)
return m
def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
"""Draw random correlation matrices.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
        rvs : ndarray
            Random correlation matrix of shape (dim, dim), with eigenvalues
            eigs.
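        Examples
        --------
        An illustrative sanity check (the eigenvalues below are arbitrary,
        chosen to sum to the dimension): the result has a unit diagonal and
        the requested spectrum.

        >>> import numpy as np
        >>> from scipy.stats import random_correlation
        >>> c = random_correlation.rvs((0.5, 1.5), random_state=3)
        >>> bool(np.allclose(np.diag(c), 1.0))
        True
        >>> bool(np.allclose(np.linalg.eigvalsh(c), [0.5, 1.5]))
        True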
"""
dim, eigs = self._process_parameters(eigs, tol=tol)
random_state = self._get_random_state(random_state)
m = ortho_group.rvs(dim, random_state=random_state)
m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m
m = self._to_corr(m) # Carefully rotate to unit diagonal
# Check diagonal
if abs(m.diagonal() - 1).max() > diag_tol:
raise RuntimeError("Failed to generate a valid correlation matrix")
return m
random_correlation = random_correlation_gen()
class unitary_group_gen(multi_rv_generic):
r"""A matrix-valued U(N) random variable.
Return a random unitary matrix.
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from U(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is similar to `ortho_group`.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
    >>> import numpy as np
    >>> from scipy.stats import unitary_group
>>> x = unitary_group.rvs(3)
>>> np.dot(x, x.conj().T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
This generates one random matrix from U(3). The dot product confirms that
it is unitary up to machine precision.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from U(N).
Parameters
----------
dim : integer
Dimension of space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
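        Examples
        --------
        An illustrative sanity check (arbitrary dimension and seed): each
        draw is unitary up to floating-point error.

        >>> import numpy as np
        >>> from scipy.stats import unitary_group
        >>> u = unitary_group.rvs(3, random_state=7)
        >>> bool(np.allclose(u @ u.conj().T, np.eye(3)))
        True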
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
z = 1/math.sqrt(2)*(random_state.normal(size=(dim, dim)) +
1j*random_state.normal(size=(dim, dim)))
q, r = scipy.linalg.qr(z)
d = r.diagonal()
q *= d/abs(d)
return q
unitary_group = unitary_group_gen()
_mvt_doc_default_callparams = \
"""
loc : array_like, optional
Location of the distribution. (default ``0``)
shape : array_like, optional
Positive semidefinite matrix of the distribution. (default ``1``)
df : float, optional
Degrees of freedom of the distribution; must be greater than zero.
If ``np.inf`` then results are multivariate normal. The default is ``1``.
allow_singular : bool, optional
Whether to allow a singular matrix. (default ``False``)
"""
_mvt_doc_callparams_note = \
"""Setting the parameter `loc` to ``None`` is equivalent to having `loc`
be the zero-vector. The parameter `shape` can be a scalar, in which case
the shape matrix is the identity times that value, a vector of
diagonal entries for the shape matrix, or a two-dimensional array_like.
"""
_mvt_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvt_docdict_params = {
'_mvt_doc_default_callparams': _mvt_doc_default_callparams,
'_mvt_doc_callparams_note': _mvt_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvt_docdict_noparams = {
'_mvt_doc_default_callparams': "",
'_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_t_gen(multi_rv_generic):
r"""A multivariate t-distributed random variable.
The `loc` parameter specifies the location. The `shape` parameter specifies
the positive semidefinite shape matrix. The `df` parameter specifies the
degrees of freedom.
In addition to calling the methods below, the object itself may be called
as a function to fix the location, shape matrix, and degrees of freedom
parameters, returning a "frozen" multivariate t-distribution random.
Methods
-------
``pdf(x, loc=None, shape=1, df=1, allow_singular=False)``
Probability density function.
``logpdf(x, loc=None, shape=1, df=1, allow_singular=False)``
Log of the probability density function.
``rvs(loc=None, shape=1, df=1, size=1, random_state=None)``
Draw random samples from a multivariate t-distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvt_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_mvt_doc_callparams_note)s
The matrix `shape` must be a (symmetric) positive semidefinite matrix. The
determinant and inverse of `shape` are computed as the pseudo-determinant
and pseudo-inverse, respectively, so that `shape` does not need to have
full rank.
The probability density function for `multivariate_t` is
.. math::
        f(x) = \frac{\Gamma((\nu + p)/2)}{\Gamma(\nu/2)\nu^{p/2}\pi^{p/2}|\Sigma|^{1/2}}
               \left[1 + \frac{1}{\nu} (\mathbf{x} - \boldsymbol{\mu})^{\top}
               \boldsymbol{\Sigma}^{-1}
               (\mathbf{x} - \boldsymbol{\mu}) \right]^{-(\nu + p)/2},
where :math:`p` is the dimension of :math:`\mathbf{x}`,
:math:`\boldsymbol{\mu}` is the :math:`p`-dimensional location,
:math:`\boldsymbol{\Sigma}` the :math:`p \times p`-dimensional shape
matrix, and :math:`\nu` is the degrees of freedom.
.. versionadded:: 1.6.0
Examples
--------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_t
>>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2)
>>> fig, ax = plt.subplots(1, 1)
>>> ax.set_aspect('equal')
>>> plt.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
"""Initialize a multivariate t-distributed random variable.
Parameters
----------
        seed : {None, int, np.random.RandomState, np.random.Generator}, optional
            Used for drawing random variates.
"""
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params)
self._random_state = check_random_state(seed)
def __call__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""Create a frozen multivariate t-distribution.
See `multivariate_t_frozen` for parameters.
"""
if df == np.inf:
return multivariate_normal_frozen(mean=loc, cov=shape,
allow_singular=allow_singular,
seed=seed)
return multivariate_t_frozen(loc=loc, shape=shape, df=df,
allow_singular=allow_singular, seed=seed)
def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False):
"""Multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the probability density function.
%(_mvt_doc_default_callparams)s
Returns
-------
pdf : Probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.pdf(x, loc, shape, df)
array([0.00075713])
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape, allow_singular=allow_singular)
logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df,
dim, shape_info.rank)
return np.exp(logpdf)
def logpdf(self, x, loc=None, shape=1, df=1):
"""Log of the multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the log of the probability density
function.
%(_mvt_doc_default_callparams)s
Returns
-------
logpdf : Log of the probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.logpdf(x, loc, shape, df)
array([-7.1859802])
See Also
--------
pdf : Probability density function.
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape)
return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim,
shape_info.rank)
def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):
"""Utility method `pdf`, `logpdf` for parameters.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability density
function.
loc : ndarray
Location of the distribution.
prec_U : ndarray
A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse
of the shape matrix.
log_pdet : float
Logarithm of the determinant of the shape matrix.
df : float
Degrees of freedom of the distribution.
dim : int
Dimension of the quantiles x.
rank : int
Rank of the shape matrix.
Notes
-----
As this function does no argument checking, it should not be called
directly; use 'logpdf' instead.
"""
if df == np.inf:
return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)
dev = x - loc
maha = np.square(np.dot(dev, prec_U)).sum(axis=-1)
t = 0.5 * (df + dim)
A = gammaln(t)
B = gammaln(0.5 * df)
C = dim/2. * np.log(df * np.pi)
D = 0.5 * log_pdet
E = -t * np.log(1 + (1./df) * maha)
return _squeeze_output(A - B - C - D + E)
def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):
"""Draw random samples from a multivariate t-distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `P`), where `P` is the
dimension of the random variable.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.rvs(loc, shape, df)
array([[0.93477495, 3.00408716]])
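
        A shape-oriented sketch (the parameters below are arbitrary): drawing
        ``size`` variates returns one row per draw.

        >>> multivariate_t.rvs(loc=[0, 0], shape=[[1, 0], [0, 1]], df=3,
        ...                    size=10).shape
        (10, 2)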
"""
# For implementation details, see equation (3):
#
# Hofert, "On Sampling from the Multivariatet Distribution", 2013
# http://rjournal.github.io/archive/2013-2/hofert.pdf
#
dim, loc, shape, df = self._process_parameters(loc, shape, df)
if random_state is not None:
rng = check_random_state(random_state)
else:
rng = self._random_state
if np.isinf(df):
x = np.ones(size)
else:
x = rng.chisquare(df, size=size) / df
z = rng.multivariate_normal(np.zeros(dim), shape, size=size)
samples = loc + z / np.sqrt(x)[..., None]
return _squeeze_output(samples)
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _process_parameters(self, loc, shape, df):
"""
Infer dimensionality from location array and shape matrix, handle
defaults, and ensure compatible dimensions.
"""
if loc is None and shape is None:
loc = np.asarray(0, dtype=float)
shape = np.asarray(1, dtype=float)
dim = 1
elif loc is None:
shape = np.asarray(shape, dtype=float)
if shape.ndim < 2:
dim = 1
else:
dim = shape.shape[0]
loc = np.zeros(dim)
elif shape is None:
loc = np.asarray(loc, dtype=float)
dim = loc.size
shape = np.eye(dim)
else:
shape = np.asarray(shape, dtype=float)
loc = np.asarray(loc, dtype=float)
dim = loc.size
if dim == 1:
loc.shape = (1,)
shape.shape = (1, 1)
if loc.ndim != 1 or loc.shape[0] != dim:
raise ValueError("Array 'loc' must be a vector of length %d." %
dim)
if shape.ndim == 0:
shape = shape * np.eye(dim)
elif shape.ndim == 1:
shape = np.diag(shape)
elif shape.ndim == 2 and shape.shape != (dim, dim):
rows, cols = shape.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(shape.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'loc' is a vector of length %d.")
msg = msg % (str(shape.shape), len(loc))
raise ValueError(msg)
elif shape.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % shape.ndim)
# Process degrees of freedom.
if df is None:
df = 1
elif df <= 0:
raise ValueError("'df' must be greater than zero.")
elif np.isnan(df):
raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.")
return dim, loc, shape, df
class multivariate_t_frozen(multi_rv_frozen):
def __init__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""Create a frozen multivariate t distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
Examples
--------
>>> loc = np.zeros(3)
>>> shape = np.eye(3)
>>> df = 10
>>> dist = multivariate_t(loc, shape, df)
>>> dist.rvs()
array([[ 0.81412036, -1.53612361, 0.42199647]])
>>> dist.pdf([1, 1, 1])
array([0.01237803])
"""
self._dist = multivariate_t_gen(seed)
dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)
self.dim, self.loc, self.shape, self.df = dim, loc, shape, df
self.shape_info = _PSD(shape, allow_singular=allow_singular)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
U = self.shape_info.U
log_pdet = self.shape_info.log_pdet
return self._dist._logpdf(x, self.loc, U, log_pdet, self.df, self.dim,
self.shape_info.rank)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(loc=self.loc,
shape=self.shape,
df=self.df,
size=size,
random_state=random_state)
multivariate_t = multivariate_t_gen()
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_t_gen.__dict__[name]
method_frozen = multivariate_t_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
mvt_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params)
_mhg_doc_default_callparams = """\
m : array_like
The number of each type of object in the population.
That is, :math:`m[i]` is the number of objects of
type :math:`i`.
n : array_like
The number of samples taken from the population.
"""
_mhg_doc_callparams_note = """\
`m` must be an array of positive integers. If the quantile
:math:`i` contains values out of the range :math:`[0, m_i]`
where :math:`m_i` is the number of objects of type :math:`i`
in the population or if the parameters are inconsistent with one
another (e.g. ``x.sum() != n``), methods return the appropriate
value (e.g. ``0`` for ``pmf``). If `m` or `n` contain negative
values, the result will contain ``nan`` there.
"""
_mhg_doc_frozen_callparams = ""
_mhg_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mhg_docdict_params = {
'_doc_default_callparams': _mhg_doc_default_callparams,
'_doc_callparams_note': _mhg_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mhg_docdict_noparams = {
'_doc_default_callparams': _mhg_doc_frozen_callparams,
'_doc_callparams_note': _mhg_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_hypergeom_gen(multi_rv_generic):
r"""A multivariate hypergeometric random variable.
Methods
-------
``pmf(x, m, n)``
Probability mass function.
``logpmf(x, m, n)``
Log of the probability mass function.
``rvs(m, n, size=1, random_state=None)``
Draw random samples from a multivariate hypergeometric
distribution.
``mean(m, n)``
Mean of the multivariate hypergeometric distribution.
``var(m, n)``
Variance of the multivariate hypergeometric distribution.
``cov(m, n)``
Compute the covariance matrix of the multivariate
hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
The probability mass function for `multivariate_hypergeom` is
.. math::
P(X_1 = x_1, X_2 = x_2, \ldots, X_k = x_k) = \frac{\binom{m_1}{x_1}
\binom{m_2}{x_2} \cdots \binom{m_k}{x_k}}{\binom{M}{n}}, \\ \quad
(x_1, x_2, \ldots, x_k) \in \mathbb{N}^k \text{ with }
\sum_{i=1}^k x_i = n
where :math:`m_i` are the number of objects of type :math:`i`, :math:`M`
is the total number of objects in the population (sum of all the
:math:`m_i`), and :math:`n` is the size of the sample to be taken
from the population.
.. versionadded:: 1.6.0
Examples
--------
To evaluate the probability mass function of the multivariate
hypergeometric distribution, with a dichotomous population of size
:math:`10` and :math:`20`, at a sample of size :math:`12` with
:math:`8` objects of the first type and :math:`4` objects of the
second type, use:
>>> from scipy.stats import multivariate_hypergeom
>>> multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12)
0.0025207176631464523
The `multivariate_hypergeom` distribution is identical to the
corresponding `hypergeom` distribution (tiny numerical differences
notwithstanding) when only two types (good and bad) of objects
are present in the population as in the example above. Consider
another example for a comparison with the hypergeometric distribution:
>>> from scipy.stats import hypergeom
>>> multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
0.4395604395604395
>>> hypergeom.pmf(k=3, M=15, n=4, N=10)
0.43956043956044005
The functions ``pmf``, ``logpmf``, ``mean``, ``var``, ``cov``, and ``rvs``
support broadcasting, under the convention that the vector parameters
(``x``, ``m``, and ``n``) are interpreted as if each row along the last
axis is a single object. For instance, we can combine the previous two
calls to `multivariate_hypergeom` as
>>> multivariate_hypergeom.pmf(x=[[8, 4], [3, 1]], m=[[10, 20], [10, 5]],
... n=[12, 4])
array([0.00252072, 0.43956044])
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``m.shape[-1]``. For example:
>>> multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])
array([[[ 1.05, -1.05],
[-1.05, 1.05]],
[[ 1.56, -1.56],
[-1.56, 1.56]]])
That is, ``result[0]`` is equal to
``multivariate_hypergeom.cov(m=[7, 9], n=8)`` and ``result[1]`` is equal
to ``multivariate_hypergeom.cov(m=[10, 15], n=12)``.
Alternatively, the object may be called (as a function) to fix the `m`
and `n` parameters, returning a "frozen" multivariate hypergeometric
random variable.
>>> rv = multivariate_hypergeom(m=[10, 20], n=12)
>>> rv.pmf(x=[8, 4])
0.0025207176631464523
See Also
--------
scipy.stats.hypergeom : The hypergeometric distribution.
scipy.stats.multinomial : The multinomial distribution.
References
----------
.. [1] The Multivariate Hypergeometric Distribution,
http://www.randomservices.org/random/urn/MultiHypergeometric.html
.. [2] Thomas J. Sargent and John Stachurski, 2020,
Multivariate Hypergeometric Distribution
https://python.quantecon.org/_downloads/pdf/multi_hyper.pdf
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mhg_docdict_params)
def __call__(self, m, n, seed=None):
"""Create a frozen multivariate_hypergeom distribution.
See `multivariate_hypergeom_frozen` for more information.
"""
return multivariate_hypergeom_frozen(m, n, seed=seed)
def _process_parameters(self, m, n):
m = np.asarray(m)
n = np.asarray(n)
if m.size == 0:
m = m.astype(int)
if n.size == 0:
n = n.astype(int)
if not np.issubdtype(m.dtype, np.integer):
raise TypeError("'m' must an array of integers.")
if not np.issubdtype(n.dtype, np.integer):
raise TypeError("'n' must an array of integers.")
if m.ndim == 0:
raise ValueError("'m' must be an array with"
" at least one dimension.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
m, n = np.broadcast_arrays(m, n)
# check for empty arrays
if m.size != 0:
n = n[..., 0]
mcond = m < 0
M = m.sum(axis=-1)
ncond = (n < 0) | (n > M)
return M, m, n, mcond, ncond, np.any(mcond, axis=-1) | ncond
def _process_quantiles(self, x, M, m, n):
x = np.asarray(x)
if not np.issubdtype(x.dtype, np.integer):
raise TypeError("'x' must an array of integers.")
if x.ndim == 0:
raise ValueError("'x' must be an array with"
" at least one dimension.")
if not x.shape[-1] == m.shape[-1]:
raise ValueError(f"Size of each quantile must be size of 'm': "
f"received {x.shape[-1]}, "
f"but expected {m.shape[-1]}.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
M = M[..., np.newaxis]
x, m, n, M = np.broadcast_arrays(x, m, n, M)
# check for empty arrays
if m.size != 0:
n, M = n[..., 0], M[..., 0]
xcond = (x < 0) | (x > m)
return (x, M, m, n, xcond,
np.any(xcond, axis=-1) | (x.sum(axis=-1) != n))
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
return bad_value
if result.ndim == 0:
return result[()]
return result
def _logpmf(self, x, M, m, n, mxcond, ncond):
# This equation of the pmf comes from the relation,
# n combine r = beta(n+1, 1) / beta(r+1, n-r+1)
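        # Since B(n+1, 1) = 1/(n+1), that identity gives
        #     log C(n, r) = betaln(n+1, 1) - betaln(r+1, n-r+1),
        # so `num` below accumulates the log binomial coefficients for each
        # component of m and `den` is the log binomial coefficient C(M, n) for
        # the overall draw from the population.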
num = np.zeros_like(m, dtype=np.float_)
den = np.zeros_like(n, dtype=np.float_)
m, x = m[~mxcond], x[~mxcond]
M, n = M[~ncond], n[~ncond]
num[~mxcond] = (betaln(m+1, 1) - betaln(x+1, m-x+1))
den[~ncond] = (betaln(M+1, 1) - betaln(n+1, M-n+1))
num[mxcond] = np.nan
den[ncond] = np.nan
num = num.sum(axis=-1)
return num - den
def logpmf(self, x, m, n):
"""Log of the multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
M, m, n, mcond, ncond, mncond = self._process_parameters(m, n)
(x, M, m, n, xcond,
xcond_reduced) = self._process_quantiles(x, M, m, n)
mxcond = mcond | xcond
ncond = ncond | np.zeros(n.shape, dtype=np.bool_)
result = self._logpmf(x, M, m, n, mxcond, ncond)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, np.NINF)
# replace values bad for n or m; broadcast
# mncond to the right shape
mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_)
return self._checkresult(result, mncond_, np.nan)
def pmf(self, x, m, n):
"""Multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
out = np.exp(self.logpmf(x, m, n))
return out
def mean(self, m, n):
"""Mean of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : array_like or scalar
The mean of the distribution
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0)
M = np.ma.masked_array(M, mask=cond)
mu = n*(m/M)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(mu.shape, dtype=np.bool_))
return self._checkresult(mu, mncond, np.nan)
def var(self, m, n):
"""Variance of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
array_like
The variances of the components of the distribution. This is
the diagonal of the covariance matrix of the distribution
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = n * m/M * (M-m)/M * (M-n)/(M-1)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def cov(self, m, n):
"""Covariance matrix of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : array_like
The covariance matrix of the distribution
"""
# see [1]_ for the formula and [2]_ for implementation
# cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2)
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M = M[..., np.newaxis, np.newaxis]
n = n[..., np.newaxis, np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = (-n * (M-n)/(M-1) *
np.einsum("...i,...j->...ij", m, m) / (M**2))
# check for empty arrays
if m.size != 0:
M, n = M[..., 0, 0], n[..., 0, 0]
cond = cond[..., 0, 0]
dim = m.shape[-1]
# diagonal entries need to be computed differently
for i in range(dim):
output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i]))
output[..., i, i] = output[..., i, i] / (M-1)
output[..., i, i] = output[..., i, i] / (M**2)
if m.size != 0:
mncond = (mncond[..., np.newaxis, np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def rvs(self, m, n, size=None, random_state=None):
"""Draw random samples from a multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw. Default is ``None``, in which case a
single variate is returned as an array with shape ``m.shape``.
%(_doc_random_state)s
Returns
-------
rvs : array_like
Random variates of shape ``size`` or ``m.shape``
(if ``size=None``).
Notes
-----
%(_doc_callparams_note)s
Also note that NumPy's `multivariate_hypergeometric` sampler is not
used as it doesn't support broadcasting.
"""
M, m, n, _, _, _ = self._process_parameters(m, n)
random_state = self._get_random_state(random_state)
if size is not None and isinstance(size, int):
size = (size, )
if size is None:
rvs = np.empty(m.shape, dtype=m.dtype)
else:
rvs = np.empty(size + (m.shape[-1], ), dtype=m.dtype)
rem = M
# This sampler has been taken from numpy gh-13794
# https://github.com/numpy/numpy/pull/13794
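        # This is the conditional-distribution method: component c is drawn from
        # a univariate hypergeometric with m[..., c] "good" objects, the rest of
        # the remaining population as "bad" objects, and the not-yet-allocated
        # sample size n; the last component receives whatever remains of n.  The
        # (n != 0) / (n + (n == 0)) juggling avoids calling the sampler with a
        # zero sample size, which NumPy's hypergeometric generator rejects.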
for c in range(m.shape[-1] - 1):
rem = rem - m[..., c]
rvs[..., c] = ((n != 0) *
random_state.hypergeometric(m[..., c], rem,
n + (n == 0),
size=size))
n = n - rvs[..., c]
rvs[..., m.shape[-1] - 1] = n
return rvs
multivariate_hypergeom = multivariate_hypergeom_gen()
class multivariate_hypergeom_frozen(multi_rv_frozen):
def __init__(self, m, n, seed=None):
self._dist = multivariate_hypergeom_gen(seed)
(self.M, self.m, self.n,
self.mcond, self.ncond,
self.mncond) = self._dist._process_parameters(m, n)
# monkey patch self._dist
def _process_parameters(m, n):
return (self.M, self.m, self.n,
self.mcond, self.ncond,
self.mncond)
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.m, self.n)
def pmf(self, x):
return self._dist.pmf(x, self.m, self.n)
def mean(self):
return self._dist.mean(self.m, self.n)
def var(self):
return self._dist.var(self.m, self.n)
def cov(self):
return self._dist.cov(self.m, self.n)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.m, self.n,
size=size,
random_state=random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_hypergeom and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'var', 'cov', 'rvs']:
method = multivariate_hypergeom_gen.__dict__[name]
method_frozen = multivariate_hypergeom_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, mhg_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
mhg_docdict_params)
| bsd-3-clause |
tapomayukh/projects_in_python | rapid_categorization/segmentation/taxel_based_segmentation.py | 1 | 5453 |
import math, numpy as np
import matplotlib.pyplot as pp
import scipy.linalg as lin
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import tf
import os
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_lib.matplotlib_util as mpu
import pickle
from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact
from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray
from m3skin_ros.msg import TaxelArray as TaxelArray_Meka
def callback(data, callback_args):
rospy.loginfo('Getting data!')
# Fixing Transforms
tf_lstnr = callback_args
sc = SkinContact()
sc.header.frame_id = '/torso_lift_link' # has to be this and no other coord frame.
sc.header.stamp = data.header.stamp
t1, q1 = tf_lstnr.lookupTransform(sc.header.frame_id,
data.header.frame_id,
rospy.Time(0))
t1 = np.matrix(t1).reshape(3,1)
r1 = tr.quaternion_to_matrix(q1)
# Gathering Force Data
force_vectors = np.row_stack([data.values_x, data.values_y, data.values_z])
fmags_instant = ut.norm(force_vectors)
threshold = 0.01
force_arr = fmags_instant.reshape((16,24))
fmags_tuned = fmags_instant - threshold
fmags_tuned[np.where(fmags_tuned<0)]=0
fmags_instant_tuned = fmags_tuned
global fmags
for i in range(len(fmags_instant_tuned)):
fmags[i].append(fmags_instant_tuned[i])
# Gathering Contact Data for Haptic Mapping
global global_contact_vector
for i in range(len(fmags_instant_tuned)):
global_contact_vector[i].append(r1*((np.column_stack([data.centers_x[i], data.centers_y[i], data.centers_z[i]])).T) + t1)
def processdata():
rospy.loginfo('Processing data!')
global fmags
global global_contact_vector
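    # A rough sketch of the segmentation below: for each taxel, contiguous runs
    # of non-zero (thresholded) force are treated as one contact episode.  The
    # first sample of an episode stores the initial contact location, later
    # samples record the force and the distance moved from that location, and
    # when the force drops back to zero the episode is passed to savedata() and
    # the buffers are reset.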
for key in fmags:
temp_force_store = []
temp_contact_motion = []
init_contact = 0
init_contact_store = 0.0
max_temp = max(fmags[key])
if max_temp > 0.0:
print key
print '#####'
for i in range(len(fmags[key])):
if fmags[key][i] > 0.0:
init_contact = init_contact + 1
temp_force_store.append(fmags[key][i])
if init_contact == 1:
print "Started Contact !"
init_contact_store = global_contact_vector[key][i]
temp_contact_motion.append(0.0)
else:
temp_contact_motion.append(abs(lin.norm(global_contact_vector[key][i] - init_contact_store)))
else:
if len(temp_force_store) > 0:
print "Broke Contact !"
savedata(temp_force_store, temp_contact_motion)
temp_force_store = []
temp_contact_motion = []
init_contact = 0
init_contact_store = 0.0
def savedata(force, motion):
global trial_index
global directory
time = []
contact_area = []
if len(force) > 10:
rospy.loginfo('Saving data!')
time_len = len(force)
while len(time) < time_len:
if len(time) == 0:
time.append(0.0)
contact_area.append(1.0)
else:
time.append(time[len(time)-1] + 0.01)
contact_area.append(1.0)
if not os.path.exists(directory):
os.makedirs(directory)
ut.save_pickle([time, force, contact_area, motion], directory + '/trial_' + np.str(trial_index) +'.pkl')
trial_index = trial_index + 1
else:
print "Too few samples, Not saving the data"
def plotdata():
rospy.loginfo('Plotting data!')
global directory
global trial_index
for trial_num in range(1, trial_index):
ta = ut.load_pickle(directory + '/trial_' + np.str(trial_num) +'.pkl')
mpu.figure(3*trial_num-2)
pp.title('Time-Varying Force')
pp.xlabel('Time (s)')
pp.ylabel('Max Force')
pp.plot(ta[0], ta[1])
pp.grid('on')
mpu.figure(3*trial_num-1)
pp.title('Time-Varying Contact')
pp.xlabel('Time (s)')
pp.ylabel('No. of Contact Regions')
pp.plot(ta[0], ta[2])
pp.grid('on')
mpu.figure(3*trial_num)
pp.title('Point Tracker')
pp.xlabel('Time (s)')
pp.ylabel('Contact Point Distance')
pp.plot(ta[0], ta[3])
pp.grid('on')
def getdata():
rospy.loginfo('Initializing the Node !')
rospy.init_node('Taxel_Based_Segmentation', anonymous=True)
tf_lstnr = tf.TransformListener()
rospy.loginfo('Waiting to Subscribe to the Skin Message...')
rospy.Subscriber("/skin_patch_forearm_right/taxels/forces", TaxelArray_Meka, callback, callback_args = (tf_lstnr))
rospy.spin()
if __name__ == '__main__':
# Global Params
trial_index = 1
num = 54
directory = '/home/tapo/svn/robot1_data/usr/tapo/data/rapid_categorization/Taxel_Based/Trunk/'+np.str(num)
# Global Data dicts
fmags = {}
for i in range(384):
fmags[i] = []
global_contact_vector = {}
for i in range(384):
global_contact_vector[i] = []
# Function Calls
getdata()
processdata()
#plotdata()
#pp.show()
| mit |
khyrulimam/pemrograman-linear-optimasi-gizi-anak-kos | giapetto.py | 1 | 2575 | import numpy as np
import pulp
# create the LP object, set up as a maximization problem
prob = pulp.LpProblem('Giapetto', pulp.LpMaximize)
# set up decision variables
soldiers = pulp.LpVariable('soldiers', lowBound=0, cat='Integer')
trains = pulp.LpVariable('trains', lowBound=0, cat='Integer')
# model weekly production costs
raw_material_costs = 10 * soldiers + 9 * trains
variable_costs = 14 * soldiers + 10 * trains
# model weekly revenues from toy sales
revenues = 27 * soldiers + 21 * trains
# use weekly profit as the objective function to maximize
profit = revenues - (raw_material_costs + variable_costs)
prob += profit # here's where we actually add it to the obj function
# add constraints for available labor hours
carpentry_hours = soldiers + trains
prob += (carpentry_hours <= 80)
finishing_hours = 2*soldiers + trains
prob += (finishing_hours <= 100)
# add constraint representing demand for soldiers
prob += (soldiers <= 40)
# solve the LP using the default solver
optimization_result = prob.solve()
# make sure we got an optimal solution
assert optimization_result == pulp.LpStatusOptimal
# display the results
for var in (soldiers, trains):
print('Optimal weekly number of {} to produce: {:1.0f}'.format(var.name, var.value()))
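# Not part of the original script: PuLP can also report the optimised objective
# value directly (here, the maximum weekly profit in dollars), which gives a
# quick sanity check on the production plan printed above.
print('Optimal weekly profit: {:1.0f}'.format(pulp.value(prob.objective)))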
from matplotlib import pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch
# use seaborn to change the default graphics to something nicer
# and set a nice color palette
import seaborn as sns
sns.set_palette('Set1')
# create the plot object
fig, ax = plt.subplots(figsize=(8, 8))
s = np.linspace(0, 100)
# add carpentry constraint: trains <= 80 - soldiers
plt.plot(s, 80 - s, lw=3, label='carpentry')
plt.fill_between(s, 0, 80 - s, alpha=0.1)
# add finishing constraint: trains <= 100 - 2*soldiers
plt.plot(s, 100 - 2 * s, lw=3, label='finishing')
plt.fill_between(s, 0, 100 - 2 * s, alpha=0.1)
# add demand constraint: soldiers <= 40
plt.plot(40 * np.ones_like(s), s, lw=3, label='demand')
plt.fill_betweenx(s, 0, 40, alpha=0.1)
# add non-negativity constraints
plt.plot(np.zeros_like(s), s, lw=3, label='s non-negative')
plt.plot(s, np.zeros_like(s), lw=3, label='t non-negative')
# highlight the feasible region
path = Path([
(0., 0.),
(0., 80.),
(20., 60.),
(40., 20.),
(40., 0.),
(0., 0.),
])
patch = PathPatch(path, label='feasible region', alpha=0.5)
ax.add_patch(patch)
# labels and stuff
plt.xlabel('soldiers', fontsize=16)
plt.ylabel('trains', fontsize=16)
plt.xlim(-0.5, 100)
plt.ylim(-0.5, 100)
plt.legend(fontsize=14)
plt.show() | apache-2.0 |
shikhardb/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
tell us much about `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
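# Not in the original example: printing the fitted coefficients makes the claim
# in the docstring (feature 2's large coefficient on the full model) easy to
# verify.
print('Coefficients for features 1 and 2: {}'.format(ols.coef_))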
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
energyPATHWAYS/energyPATHWAYS | model_building_tools/scenario_builder/scenario_builder.py | 1 | 60144 | # -*- coding: utf-8 -*-
# WARNING: If you try to import something that isn't in your python path,
# xlwings will fail silently when called from Excel (at least on Excel 2016 for Mac)!
import os
import subprocess
import traceback
import json
import string
from datetime import datetime
import platform
import xlwings as xw
import sys
import pandas as pd
import psycopg2
import psycopg2.extras
import numpy as np
from collections import OrderedDict, defaultdict
import csv
import time
class PathwaysLookupError(KeyError):
def __init__(self, val, table, col):
message = "Save failed: '{}' is not a valid value for {}.{}".format(val, table, col)
# Call the base class constructor with the parameters it needs
super(PathwaysLookupError, self).__init__(message)
class SequenceError(Exception):
pass
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
elif exc_type == PathwaysLookupError:
_msg(str(exc_value).strip('"'))
return
exception_lines = [[line] for line in traceback.format_exception(exc_type, exc_value, exc_traceback)]
exception_lines.insert(0, ["Python encountered an exception at {}".format(datetime.now().strftime('%c'))])
wb.sheets['exception'].cells.value = exception_lines
wb.sheets['exception'].activate()
_msg("Python encountered an exception; see 'exception' worksheet.")
sys.excepthook = handle_exception
wb = xw.Book('scenario_builder.xlsm')
sht = wb.sheets.active
con = None
cur = None
directory = os.getcwd()
SENSITIVITIES = {"DemandDriversData":
{'side': 'd', 'parent_table': "DemandDrivers"},
"DemandTechsCapitalCostNewData":
{'side': 'd', 'parent_table': "DemandTechs"},
"DemandTechsCapitalCostReplacementData":
{'side': 'd', 'parent_table': "DemandTechs"},
"DemandTechsMainEfficiencyData":
{'side': 'd', 'parent_table': "DemandTechs"},
"DemandTechsAuxEfficiencyData":
{'side': 'd', 'parent_table': "DemandTechs"},
"DemandTechsFixedMaintenanceCostData":
{'side': 'd', 'parent_table': "DemandTechs"},
"DemandTechsFuelSwitchCostData":
{'side': 'd', 'parent_table': "DemandTechs"},
"DemandTechsInstallationCostNewData":
{'side': 'd', 'parent_table': "DemandTechs"},
"DemandTechsInstallationCostReplacementData":
{'side': 'd', 'parent_table': "DemandTechs"},
"DemandTechsParasiticEnergyData":
{'side': 'd', 'parent_table': "DemandTechs"},
"PrimaryCostData":
{'side': 's', 'parent_table': "SupplyNodes"},
"ImportCostData":
{'side': 's', 'parent_table': "SupplyNodes"},
"DemandEnergyDemandsData":
{'side': 'd', 'parent_table': "DemandSubsectors"},
"SupplyPotentialData":
{'side': 's', 'parent_table': "SupplyNodes"},
"SupplyEmissionsData":
{'side': 's', 'parent_table': "SupplyNodes"},
"SupplyStockData":
{'side': 's', 'parent_table': "SupplyNodes"},
"StorageTechsCapacityCapitalCostNewData":
{'side': 's', 'parent_table': "SupplyTechs"},
"DispatchTransmissionConstraintData":
{'side': 's', 'parent_table': "DispatchTransmissionConstraint"},
"DispatchTransmissionHurdleRate":
{'side': 's', 'parent_table': "DispatchTransmissionConstraint"},
"DispatchTransmissionLosses":
{'side': 's', 'parent_table': "DispatchTransmissionConstraint"}
}
PARENT_COLUMN_NAMES = ('parent_id', 'subsector_id', 'supply_node_id', 'primary_node_id', 'import_node_id',
'demand_tech_id', 'demand_technology_id', 'supply_tech_id', 'supply_technology_id')
MEASURE_CATEGORIES = ("CO2PriceMeasures",
"DemandEnergyEfficiencyMeasures",
"DemandFlexibleLoadMeasures",
"DemandFuelSwitchingMeasures",
"DemandServiceDemandMeasures",
"DemandSalesShareMeasures",
"DemandStockMeasures",
"BlendNodeBlendMeasures",
"SupplyExportMeasures",
"SupplySalesMeasures",
"SupplySalesShareMeasures",
"SupplyStockMeasures")
SENSITIVTY_LABEL = 'sensitivity'
def _msg(message):
wb.sheets['scenarios'].range('python_msg').value = message
def _clear_msg():
wb.sheets['scenarios'].range('python_msg').clear_contents()
wb.sheets['exception'].clear_contents()
def _get_columns(table):
cur.execute("""SELECT column_name FROM information_schema.columns
WHERE table_schema = 'public' AND table_name = %s""", (table,))
cols = [row[0] for row in cur]
assert cols, "Could not find any columns for table {}. Did you misspell the table name?".format(table)
return cols
def _get_parent_col(data_table):
"""Returns the name of the column in the data table that references the parent table"""
# These are one-off exceptions to our general preference order for parent columns
if data_table == 'DemandSalesData':
return 'demand_technology_id'
if data_table == 'SupplyTechsEfficiencyData':
return 'supply_tech_id'
if data_table in ('SupplySalesData', 'SupplySalesShareData'):
return 'supply_technology_id'
cols = _get_columns(data_table)
# We do it this way so that we use a column earlier in the PARENT_COLUMN_NAMES list over one that's later
parent_cols = [col for col in PARENT_COLUMN_NAMES if col in cols]
if not parent_cols:
raise ValueError("Could not find any known parent-referencing columns in {}. "
"Are you sure it's a table that references a parent table?".format(data_table))
return parent_cols[0]
def _get_id_col_of_parent(parent_table):
"""Some tables identify their members by something more elaborate than 'id', e.g. 'demand_tech_id'"""
return 'id' if 'id' in _get_columns(parent_table) else _get_parent_col(parent_table)
def _get_config(key):
try:
return _get_config.config.get(key, None)
except AttributeError:
_get_config.config = dict()
for line in open(os.path.join(directory, 'scenario_builder_config.txt'), 'r'):
k, v = (part.strip() for part in line.split(':', 2))
# This "if v" is here because if v is the empty string we don't want to store a value for this config
# option; it should be gotten as "None"
if v:
_get_config.config[k] = v
return _get_config.config.get(key, None)
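# The expected format of scenario_builder_config.txt is one "key: value" pair
# per line; keys referenced elsewhere in this module include pg_host, pg_user,
# pg_password, pg_database and conda_env.  A minimal sketch (the values below
# are illustrative only):
#
#     pg_host: localhost
#     pg_database: pathways
#     pg_user: postgres
#     pg_password:
#     conda_env: pathways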
def query_all_measures():
measures = []
for table in MEASURE_CATEGORIES:
if table.startswith("Demand"):
id_name = 'subsector_id'
parent_name = 'DemandSubsectors'
side = 'd'
else:
id_name = 'blend_node_id' if table == 'BlendNodeBlendMeasures' else 'supply_node_id'
parent_name = 'SupplyNodes'
side = 's'
cur.execute('SELECT "{}"."{}", "{}"."name", "{}"."id", "{}"."name" FROM "{}" INNER JOIN "{}" ON "{}".{} = "{}"."id";'.format(
table, id_name, parent_name, table, table, table, parent_name, table, id_name, parent_name))
for row in cur.fetchall():
measures.append([side, row[0], row[1], table, row[2], row[3]])
return pd.DataFrame(measures,
columns=['side', 'sub-node id', 'sub-node name', 'measure type', 'measure id', 'measure name'])
def _query_name_from_parent(id, data_table):
parent_table = SENSITIVITIES[data_table]['parent_table']
parent_table_primary_column = _get_parent_col(parent_table)
cur.execute('SELECT name FROM "{}" WHERE {}={};'.format(parent_table, parent_table_primary_column, id))
name = cur.fetchone()
return name[0] if name else None
def query_all_sensitivities():
sensitivities = []
for table in SENSITIVITIES:
side = SENSITIVITIES[table]['side']
primary_column = _get_parent_col(table)
parent_table = SENSITIVITIES[table]['parent_table']
parent_pri_col = _get_id_col_of_parent(parent_table)
cur.execute("""
SELECT DISTINCT "{table}".{pri_col}, "{parent}".name, sensitivity
FROM "{table}"
JOIN "{parent}" ON "{table}".{pri_col} = "{parent}".{parent_pri_col}
WHERE sensitivity IS NOT NULL;
""".format(table=table, pri_col=primary_column, parent=parent_table, parent_pri_col=parent_pri_col))
unique_sensitivities = cur.fetchall()
sensitivities += [[side, row[0], row[1], table, SENSITIVTY_LABEL, row[2]] for row in unique_sensitivities]
return pd.DataFrame(sensitivities,
columns=['side', 'sub-node id', 'sub-node name', 'measure type', 'measure id', 'measure name'])
def _pull_measures(json_dict):
result = []
for key1, value1 in json_dict.iteritems():
if type(value1) is dict:
result += _pull_measures(value1)
elif key1 in MEASURE_CATEGORIES:
result += [[v, key1] for v in value1]
return result
def _pull_measures_df(json_dict, scenario):
result = _pull_measures(json_dict)
scenario_measures = pd.DataFrame(result, columns=['measure id', 'measure type'])
scenario_measures[scenario + '.json'] = 'x'
return scenario_measures # this is where we make each measure that is in a scenario show up with an x
def _pull_descriptions(json_dict, sub_node_list):
result = []
for key1, value1 in json_dict.iteritems():
if key1 in sub_node_list:
if 'description' in value1.keys(): # dollar sign is added to make it sort first
result += [[sub_node_list[key1]['side'], sub_node_list[key1]['id'], '$description', key1,
value1['description']]]
elif type(value1) is dict:
result += _pull_descriptions(value1, sub_node_list)
return result
def _pull_descriptions_df(json_dict, sub_node_list, scenario):
result = _pull_descriptions(json_dict, sub_node_list)
scenario_descriptions = pd.DataFrame(result, columns=['side', 'sub-node id', 'measure type', 'sub-node name',
scenario + '.json'])
return scenario_descriptions
def _pull_sensitivities(json_dict):
result = []
for key1, value1 in json_dict.iteritems():
if type(value1) is dict:
result += _pull_sensitivities(value1)
elif key1 == "Sensitivities":
result += [(SENSITIVITIES[v['table']]['side'],
v['parent_id'],
v['table'],
'sensitivity',
v['sensitivity'])
for v in value1]
return result
def _pull_sensitivities_df(json_dict, scenario):
result = _pull_sensitivities(json_dict)
scenario_sensitivities = pd.DataFrame(result, columns=['side', 'sub-node id', 'measure type', 'measure id', 'measure name'])
scenario_sensitivities[scenario + '.json'] = 'x'
return scenario_sensitivities
def _inspect_scenario_and_case_names(json_dict, meta_data):
meta_data['scenario_names'].append(json_dict.keys()[0])
demand_case, supply_case, demand_description, supply_description, scenario_description = None, None, None, None, None
for key, value in json_dict.values()[0].items():
if key.lower().startswith('demand case: ') and type(value) is dict:
demand_case = key[13:]
if 'description' in value:
demand_description = value['description']
elif key.lower().startswith('supply case: ') and type(value) is dict:
supply_case = key[13:]
if 'description' in value:
supply_description = value['description']
elif key == 'description':
scenario_description = value
meta_data['demand_cases'].append(demand_case)
meta_data['supply_cases'].append(supply_case)
meta_data['demand_case_description'].append(demand_description)
meta_data['supply_case_description'].append(supply_description)
meta_data['scenario_description'].append(scenario_description)
return meta_data
def _get_list_of_db_nodes_and_subsectors():
cur.execute('SELECT name, id FROM "DemandSubsectors";')
subsectors = [(r[0], {'id': r[1], 'side': 'd'}) for r in cur.fetchall()]
cur.execute('SELECT name, id FROM "SupplyNodes";')
nodes = [(r[0], {'id': r[1], 'side': 's'}) for r in cur.fetchall()]
return dict(subsectors + nodes)
def _get_scenarios_df(scenarios, merge_by_override=None):
folder = sht.range('scenario_folder').value
base_path = os.path.join(directory, folder)
measures_df, description_df, sensitivity_df, meta_data = None, None, None, defaultdict(list)
sub_node_list = _get_list_of_db_nodes_and_subsectors()
for scenario in scenarios:
path = os.path.join(base_path, scenario + '.json')
if not os.path.isfile(path):
_msg("error: cannot find path {}".format(path))
sys.exit()
json_dict = _load_json(path)
scenario_measures = _pull_measures_df(json_dict, scenario)
measures_df = scenario_measures if measures_df is None else pd.merge(measures_df, scenario_measures,
how='outer')
scenario_descriptions = _pull_descriptions_df(json_dict, sub_node_list, scenario)
description_df = scenario_descriptions if description_df is None else pd.merge(description_df,
scenario_descriptions,
how='outer')
scenario_sensitivities = _pull_sensitivities_df(json_dict, scenario)
sensitivity_df = scenario_sensitivities if sensitivity_df is None else pd.merge(sensitivity_df,
scenario_sensitivities,
how='outer')
meta_data = _inspect_scenario_and_case_names(json_dict, meta_data)
all_measures = query_all_measures()
all_sensitivities = query_all_sensitivities()
merge_by = sht.range('merge_by').value if merge_by_override is None else merge_by_override
values_m = pd.merge(all_measures, measures_df, how=merge_by)
values_s = pd.merge(all_sensitivities, sensitivity_df, how=merge_by)
values_s = values_s.set_index(['side', 'sub-node id', 'sub-node name', 'measure type', 'measure id', 'measure name'],
append=False).sort_index(level=['measure type', 'sub-node id', 'measure name'])
values = pd.merge(values_m, description_df, how='outer')
values = values.set_index(['side', 'sub-node id', 'sub-node name', 'measure type', 'measure id', 'measure name'],
append=False).sort_index()
values = values.append(values_s)
return values, meta_data
def _helper_load_scenarios(scenarios):
values, meta_data = _get_scenarios_df(scenarios)
sht.range('values').clear_contents()
sht.range('values').value = values
for meta_name in meta_data:
sht.range(meta_name).clear_contents()
sht.range(meta_name).value = meta_data[meta_name]
_msg("sucessfully loaded {} scenarios from folder {}".format(len(scenarios), sht.range('scenario_folder').value))
def error_check_scenarios():
_connect_to_db()
scenarios = [s for s in sht.range('scenario_list').value if s]
values, meta_data = _get_scenarios_df(scenarios, merge_by_override='outer')
bad_index = np.nonzero([(v is None or v is np.NaN) for v in values.index.get_level_values('sub-node id')])[0]
values = values.iloc[bad_index]
sht.range('values').clear_contents()
sht.range('values').value = values
for meta_name in meta_data:
sht.range(meta_name).clear_contents()
sht.range(meta_name).value = meta_data[meta_name]
if len(bad_index):
_msg("measures not in the database are displayed below")
else:
_msg("all measures were found in the database!")
def load_scenario():
_connect_to_db()
scenarios = [wb.app.selection.value]
_helper_load_scenarios(scenarios)
def load_scenarios():
_connect_to_db()
scenarios = [s for s in sht.range('scenario_list').value if s]
_helper_load_scenarios(scenarios)
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
# Adapted from https://github.com/skywind3000/terminal/blob/master/terminal.py
def darwin_osascript(script):
for line in script:
# print line
pass
if type(script) == type([]):
script = '\n'.join(script)
p = subprocess.Popen(['/usr/bin/osascript'], shell=False,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.stdin.write(script)
p.stdin.flush()
p.stdin.close()
text = p.stdout.read()
p.stdout.close()
code = p.wait()
# print text
return code, text
# Adapted from https://github.com/skywind3000/terminal/blob/master/terminal.py
def run_on_darwin(command, working_dir):
command = ' '.join(command)
command = 'cd "{}"; '.format(working_dir) + command
command = command.replace('\\', '\\\\')
command = command.replace('"', '\\"')
command = command.replace("'", "\\'")
osascript = [
'tell application "Terminal"',
' do script "%s"' % command,
' activate',
'end tell'
]
return darwin_osascript(osascript)
def run_on_windows(command, working_dir):
os.chdir(working_dir)
command = ["start", "cmd", "/k"] + command
    subprocess.call(command, shell=True)
def start_energypathways():
_connect_to_db()
folder = sht.range('scenario_folder_controls').value
working_dir = os.path.join(directory, folder)
base_args = ['EnergyPATHWAYS']
if _get_config('conda_env'):
base_args = ['source', 'activate', _get_config('conda_env') + ';'] + base_args
if sht.range('configINI_name').value != 'config.INI':
base_args += ['-c', sht.range('configINI_name').value]
if sht.range('ep_load_demand').value is True:
base_args.append('-ld')
if sht.range('ep_load_supply').value is True:
base_args.append('-ls')
if sht.range('ep_solve_demand').value is False:
base_args.append('--no_solve_demand')
if sht.range('ep_solve_supply').value is False:
base_args.append('--no_solve_supply')
if sht.range('ep_export_results').value is False:
base_args.append('--no_export_results')
if sht.range('ep_clear_results').value is True:
base_args.append('--clear_results')
if sht.range('ep_save_models').value is False:
base_args.append('--no_save_models')
scenarios = [s for s in sht.range('scenario_list_controls').value if s]
ep_cmd_window_count = int(sht.range('ep_cmd_window_count').value)
run = run_on_darwin if platform.system() == 'Darwin' else run_on_windows
for scenario_chunk in np.array_split(scenarios, ep_cmd_window_count):
args = base_args + [val for pair in zip(['-s']*len(scenario_chunk), scenario_chunk) for val in pair]
run(args, working_dir)
time.sleep(int(sht.range('sleep_between_run_start').value))
_msg("sucessfully launched {} scenarios".format(len(scenarios)))
def load_config():
_make_connections()
_clear_msg()
sht.range('configINI').clear_contents()
folder = sht.range('scenario_folder_controls').value
config_name = sht.range('configINI_name').value
path = os.path.join(directory, folder, config_name)
if not os.path.isfile(path):
_msg("error: cannot find file {}".format(path))
sys.exit()
config = []
with open(path, 'rb') as infile:
for row in infile:
split_row = row.rstrip().replace('=', ':').split(':')
split_row = [split_row[0].rstrip(), split_row[1].lstrip()] if len(split_row)==2 else split_row+['']
config.append(split_row)
sht.range('configINI').value = config
_msg("sucessfully loaded config file")
def is_strnumeric(s):
try:
float(s)
return True
except ValueError:
return False
def save_config():
_make_connections()
folder = sht.range('scenario_folder_controls').value
config_name = sht.range('configINI_name').value
path = os.path.join(directory, folder, config_name)
if not os.path.exists(os.path.join(directory, folder)):
os.makedirs(os.path.join(directory, folder))
config = [row for row in sht.range('configINI').value if (row is not None and row[0] is not None and row[0]!='')]
with open(path, 'wb') as outfile:
csvwriter = csv.writer(outfile, delimiter=':')
for i, row in enumerate(config):
if row[1] is None:
if row[0][0]=='#' or row[0][0]=='[':
csvwriter.writerow([row[0]])
else:
csvwriter.writerow((row[0],''))
else:
key, value = row
value = ('True' if value else 'False') if type(value) is bool else value
                value = str(int(float(value))) if is_strnumeric(str(value)) and float(value) == int(float(value)) else str(value)
csvwriter.writerow([key, ' ' + value])
next_row = config[i + 1] if i + 1 < len(config) else None
if next_row is not None and next_row[0][0]=='[' and next_row[0][-1]==']':
csvwriter.writerow([])
_msg("sucessfully saved config file")
def _make_connections():
global wb, sht, directory
wb = xw.Book.caller()
sht = wb.sheets.active
directory = os.path.dirname(wb.fullname)
def _connect_to_db():
_make_connections()
global con, cur
_clear_msg()
pg_host = _get_config('pg_host')
if not pg_host:
pg_host = 'localhost'
pg_user = _get_config('pg_user')
pg_password = _get_config('pg_password')
pg_database = _get_config('pg_database')
conn_str = "host='%s' dbname='%s' user='%s'" % (pg_host, pg_database, pg_user)
if pg_password:
conn_str += " password='%s'" % pg_password
# Open pathways database
con = psycopg2.connect(conn_str)
cur = con.cursor()
_msg("connection to db successful")
def _get_scenario_list(scenario_folder_range):
folder = sht.range(scenario_folder_range).value
path = os.path.join(directory, folder)
if not os.path.exists(path):
_msg("error: cannot find path {}".format(path))
sys.exit()
# os.path.getmtime(path)
# http://stackoverflow.com/questions/237079/how-to-get-file-creation-modification-date-times-in-python
scenarios = sorted([f[:-5] for f in os.listdir(path) if f[-5:] == '.json'])
return scenarios
def refresh_scenario_list():
_make_connections()
_clear_msg()
scenarios = _get_scenario_list('scenario_folder')
sht.range('scenario_list').clear_contents()
sht.range('scenario_list').value = [[s] for s in scenarios] # stack
_msg("scenario list successfully refreshed")
def refresh_scenario_list_controls():
_make_connections()
_clear_msg()
scenarios = _get_scenario_list('scenario_folder_controls')
sht.range('scenario_list_controls').clear_contents()
sht.range('scenario_list_controls').value = [[s] for s in scenarios] # stack
_msg("scenario list successfully refreshed")
def delete_scenario():
_make_connections()
scenario_to_delete = wb.app.selection.value
folder = sht.range('scenario_folder').value
path = os.path.join(directory, folder, scenario_to_delete + '.json')
if not os.path.isfile(path):
_msg("error: cannot find path {}".format(path))
sys.exit()
else:
os.remove(path)
_msg("file {} has been deleted".format(scenario_to_delete))
    scenarios = _get_scenario_list('scenario_folder')
sht.range('scenario_list').clear_contents()
sht.range('scenario_list').value = [[s] for s in scenarios] # stack
def pop_open_json_file():
_make_connections()
folder = sht.range('scenario_folder').value
file_name = wb.app.selection.value
path = os.path.join(directory, folder, file_name + '.json')
try:
# http://stackoverflow.com/questions/17317219/is-there-an-platform-independent-equivalent-of-os-startfile
if sys.platform == "win32":
os.startfile(path)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, path])
_msg("JSON opened for file {}".format(file_name))
    except Exception:
_msg("error: unable to open JSON from path {}".format(path))
def _load_json(path):
with open(path, 'rb') as infile:
loaded = json.load(infile)
return loaded
def save_scenarios():
_connect_to_db()
    index_count = 6  # first 6 columns form the index
values = sht.range('values').value
num_good_columns = sum([v is not None for v in values[0]])
values = [row[:num_good_columns] for row in values if row[0] is not None]
num_good_scenarios = num_good_columns - index_count
meta_data = {}
meta_data['d'] = dict(zip(range(num_good_scenarios),
['Demand Case: ' + n for n in sht.range('demand_cases').value[:num_good_scenarios]]))
meta_data['demand_case_description'] = dict(
zip(range(num_good_scenarios), sht.range('demand_case_description').value[:num_good_scenarios]))
meta_data['s'] = dict(zip(range(num_good_scenarios),
['Supply Case: ' + n for n in sht.range('supply_cases').value[:num_good_scenarios]]))
meta_data['supply_case_description'] = dict(
zip(range(num_good_scenarios), sht.range('supply_case_description').value[:num_good_scenarios]))
meta_data['scenario_names'] = dict(
zip(range(num_good_scenarios), sht.range('scenario_names').value[:num_good_scenarios]))
meta_data['scenario_description'] = dict(
zip(range(num_good_scenarios), sht.range('scenario_description').value[:num_good_scenarios]))
# set up JSON structure
json_files = OrderedDict()
for s in range(num_good_scenarios):
json_files[meta_data['scenario_names'][s]] = OrderedDict()
if meta_data['scenario_description'][s]:
json_files[meta_data['scenario_names'][s]]["description"] = meta_data['scenario_description'][s]
json_files[meta_data['scenario_names'][s]][meta_data['d'][s]] = OrderedDict()
if meta_data['demand_case_description'][s]:
json_files[meta_data['scenario_names'][s]][meta_data['d'][s]]["description"] = \
meta_data['demand_case_description'][s]
json_files[meta_data['scenario_names'][s]][meta_data['s'][s]] = OrderedDict()
if meta_data['supply_case_description'][s]:
json_files[meta_data['scenario_names'][s]][meta_data['s'][s]]["description"] = \
meta_data['supply_case_description'][s]
for row in values[1:]:
side, node_id, node_name, measure_type, measure_id, measure_name = row[:index_count]
x_marks_spot = row[index_count:]
for s, x in zip(range(num_good_scenarios), x_marks_spot):
if measure_id == SENSITIVTY_LABEL:
if x == 'x':
if not json_files[meta_data['scenario_names'][s]][meta_data[side][s]].has_key('Sensitivities'):
json_files[meta_data['scenario_names'][s]][meta_data[side][s]]['Sensitivities'] = []
sensitivity = OrderedDict([('table', measure_type), ('parent_id', int(node_id)), ('sensitivity', measure_name)])
json_files[meta_data['scenario_names'][s]][meta_data[side][s]]['Sensitivities'].append(sensitivity)
elif measure_type == '$description': # the dollar sign was added when loading to make it sort first in excel
if x: # if the description is empty, we just want to skip it
if not json_files[meta_data['scenario_names'][s]][meta_data[side][s]].has_key(node_name):
json_files[meta_data['scenario_names'][s]][meta_data[side][s]][node_name] = OrderedDict()
json_files[meta_data['scenario_names'][s]][meta_data[side][s]][node_name]["description"] = x
elif x == 'x': # x means we include the measure in the JSON
if not json_files[meta_data['scenario_names'][s]][meta_data[side][s]].has_key(node_name):
json_files[meta_data['scenario_names'][s]][meta_data[side][s]][node_name] = OrderedDict()
if not json_files[meta_data['scenario_names'][s]][meta_data[side][s]][node_name].has_key(measure_type):
json_files[meta_data['scenario_names'][s]][meta_data[side][s]][node_name][measure_type] = []
# they will already be in order as we add them
json_files[meta_data['scenario_names'][s]][meta_data[side][s]][node_name][measure_type].append(int(measure_id))
write_folder = sht.range('scenario_folder').value
base_path = os.path.join(directory, write_folder)
if not os.path.exists(base_path):
os.makedirs(base_path)
for header, name, content in zip(values[0][index_count:], json_files.keys(), json_files.values()):
file_name = header if header[-5:].lower() == '.json' else header + '.json'
path = os.path.join(base_path, file_name)
with open(path, 'wb') as outfile:
json.dump({name: content}, outfile, indent=4, separators=(',', ': '))
_msg("successfully saved all scenarios!")
##############
# Table CRUD #
##############
MEASURE_SHEET_DESCRIPTION_STR = 'C1:C2'
MEASURE_PARENT_NAME_RANGE_STR = 'C5'
MEASURE_PARENT_DATA_RANGE_STR = 'B7:C32'
MEASURE_SUBTABLE_NAME_RANGE_STR = 'F5'
MEASURE_SUBTABLE_DATA_RANGE_STR = 'E7:F32'
MEASURE_DATA_TABLE_NAME_RANGE_STR = 'I5'
# Note that measure data is limited to 1,000 rows; more than that may display, but beyond that will not save!
MEASURE_DATA_RANGE_STR = 'H6:P1012'
PYTHON_MSG_LABEL_STR = 'B3'
PYTHON_MSG_STR = 'C3'
MEASURE_DESCRIPTION_LABEL_STR = 'B1:B2'
PARENT_TABLE_LABEL_STR = 'B5'
SUBTABLE_LABEL_STR = 'E5'
DATA_TABLE_LABEL_STR = 'H5'
PARENT_TABLE_HEADERS_STR = 'B6:F6'
OPTIONS_LABEL_STR = 'R5'
OPTIONS_COLUMN_HEADER_STR = 'S5'
OPTIONS_COLUMN_STR = 'S6:S1006'
FIRST_MEASURE_SHEET = 2
MEASURE_SHEET_COUNT = 3
SUBNODE_COL = 7
MEASURE_TABLE_COL = 9
MEASURE_ID_COL = 10
MEASURE_NAME_COL = 11
# This tells us any special associated subtables.
# "None" means include the usual parent-table-with-"Data"-on-the-end table.
MEASURE_SUBTABLES = {
'DemandFuelSwitchingMeasures': (
'DemandFuelSwitchingMeasuresCost',
'DemandFuelSwitchingMeasuresEnergyIntensity',
'DemandFuelSwitchingMeasuresImpact'
),
'DemandServiceDemandMeasures': (None, 'DemandServiceDemandMeasuresCost'),
'DemandEnergyEfficiencyMeasures': (None, 'DemandEnergyEfficiencyMeasuresCost')
}
# This tells us what id:name lookup table to use for a given column name
LOOKUP_MAP = {
'gau_id': 'GeographiesData',
'oth_1_id': 'OtherIndexesData',
'oth_2_id': 'OtherIndexesData',
'final_energy': 'FinalEnergy',
'final_energy_id': 'FinalEnergy',
'final_energy_from_id': 'FinalEnergy',
'final_energy_to_id': 'FinalEnergy',
'demand_tech_id': 'DemandTechs',
'demand_technology_id': 'DemandTechs',
'supply_tech_id': 'SupplyTechs',
'supply_technology_id': 'SupplyTechs',
'efficiency_type_id': 'EfficiencyTypes',
'supply_node_id': 'SupplyNodes',
'blend_node_id': 'SupplyNodes',
'import_node_id': 'SupplyNodes',
'demand_sector_id': 'DemandSectors',
'ghg_type_id': 'GreenhouseGasEmissionsType',
'ghg_id': 'GreenhouseGases',
'dispatch_feeder_id': 'DispatchFeeders',
'dispatch_constraint_id': 'DispatchConstraintTypes',
'day_type_id': 'DayType',
'timeshift_type_id': 'FlexibleLoadShiftTypes',
'subsector_id': 'DemandSubsectors',
'geography_id': 'Geographies',
'other_index_1_id': 'OtherIndexes',
'other_index_2_id': 'OtherIndexes',
'replaced_demand_tech_id': 'DemandTechs',
'input_type_id': 'InputTypes',
'interpolation_method_id': 'CleaningMethods',
'extrapolation_method_id': 'CleaningMethods',
'stock_decay_function_id': 'StockDecayFunctions',
'currency_id': 'Currencies',
'demand_tech_efficiency_types': 'DemandTechEfficiencyTypes',
'definition_id': 'Definitions',
'demand_tech_unit_type_id': 'DemandTechUnitTypes',
'shape_id': 'Shapes',
'linked_id': 'DemandTechs',
'supply_type_id': 'SupplyTypes',
'tradable_geography_id': 'Geographies'
}
# These hold lookup tables that are static for our purposes, so can be shared at the module level
# (no sense in going back to the database for these lookup tables for each individual chunk)
name_lookups = {}
id_lookups = {}
def _lookup_for(table, key_col='id', filter_col=None, filter_value=None):
"""
Provides dictionaries of id: name or key_col: id from a table, depending on which column is specified as the
key_col. Optionally filters the table by another column first, which is needed when we're only interested
in, e.g. the geographical units within a particular geography.
"""
if key_col == 'id':
fields = 'id, name'
else:
fields = '{}, id'.format(key_col)
query = 'SELECT {} FROM "{}"'.format(fields, table)
if filter_col:
query += ' WHERE {} = %s'.format(filter_col)
cur.execute(query, (filter_value,))
else:
cur.execute(query)
rows = cur.fetchall()
keys = [row[0] for row in rows]
if len(keys) != len(set(keys)):
filter_str = ", filtering on {} = {}".format(filter_col, filter_value) if filter_col else ''
raise ValueError("Duplicate keys creating lookup for {} using key column {}{}.".format(
table, key_col, filter_str
))
return {row[0]: row[1] for row in rows}
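# Example usage (table and column names here are illustrative):
# _lookup_for('FinalEnergy') yields a plain {id: name} mapping, while something
# like _lookup_for('GeographiesData', filter_col='geography_id', filter_value=1)
# would restrict the mapping to the units of a single geography, the filtered
# case the docstring above describes.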
def _name_for(table, id):
"""Look up the name for a common item like a geographical unit or an other index value"""
try:
return name_lookups[table][id]
except KeyError:
name_lookups[table] = _lookup_for(table)
return name_lookups[table][id]
def _id_for(table, name):
"""
Look up the id of an item by its name.
Note that this does NOT do any filtering of the source table, so if you need to make sure the lookup is only
done on a subset of the table (e.g., because different subsets may re-use the same name), you'll need to
use lookup_for() directly
"""
try:
return id_lookups[table][name]
except KeyError:
id_lookups[table] = _lookup_for(table, 'name')
return id_lookups[table][name]
def _data_header(cols, parent_dict):
"""Constructs a header row for the worksheet based on the columns in the table and contents of the parent row"""
out = []
for col in cols:
if col == 'gau_id':
out.append(parent_dict['geography_id'])
elif col == 'oth_1_id':
out.append(parent_dict['other_index_1_id'])
elif col == 'oth_2_id':
out.append(parent_dict['other_index_2_id'])
else:
out.append(col)
return out
def _load_parent_row(table, parent_id):
parent_id_col = _get_id_col_of_parent(table)
parent_query = 'SELECT * FROM "{}" WHERE {} = %s'.format(table, parent_id_col)
cur.execute(parent_query, (parent_id,))
parent_col_names = [desc[0] for desc in cur.description]
row = cur.fetchone()
values = []
if row:
for i, val in enumerate(row):
if val and parent_col_names[i] in LOOKUP_MAP:
values.append(_name_for(LOOKUP_MAP[parent_col_names[i]], val))
elif val in (True, False):
values.append(str(val))
else:
values.append(val)
return zip(parent_col_names, values)
def _clear_measure_sheet_contents():
for measure_sheet_index in range(FIRST_MEASURE_SHEET, FIRST_MEASURE_SHEET + MEASURE_SHEET_COUNT):
sheet = wb.sheets[measure_sheet_index]
sheet.clear_contents()
sheet.range(PYTHON_MSG_LABEL_STR).value = 'PYTHON MSG'
sheet.range(PYTHON_MSG_STR).formula = '=python_msg'
sheet.range(MEASURE_DESCRIPTION_LABEL_STR).value = [['Measure Description:'], ['Data Table Description:']]
sheet.range(PARENT_TABLE_LABEL_STR).value = 'Parent Table:'
sheet.range(SUBTABLE_LABEL_STR).value = 'Sub Table:'
sheet.range(DATA_TABLE_LABEL_STR).value = 'Data Table:'
sheet.range(PARENT_TABLE_HEADERS_STR).value = ['Column Name', 'Value', None, 'Column Name', 'Value']
sheet.range(OPTIONS_LABEL_STR).value = 'Options:'
wb.sheets[FIRST_MEASURE_SHEET + 1].name = '(no data)'
# Sheet names can't be identical, thus the trailing space
wb.sheets[FIRST_MEASURE_SHEET + 2].name = '(no data) '
def _load_measure_sheet(measure_sheet, table, parent_id, parent_data, subtable=None, data_table=None):
# Assumption: all of the measure data sheets have had their contents cleared before entering this function
sheet_description_range = measure_sheet.range(MEASURE_SHEET_DESCRIPTION_STR)
parent_name_range = measure_sheet.range(MEASURE_PARENT_NAME_RANGE_STR)
parent_data_range = measure_sheet.range(MEASURE_PARENT_DATA_RANGE_STR)
subtable_name_range = measure_sheet.range(MEASURE_SUBTABLE_NAME_RANGE_STR)
subtable_data_range = measure_sheet.range(MEASURE_SUBTABLE_DATA_RANGE_STR)
data_table_name_range = measure_sheet.range(MEASURE_DATA_TABLE_NAME_RANGE_STR)
data_range = measure_sheet.range(MEASURE_DATA_RANGE_STR)
# Extract some useful info from the parent_data
parent_dict = dict(parent_data)
# Show user the name of the data table, within the sheet and in the worksheet name itself
if data_table is None:
data_table = subtable + 'Data' if subtable else table + 'Data'
# Populate metadata about table and data for parent object
title = "{} for {} #{}".format(data_table, table, parent_id)
if 'name' in parent_dict:
title += ', "{}"'.format(parent_dict['name'])
cur.execute("""
SELECT obj_description(pg_class.oid)
FROM pg_class
JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
WHERE relkind = 'r'
AND nspname = 'public'
AND relname = %s;
""", (data_table,))
table_desc = cur.fetchone()[0]
sheet_description_range.value = [[title], [table_desc]]
parent_name_range.value = table
parent_data_range.value = parent_data
# Excel worksheet names are limited to 31 characters; this takes the rightmost 31 characters,
# then strips any dangling word endings off the beginning
measure_sheet.name = data_table[-31:].lstrip(string.ascii_lowercase)
# This might seem a little redundant, but we need it for saving later on
data_table_name_range.value = data_table
# Load subtable "parent" information
if subtable:
subtable_name_range.value = subtable
subtable_data = _load_parent_row(subtable, parent_id)
if subtable_data:
subtable_data_range.value = subtable_data
# If there's a subtable we overwrite the previous parent_dict with the subtable's values because the
# subtable is what will have, e.g. the geography_id for the data table
parent_dict = dict(subtable_data)
else:
# If there was no data in the subtable, there won't be (or shouldn't be!) any data in the data table,
# so we can abort.
subtable_data_range.value = ['(no data found in this table for this measure or sensitivity)']
return
else:
subtable_name_range.value = '(N/A)'
# Populate time series data
parent_col = _get_parent_col(data_table)
data_query = 'SELECT * FROM "{}" WHERE {} = %s'.format(data_table, parent_col)
cur.execute(data_query, (parent_id,))
data_col_names = [desc[0] for desc in cur.description]
rows = cur.fetchall()
if len(rows):
# Any column that's empty in the first row is one that this chunk doesn't use, so we'll skip it.
# (Columns must always be used or never used within a given chunk -- "sensitivity" is the one exception.)
skip_indexes = set(i for i, val in enumerate(rows[0]) if val is None and data_col_names[i] != 'sensitivity')
skip_indexes.add(data_col_names.index('id'))
skip_indexes.add(data_col_names.index(parent_col))
header_cols = [col for i, col in enumerate(data_col_names) if i not in skip_indexes]
data = []
for row in rows:
xl_row = []
for i, val in enumerate(row):
if i in skip_indexes:
continue
if data_col_names[i] in LOOKUP_MAP:
xl_row.append(_name_for(LOOKUP_MAP[data_col_names[i]], val))
else:
xl_row.append(val)
data.append(xl_row)
data.sort()
data.insert(0, _data_header(header_cols, parent_dict))
data_range.value = data
else:
# TODO: instead of doing nothing, try to populate column headers for the data table so the user can enter data
pass
def load_measure():
_connect_to_db()
_clear_measure_sheet_contents()
# Handle the row differently depending on whether it's a sensitivity or not
if sht.range((wb.app.selection.row, MEASURE_ID_COL)).value == SENSITIVTY_LABEL:
data_table = sht.range((wb.app.selection.row, MEASURE_TABLE_COL)).value
table = SENSITIVITIES[data_table]['parent_table']
for suffix in ('NewData', 'ReplacementData', 'Data'):
if data_table.endswith(suffix):
subtable = data_table[:-len(suffix)]
break
parent_id = int(sht.range((wb.app.selection.row, SUBNODE_COL)).value)
parent_data = _load_parent_row(table, parent_id)
_load_measure_sheet(wb.sheets[FIRST_MEASURE_SHEET], table, parent_id, parent_data, subtable, data_table)
_msg("Sensitivity loaded")
else:
table = sht.range((wb.app.selection.row, MEASURE_TABLE_COL)).value
parent_id = int(sht.range((wb.app.selection.row, MEASURE_ID_COL)).value)
parent_data = _load_parent_row(table, parent_id)
measure_sheet_offset = 0
subtables = MEASURE_SUBTABLES.get(table, [None])
for subtable in subtables:
_load_measure_sheet(wb.sheets[FIRST_MEASURE_SHEET + measure_sheet_offset], table, parent_id, parent_data, subtable)
measure_sheet_offset += 1
_msg("{} measure data sheet(s) loaded".format(measure_sheet_offset))
wb.sheets[FIRST_MEASURE_SHEET].activate()
def _fix_sequence(table):
print table
con.rollback()
seq_fix_query = """
SELECT setval(pg_get_serial_sequence('"{table}"', 'id'), MAX(id))
FROM "{table}";
""".format(table=table)
cur.execute(seq_fix_query)
def _duplicate_rows(table, old_parent_id, new_parent_id=None):
pri_col = _get_id_col_of_parent(table)
# If there's no new_parent_id yet, this must be the top parent table that we're duplicating
parent_col = _get_parent_col(table)
copy_key_col = parent_col if new_parent_id else pri_col
cols_to = []
cols_from = []
for col in _get_columns(table):
if col == pri_col and pri_col != parent_col:
continue
cols_to.append(col)
if col == 'name':
cols_from.append("COALESCE('copy of ' || name, 'copy of unnamed measure #' || id)")
elif col == parent_col and new_parent_id:
cols_from.append(str(new_parent_id))
else:
cols_from.append(col)
query = """
INSERT INTO "{table}" ({cols_to})
SELECT {cols_from}
FROM "{table}"
WHERE {copy_key_col} = %s
RETURNING {pri_col}
""".format(table=table, cols_to=', '.join(cols_to), cols_from=', '.join(cols_from),
copy_key_col=copy_key_col, pri_col=pri_col)
try:
cur.execute(query, (old_parent_id,))
except psycopg2.IntegrityError as e:
_fix_sequence(table)
raise SequenceError
inserted_id = cur.fetchone()[0]
return inserted_id
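# Illustration only (hypothetical table/column names, assuming _get_id_col_of_parent returns 'id' and
# _get_parent_col returns 'parent_id'): copying the rows of "ExampleMeasuresData" under a new parent 43
# builds roughly
#   INSERT INTO "ExampleMeasuresData" (parent_id, gau_id, year, value)
#   SELECT 43, gau_id, year, value FROM "ExampleMeasuresData"
#   WHERE parent_id = %s RETURNING id
# i.e. the primary key is omitted so the sequence assigns fresh ids, and the parent column is rewritten.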
# This is the method that actually duplicates the measure; it's separate from duplicate_measure()
# so that we don't keep reconnecting to the database if we need to retry due to a sequence problem
def _duplicate_measure(retries=0):
table = sht.range((wb.app.selection.row, MEASURE_TABLE_COL)).value
parent_id = int(sht.range((wb.app.selection.row, MEASURE_ID_COL)).value)
parent_dict = dict(_load_parent_row(table, parent_id))
try:
new_parent_id = _duplicate_rows(table, parent_id)
subtables = MEASURE_SUBTABLES.get(table, [None])
for subtable in subtables:
if subtable is None:
_duplicate_rows(table + 'Data', parent_id, new_parent_id)
else:
old_subparent_row = _load_parent_row(subtable, parent_id)
# Occasionally it happens that a subtable doesn't have any data for a particular measure,
# so we need this "if" to guard against trying to copy None
if old_subparent_row:
old_subparent_id = dict(old_subparent_row)[_get_id_col_of_parent(subtable)]
new_subparent_id = _duplicate_rows(subtable, parent_id, new_parent_id)
_duplicate_rows(subtable + 'Data', old_subparent_id, new_subparent_id)
except SequenceError:
# We had to fix a sequence in one of the tables. This means our transaction got rolled back,
# any data we previously inserted is gone and we need to start over
assert retries <= 10, "duplicate_measures() seems to be stuck in a sequence-fixing loop; aborting."
# Return here so the retry's own commit and message aren't duplicated by this frame
return _duplicate_measure(retries + 1)
con.commit()
_msg("Duplicated {} #{}. Compare scenarios with 'merge by: left' to see the new measure.".format(table, parent_id))
def duplicate_measure():
if sht.range((wb.app.selection.row, MEASURE_ID_COL)).value == SENSITIVTY_LABEL:
_msg("Can't duplicate sensitivities, only measures")
return
_connect_to_db()
_duplicate_measure()
def _parent_data_to_dict(parent_data):
"""Make a dict out of the column names and values for the parent data, discarding any unused rows"""
parent_dict = {}
for (col, val) in parent_data:
if col:
parent_dict[col] = val
return parent_dict
def _update_parent_table(parent_table, parent_dict):
update_cols = []
update_vals = []
id_col_of_parent = _get_id_col_of_parent(parent_table)
if id_col_of_parent in LOOKUP_MAP:
parent_id = _id_for(LOOKUP_MAP[id_col_of_parent], parent_dict[id_col_of_parent])
else:
parent_id = parent_dict[id_col_of_parent]
for col, val in parent_dict.iteritems():
if col != id_col_of_parent:
update_cols.append(col)
if val is None or val == '':
update_vals.append(None)
elif col in LOOKUP_MAP:
try:
update_vals.append(_id_for(LOOKUP_MAP[col], val))
except KeyError:
raise PathwaysLookupError(val, parent_table, col)
else:
update_vals.append(val)
val_placeholder = ', '.join(["%s"] * len(update_vals))
parent_update_query = 'UPDATE "{}" SET ({}) = ({}) WHERE {} = %s'.format(
parent_table, ', '.join(update_cols), val_placeholder, id_col_of_parent
)
cur.execute(parent_update_query, update_vals + [parent_id])
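# Illustration only (hypothetical names, assuming _get_id_col_of_parent returns 'id'): for
# parent_dict = {'id': 42, 'name': 'my measure', 'geography_id': 'us states'} on table "ExampleMeasures"
# this builds roughly (column order follows dict iteration)
#   UPDATE "ExampleMeasures" SET (name, geography_id) = (%s, %s) WHERE id = %s
# executed with ['my measure', <id resolved via _id_for for 'us states'>] + [42].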
def save_measure_sheet():
assert hasattr(psycopg2.extras, 'execute_values'), "Scenario builder requires psycopg2 version 2.7 or greater " \
"in order to save data; please use conda or pip to upgrade " \
"your psycopg2 package"
_connect_to_db()
top_parent_name = sht.range(MEASURE_PARENT_NAME_RANGE_STR).value
parent_data_range = sht.range(MEASURE_PARENT_DATA_RANGE_STR)
subtable_name = sht.range(MEASURE_SUBTABLE_NAME_RANGE_STR).value
subtable_data_range = sht.range(MEASURE_SUBTABLE_DATA_RANGE_STR)
data_table_name_range = sht.range(MEASURE_DATA_TABLE_NAME_RANGE_STR)
data_range = sht.range(MEASURE_DATA_RANGE_STR)
top_parent_dict = _parent_data_to_dict(parent_data_range.value)
subtable_dict = _parent_data_to_dict(subtable_data_range.value)
id_col_of_parent = _get_id_col_of_parent(top_parent_name)
parent_id = top_parent_dict[id_col_of_parent]
# To get the effective parent row for the data table, we use the "nearest" table -- that is, use the subtable
# if there is one, otherwise use the top level parent
parent_dict = subtable_dict or top_parent_dict
data_table = data_table_name_range.value
# Load the measure data into a list of lists, discarding unused rows and columns
measure_data = data_range.value
used_cols = set(i for i, val in enumerate(measure_data[0]) if val)
measure_data = [[val for i, val in enumerate(row) if i in used_cols] for row in measure_data if any(row)]
if not measure_data:
_msg("Save failed: could not find measure data to save")
return
header = measure_data.pop(0)
# Convert the column names shown in Excel back to the actual database column names
db_cols = [_get_parent_col(data_table)]
for col in header:
if col == parent_dict['geography_id']:
db_cols.append('gau_id')
# Note: This will now accept foreign GAUs by not filtering by geography_id when creating the geographies_data id lookup.
# It is possible because the name column of GeographiesData is itself unique. This feature is for advanced users only.
geographies_data = _lookup_for('GeographiesData', 'name')
elif 'other_index_1_id' in parent_dict and parent_dict['other_index_1_id'] and col == parent_dict['other_index_1_id']:
db_cols.append('oth_1_id')
other_index_1_data = _lookup_for('OtherIndexesData', 'name', 'other_index_id',
_id_for('OtherIndexes', parent_dict['other_index_1_id']))
elif 'other_index_2_id' in parent_dict and parent_dict['other_index_2_id'] and col == parent_dict['other_index_2_id']:
db_cols.append('oth_2_id')
other_index_2_data = _lookup_for('OtherIndexesData', 'name', 'other_index_id',
_id_for('OtherIndexes', parent_dict['other_index_2_id']))
else:
db_cols.append(col)
# Convert the text values shown in the data table back into database ids
db_rows = []
for row in measure_data:
db_row = [parent_id]
for i, val in enumerate(row):
# + 1 here because we need to skip past the parent_id column
col = db_cols[i + 1]
if col == 'gau_id':
try:
db_row.append(geographies_data[val])
except KeyError:
raise PathwaysLookupError(val, data_table, parent_dict['geography_id'])
elif col == 'oth_1_id':
try:
db_row.append(other_index_1_data[val])
except KeyError:
raise PathwaysLookupError(val, data_table, parent_dict['other_index_1_id'])
elif col == 'oth_2_id':
try:
db_row.append(other_index_2_data[val])
except KeyError:
raise PathwaysLookupError(val, data_table, parent_dict['other_index_2_id'])
elif col in LOOKUP_MAP:
try:
db_row.append(_id_for(LOOKUP_MAP[col], val))
except KeyError:
raise PathwaysLookupError(val, data_table, col)
else:
db_row.append(val)
db_rows.append(db_row)
# Clean out the old data from the data table for this parent_id
del_query = 'DELETE FROM "{}" WHERE {} = %s'.format(data_table, db_cols[0])
cur.execute(del_query, (parent_id,))
# Insert the new data from the worksheet
ins_query = 'INSERT INTO "{}" ({}) VALUES %s'.format(data_table, ', '.join(db_cols))
try:
# This requires psycopg2 >=2.7
psycopg2.extras.execute_values(cur, ins_query, db_rows)
except psycopg2.IntegrityError:
_fix_sequence(data_table)
# Now redo the delete and insert that we were originally trying to do
cur.execute(del_query, (parent_id,))
psycopg2.extras.execute_values(cur, ins_query, db_rows)
# Update the parent tables
_update_parent_table(top_parent_name, top_parent_dict)
if subtable_dict:
_update_parent_table(subtable_name, subtable_dict)
con.commit()
_msg('Successfully saved {} for parent_id {}'.format(data_table, int(parent_id)))
def _range_contains(container, cell):
# Check if cell is inside container. "Cell" may actually be a multi-cell range, in which case we're just
# checking the top-left cell, not looking at its entire extent.
return (container.column <= cell.column <= container.last_cell.column and
container.row <= cell.row <= container.last_cell.row)
def _replace_options(msg, new_header=None, new_options=None):
_msg(msg)
# Replaces the contents of the options area. If no header or options are passed in, they're just cleared
header = sht.range(OPTIONS_COLUMN_HEADER_STR)
options = sht.range(OPTIONS_COLUMN_STR)
if new_header:
header.value = new_header
else:
header.clear_contents()
options.clear_contents()
if new_options:
options.value = [[option] for option in new_options]
def get_options():
_connect_to_db()
parent_data_range = sht.range(MEASURE_PARENT_DATA_RANGE_STR)
subtable_data_range = sht.range(MEASURE_SUBTABLE_DATA_RANGE_STR)
data_table_range = sht.range(MEASURE_DATA_RANGE_STR)
selection = wb.app.selection
lookup = None
inside_range = None
if _range_contains(parent_data_range, selection):
inside_range = 'parent'
elif _range_contains(subtable_data_range, selection):
inside_range = 'subtable'
elif _range_contains(data_table_range, selection):
inside_range = 'data'
if inside_range:
if inside_range == 'data':
table = sht.range(MEASURE_DATA_TABLE_NAME_RANGE_STR).value
lookup_col = sht.range((data_table_range.row, selection.column)).value
if not lookup_col:
_replace_options("Select a column with a column name at the top to get options.")
return
top_parent_dict = _parent_data_to_dict(parent_data_range.value)
subtable_dict = _parent_data_to_dict(subtable_data_range.value)
# To get the effective parent row for the data table, we use the "nearest" table -- that is, use the subtable
# if there is one, otherwise use the top level parent
parent_dict = subtable_dict or top_parent_dict
# Special case lookups if the "column" isn't a real column but rather a value from the parent
# table's geography or other index
if lookup_col == parent_dict['geography_id']:
# because using foreign GAUs is an advanced feature and we have 1000s of geographies in the model,
# it makes sense here to still filter by geography_id even though it will technically support foreign GAUs as an input.
lookup = _lookup_for('GeographiesData', 'name', 'geography_id',
_id_for('Geographies', parent_dict['geography_id']))
elif 'other_index_1_id' in parent_dict and parent_dict['other_index_1_id'] and lookup_col == parent_dict[
'other_index_1_id']:
lookup = _lookup_for('OtherIndexesData', 'name', 'other_index_id',
_id_for('OtherIndexes', parent_dict['other_index_1_id']))
elif 'other_index_2_id' in parent_dict and parent_dict['other_index_2_id'] and lookup_col == parent_dict[
'other_index_2_id']:
lookup = _lookup_for('OtherIndexesData', 'name', 'other_index_id',
_id_for('OtherIndexes', parent_dict['other_index_2_id']))
else:
if inside_range == 'parent':
lookup_xl_col = parent_data_range.column
table = sht.range(MEASURE_PARENT_NAME_RANGE_STR).value
else:
lookup_xl_col = subtable_data_range.column
table = sht.range(MEASURE_SUBTABLE_NAME_RANGE_STR).value
lookup_col = sht.range((selection.row, lookup_xl_col)).value
if not lookup_col:
_replace_options("Select a row containing a column name to get options.")
return
qualified_col = "{}.{}".format(table, lookup_col)
if not lookup:
# We haven't already retrieved the lookup table due to one of the special cases above
try:
lookup_table = LOOKUP_MAP[lookup_col]
except KeyError:
_replace_options("No preset options to show for {}.".format(qualified_col))
return
lookup = _lookup_for(lookup_table, 'name')
options = sorted(lookup.keys())
_replace_options("Options loaded for {}".format(qualified_col), qualified_col, options)
else:
_replace_options("To get options select a row within a table.")
def delete_measure():
_connect_to_db()
table = sht.range((wb.app.selection.row, MEASURE_TABLE_COL)).value
if sht.range((wb.app.selection.row, MEASURE_ID_COL)).value == SENSITIVTY_LABEL:
parent_id = int(sht.range((wb.app.selection.row, SUBNODE_COL)).value)
parent_col = _get_parent_col(table)
sensitivity = sht.range((wb.app.selection.row, MEASURE_NAME_COL)).value
query = 'DELETE FROM "{}" WHERE {} = %s AND sensitivity = %s'.format(table, parent_col)
cur.execute(query, (parent_id, sensitivity))
deleted_count = cur.rowcount
con.commit()
_msg('{} "{}" rows deleted from {} #{}'.format(deleted_count, sensitivity, table, parent_id))
else:
parent_id = int(sht.range((wb.app.selection.row, MEASURE_ID_COL)).value)
parent_id_col = _get_id_col_of_parent(table)
query = 'DELETE FROM "{}" WHERE {} = %s'.format(table, parent_id_col)
cur.execute(query, (parent_id,))
deleted_count = cur.rowcount
if deleted_count == 1:
con.commit()
_msg("{} #{} deleted.".format(table, parent_id))
else:
con.rollback()
raise IOError("Canceling deletion because {} measures would have been deleted.".format(deleted_count))
# This tricksiness enables us to debug from the command line, e.g. using ipdb
if __name__ == '__main__':
xw.Book('scenario_builder.xlsm').set_mock_caller()
import ipdb
with ipdb.launch_ipdb_on_exception():
# This is just an example; call whatever you're trying to debug here
delete_measure()
| mit |
sem-geologist/hyperspy | hyperspy/_signals/lazy.py | 3 | 36898 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
from functools import partial
import warnings
import numpy as np
import dask.array as da
import dask.delayed as dd
from dask import threaded
from dask.diagnostics import ProgressBar
from itertools import product
from ..signal import BaseSignal
from ..misc.utils import multiply, dummy_context_manager
from ..external.progressbar import progressbar
from ..external.astroML.histtools import dasky_histogram
from hyperspy.misc.array_tools import _requires_linear_rebin
from hyperspy.exceptions import VisibleDeprecationWarning
_logger = logging.getLogger(__name__)
lazyerror = NotImplementedError('This method is not available in lazy signals')
def to_array(thing, chunks=None):
"""Accepts BaseSignal, dask or numpy arrays and always produces either
numpy or dask array.
Parameters
----------
thing : {BaseSignal, dask.array.Array, numpy.ndarray}
the thing to be converted
chunks : {None, tuple of tuples}
If None, the returned value is a numpy array. Otherwise returns dask
array with the chunks as specified.
Returns
-------
res : {numpy.ndarray, dask.array.Array}
"""
if thing is None:
return None
if isinstance(thing, BaseSignal):
thing = thing.data
if chunks is None:
if isinstance(thing, da.Array):
thing = thing.compute()
if isinstance(thing, np.ndarray):
return thing
else:
raise ValueError
else:
if isinstance(thing, np.ndarray):
thing = da.from_array(thing, chunks=chunks)
if isinstance(thing, da.Array):
if thing.chunks != chunks:
thing = thing.rechunk(chunks)
return thing
else:
raise ValueError
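# Minimal usage sketch (not part of the original module):
#   arr = np.arange(6).reshape(2, 3)
#   to_array(arr)                                 # returns the numpy array unchanged
#   to_array(arr, chunks=((1, 1), (3,)))          # wraps it as a dask array with those chunks
#   to_array(da.ones((4, 4), chunks=(2, 2)))      # chunks=None -> computed back into a numpy array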
class LazySignal(BaseSignal):
"""A Lazy Signal instance that delays computation until explicitly saved
(assuming storing the full result of computation in memory is not feasible)
"""
_lazy = True
def compute(self, progressbar=True, close_file=False):
"""Attempt to store the full signal in memory.
close_file: bool
If True, attempt to close the file associated with the dask
array data if any. Note that closing the file will make all other
associated lazy signals inoperative.
"""
if progressbar:
cm = ProgressBar
else:
cm = dummy_context_manager
with cm():
da = self.data
data = da.compute()
if close_file:
self.close_file()
self.data = data
self._lazy = False
self._assign_subclass()
def close_file(self):
"""Closes the associated data file if any.
Currently it only supports closing the file associated with a dask
array created from an h5py DataSet (default HyperSpy hdf5 reader).
"""
arrkey = None
for key in self.data.dask.keys():
if "array-original" in key:
arrkey = key
break
if arrkey:
try:
self.data.dask[arrkey].file.close()
except AttributeError as e:
_logger.exception("Failed to close lazy Signal file")
def _get_dask_chunks(self, axis=None, dtype=None):
"""Returns dask chunks
Aims:
- Have at least one signal (or specified axis) in a single chunk,
or as many as fit in memory
Parameters
----------
axis : {int, string, None, axis, tuple}
If axis is None (default), returns chunks for current data shape so
that at least one signal is in the chunk. If an axis is specified,
only that particular axis is guaranteed to be "not sliced".
dtype : {string, np.dtype}
The dtype of target chunks.
Returns
-------
Tuple of tuples, dask chunks
"""
dc = self.data
dcshape = dc.shape
for _axis in self.axes_manager._axes:
if _axis.index_in_array < len(dcshape):
_axis.size = int(dcshape[_axis.index_in_array])
if axis is not None:
need_axes = self.axes_manager[axis]
if not np.iterable(need_axes):
need_axes = [need_axes, ]
else:
need_axes = self.axes_manager.signal_axes
if dtype is None:
dtype = dc.dtype
elif not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
typesize = max(dtype.itemsize, dc.dtype.itemsize)
want_to_keep = multiply([ax.size for ax in need_axes]) * typesize
# @mrocklin recommends having around 100MB chunks, so we do that:
num_that_fit = int(100. * 2.**20 / want_to_keep)
# want to have at least one "signal" per chunk
if num_that_fit < 2:
chunks = [tuple(1 for _ in range(i)) for i in dc.shape]
for ax in need_axes:
chunks[ax.index_in_array] = dc.shape[ax.index_in_array],
return tuple(chunks)
sizes = [
ax.size for ax in self.axes_manager._axes if ax not in need_axes
]
indices = [
ax.index_in_array for ax in self.axes_manager._axes
if ax not in need_axes
]
while True:
if multiply(sizes) <= num_that_fit:
break
i = np.argmax(sizes)
sizes[i] = np.floor(sizes[i] / 2)
chunks = []
ndim = len(dc.shape)
for i in range(ndim):
if i in indices:
size = float(dc.shape[i])
split_array = np.array_split(
np.arange(size), np.ceil(size / sizes[indices.index(i)]))
chunks.append(tuple(len(sp) for sp in split_array))
else:
chunks.append((dc.shape[i], ))
return tuple(chunks)
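# Worked example (hypothetical sizes): for a float32 Signal1D with navigation shape (200, 200) and a
# 2048-point signal axis, one signal costs 2048 * 4 = 8192 bytes, so num_that_fit is 12800. The two
# navigation sizes are halved (200 -> 100, twice) until 100 * 100 <= 12800, giving chunks of
# ((100, 100), (100, 100), (2048,)) -- the signal axis is kept whole within every chunk.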
def _make_lazy(self, axis=None, rechunk=False, dtype=None):
self.data = self._lazy_data(axis=axis, rechunk=rechunk, dtype=dtype)
def change_dtype(self, dtype, rechunk=True):
from hyperspy.misc import rgb_tools
if not isinstance(dtype, np.dtype) and (dtype not in
rgb_tools.rgb_dtypes):
dtype = np.dtype(dtype)
self._make_lazy(rechunk=rechunk, dtype=dtype)
super().change_dtype(dtype)
change_dtype.__doc__ = BaseSignal.change_dtype.__doc__
def _lazy_data(self, axis=None, rechunk=True, dtype=None):
"""Return the data as a dask array, rechunked if necessary.
Parameters
----------
axis: None, DataAxis or tuple of data axes
The data axis that must not be broken into chunks when `rechunk`
is `True`. If None, it defaults to the current signal axes.
rechunk: bool, "dask_auto"
If `True`, it rechunks the data if necessary making sure that the
axes in ``axis`` are not split into chunks. If `False` it does
not rechunk, unless the data is not a dask array, in which case
it is chunked as if rechunk were `True`. If "dask_auto", rechunk if
necessary using dask's automatic chunk guessing.
"""
if rechunk == "dask_auto":
new_chunks = "auto"
else:
new_chunks = self._get_dask_chunks(axis=axis, dtype=dtype)
if isinstance(self.data, da.Array):
res = self.data
if self.data.chunks != new_chunks and rechunk:
_logger.info(
"Rechunking.\nOriginal chunks: %s" % str(self.data.chunks))
res = self.data.rechunk(new_chunks)
_logger.info(
"Final chunks: %s " % str(res.chunks))
else:
if isinstance(self.data, np.ma.masked_array):
data = np.where(self.data.mask, np.nan, self.data)
else:
data = self.data
res = da.from_array(data, chunks=new_chunks)
assert isinstance(res, da.Array)
return res
def _apply_function_on_data_and_remove_axis(self, function, axes,
out=None, rechunk=True):
def get_dask_function(numpy_name):
# Translate from the default numpy to dask functions
translations = {'amax': 'max', 'amin': 'min'}
if numpy_name in translations:
numpy_name = translations[numpy_name]
return getattr(da, numpy_name)
function = get_dask_function(function.__name__)
axes = self.axes_manager[axes]
if not np.iterable(axes):
axes = (axes, )
ar_axes = tuple(ax.index_in_array for ax in axes)
if len(ar_axes) == 1:
ar_axes = ar_axes[0]
# For reduce operations the actual signal and navigation
# axes configuration does not matter. Hence we leave
# dask guess the chunks
if rechunk is True:
rechunk = "dask_auto"
current_data = self._lazy_data(rechunk=rechunk)
# Apply reducing function
new_data = function(current_data, axis=ar_axes)
if not new_data.ndim:
new_data = new_data.reshape((1, ))
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s = self._deepcopy_with_new_data(new_data)
s._remove_axis([ax.index_in_axes_manager for ax in axes])
return s
def rebin(self, new_shape=None, scale=None,
crop=False, out=None, rechunk=True):
factors = self._validate_rebin_args_and_get_factors(
new_shape=new_shape,
scale=scale)
if _requires_linear_rebin(arr=self.data, scale=factors):
if new_shape:
raise NotImplementedError(
"Lazy rebin requires that the new shape is a divisor "
"of the original signal shape e.g. if original shape "
"(10| 6), new_shape=(5| 3) is valid, (3 | 4) is not.")
else:
raise NotImplementedError(
"Lazy rebin requires scale to be integer and divisor of the "
"original signal shape")
axis = {ax.index_in_array: ax
for ax in self.axes_manager._axes}[factors.argmax()]
self._make_lazy(axis=axis, rechunk=rechunk)
return super().rebin(new_shape=new_shape,
scale=scale, crop=crop, out=out)
rebin.__doc__ = BaseSignal.rebin.__doc__
def __array__(self, dtype=None):
return self.data.__array__(dtype=dtype)
def _make_sure_data_is_contiguous(self):
self._make_lazy(rechunk=True)
def diff(self, axis, order=1, out=None, rechunk=True):
arr_axis = self.axes_manager[axis].index_in_array
def dask_diff(arr, n, axis):
# assume arr is da.Array already
n = int(n)
if n == 0:
return arr
if n < 0:
raise ValueError("order must be positive")
nd = len(arr.shape)
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return dask_diff(arr[slice1] - arr[slice2], n - 1, axis=axis)
else:
return arr[slice1] - arr[slice2]
current_data = self._lazy_data(axis=axis, rechunk=rechunk)
new_data = dask_diff(current_data, order, arr_axis)
if not new_data.ndim:
new_data = new_data.reshape((1, ))
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
axis2 = s.axes_manager[axis]
new_offset = self.axes_manager[axis].offset + (order * axis2.scale / 2)
axis2.offset = new_offset
s.get_dimensions_from_data()
if out is None:
return s
else:
out.events.data_changed.trigger(obj=out)
diff.__doc__ = BaseSignal.diff.__doc__
def integrate_simpson(self, axis, out=None):
axis = self.axes_manager[axis]
from scipy import integrate
axis = self.axes_manager[axis]
data = self._lazy_data(axis=axis, rechunk=True)
new_data = data.map_blocks(
integrate.simps,
x=axis.axis,
axis=axis.index_in_array,
drop_axis=axis.index_in_array,
dtype=data.dtype)
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s._remove_axis(axis.index_in_axes_manager)
return s
integrate_simpson.__doc__ = BaseSignal.integrate_simpson.__doc__
def valuemax(self, axis, out=None, rechunk=True):
idx = self.indexmax(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemax.__doc__ = BaseSignal.valuemax.__doc__
def valuemin(self, axis, out=None, rechunk=True):
idx = self.indexmin(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemin.__doc__ = BaseSignal.valuemin.__doc__
def get_histogram(self, bins='freedman', out=None, rechunk=True, **kwargs):
if 'range_bins' in kwargs:
_logger.warning("'range_bins' argument not supported for lazy "
"signals")
del kwargs['range_bins']
from hyperspy.signals import Signal1D
data = self._lazy_data(rechunk=rechunk).flatten()
hist, bin_edges = dasky_histogram(data, bins=bins, **kwargs)
if out is None:
hist_spec = Signal1D(hist)
hist_spec._lazy = True
hist_spec._assign_subclass()
else:
hist_spec = out
# we always overwrite the data because the computation is lazy ->
# the result signal is lazy. Assume that the `out` is already lazy
hist_spec.data = hist
hist_spec.axes_manager[0].scale = bin_edges[1] - bin_edges[0]
hist_spec.axes_manager[0].offset = bin_edges[0]
hist_spec.axes_manager[0].size = hist.shape[-1]
hist_spec.axes_manager[0].name = 'value'
hist_spec.metadata.General.title = (
self.metadata.General.title + " histogram")
hist_spec.metadata.Signal.binned = True
if out is None:
return hist_spec
else:
out.events.data_changed.trigger(obj=out)
get_histogram.__doc__ = BaseSignal.get_histogram.__doc__
@staticmethod
def _estimate_poissonian_noise_variance(dc, gain_factor, gain_offset,
correlation_factor):
variance = (dc * gain_factor + gain_offset) * correlation_factor
# The lower bound of the variance is the gaussian noise.
variance = da.clip(variance, gain_offset * correlation_factor, np.inf)
return variance
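# For example, with gain_factor=1, gain_offset=0.5 and correlation_factor=1 a pixel value of 10 gives a
# variance of 10.5, while anything below the gaussian floor (0.5 here) is clipped up to it.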
# def _get_navigation_signal(self, data=None, dtype=None):
# return super()._get_navigation_signal(data=data, dtype=dtype).as_lazy()
# _get_navigation_signal.__doc__ = BaseSignal._get_navigation_signal.__doc__
# def _get_signal_signal(self, data=None, dtype=None):
# return super()._get_signal_signal(data=data, dtype=dtype).as_lazy()
# _get_signal_signal.__doc__ = BaseSignal._get_signal_signal.__doc__
def _calculate_summary_statistics(self, rechunk=True):
if rechunk is True:
# Use dask auto rechunk instead of HyperSpy's one, what should be
# better for these operations
rechunk = "dask_auto"
data = self._lazy_data(rechunk=rechunk)
_raveled = data.ravel()
_mean, _std, _min, _q1, _q2, _q3, _max = da.compute(
da.nanmean(data),
da.nanstd(data),
da.nanmin(data),
da.percentile(_raveled, [25, ]),
da.percentile(_raveled, [50, ]),
da.percentile(_raveled, [75, ]),
da.nanmax(data), )
return _mean, _std, _min, _q1, _q2, _q3, _max
def _map_all(self, function, inplace=True, **kwargs):
calc_result = dd(function)(self.data, **kwargs)
if inplace:
self.data = da.from_delayed(calc_result, shape=self.data.shape,
dtype=self.data.dtype)
return None
return self._deepcopy_with_new_data(calc_result)
def _map_iterate(self,
function,
iterating_kwargs=(),
show_progressbar=None,
parallel=None,
ragged=None,
inplace=True,
**kwargs):
if ragged not in (True, False):
raise ValueError('"ragged" kwarg has to be bool for lazy signals')
_logger.debug("Entering '_map_iterate'")
size = max(1, self.axes_manager.navigation_size)
from hyperspy.misc.utils import (create_map_objects,
map_result_construction)
func, iterators = create_map_objects(function, size, iterating_kwargs,
**kwargs)
iterators = (self._iterate_signal(), ) + iterators
res_shape = self.axes_manager._navigation_shape_in_array
# no navigation
if not len(res_shape) and ragged:
res_shape = (1,)
all_delayed = [dd(func)(data) for data in zip(*iterators)]
if ragged:
sig_shape = ()
sig_dtype = np.dtype('O')
else:
one_compute = all_delayed[0].compute()
sig_shape = one_compute.shape
sig_dtype = one_compute.dtype
pixels = [
da.from_delayed(
res, shape=sig_shape, dtype=sig_dtype) for res in all_delayed
]
for step in reversed(res_shape):
_len = len(pixels)
starts = range(0, _len, step)
ends = range(step, _len + step, step)
pixels = [
da.stack(
pixels[s:e], axis=0) for s, e in zip(starts, ends)
]
result = pixels[0]
res = map_result_construction(
self, inplace, result, ragged, sig_shape, lazy=True)
return res
def _iterate_signal(self):
if self.axes_manager.navigation_size < 2:
yield self()
return
nav_dim = self.axes_manager.navigation_dimension
sig_dim = self.axes_manager.signal_dimension
nav_indices = self.axes_manager.navigation_indices_in_array[::-1]
nav_lengths = np.atleast_1d(
np.array(self.data.shape)[list(nav_indices)])
getitem = [slice(None)] * (nav_dim + sig_dim)
data = self._lazy_data()
for indices in product(*[range(l) for l in nav_lengths]):
for res, ind in zip(indices, nav_indices):
getitem[ind] = res
yield data[tuple(getitem)]
def _block_iterator(self,
flat_signal=True,
get=threaded.get,
navigation_mask=None,
signal_mask=None):
"""A function that allows iterating lazy signal data by blocks,
defining the dask.Array.
Parameters
----------
flat_signal: bool
returns each block flattened, such that the shape (for the
particular block) is (navigation_size, signal_size), with
optionally masked elements missing. If false, returns
the equivalent of s.inav[{blocks}].data, where masked elements are
set to np.nan or 0.
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not returned (flat) or
set to NaN or 0.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not returned (flat) or set
to NaN or 0.
"""
self._make_lazy()
data = self._data_aligned_with_axes
nav_chunks = data.chunks[:self.axes_manager.navigation_dimension]
indices = product(*[range(len(c)) for c in nav_chunks])
signalsize = self.axes_manager.signal_size
sig_reshape = (signalsize,) if signalsize else ()
data = data.reshape((self.axes_manager.navigation_shape[::-1] +
sig_reshape))
if signal_mask is None:
signal_mask = slice(None) if flat_signal else \
np.zeros(self.axes_manager.signal_size, dtype='bool')
else:
try:
signal_mask = to_array(signal_mask).ravel()
except ValueError:
# re-raise with a message
raise ValueError("signal_mask has to be a signal, numpy or"
" dask array, but "
"{} was given".format(type(signal_mask)))
if flat_signal:
signal_mask = ~signal_mask
if navigation_mask is None:
nav_mask = da.zeros(
self.axes_manager.navigation_shape[::-1],
chunks=nav_chunks,
dtype='bool')
else:
try:
nav_mask = to_array(navigation_mask, chunks=nav_chunks)
except ValueError:
# re-raise with a message
raise ValueError("navigation_mask has to be a signal, numpy or"
" dask array, but "
"{} was given".format(type(navigation_mask)))
if flat_signal:
nav_mask = ~nav_mask
for ind in indices:
chunk = get(data.dask,
(data.name, ) + ind + (0,) * bool(signalsize))
n_mask = get(nav_mask.dask, (nav_mask.name, ) + ind)
if flat_signal:
yield chunk[n_mask, ...][..., signal_mask]
else:
chunk = chunk.copy()
value = np.nan if np.can_cast('float', chunk.dtype) else 0
chunk[n_mask, ...] = value
chunk[..., signal_mask] = value
yield chunk.reshape(chunk.shape[:-1] +
self.axes_manager.signal_shape[::-1])
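# Usage sketch (assumes `s` is a LazySignal and `model` is some estimator with a partial_fit method,
# e.g. sklearn's IncrementalPCA -- both names are hypothetical here):
#   for block in s._block_iterator(flat_signal=True):
#       # each block is a (navigation_size_of_chunk, signal_size) numpy array
#       model.partial_fit(block)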
def decomposition(self,
normalize_poissonian_noise=False,
algorithm='svd',
output_dimension=None,
signal_mask=None,
navigation_mask=None,
get=threaded.get,
num_chunks=None,
reproject=True,
bounds=False,
**kwargs):
"""Perform Incremental (Batch) decomposition on the data, keeping n
significant components.
Parameters
----------
normalize_poissonian_noise : bool
If True, scale the SI to normalize Poissonian noise
algorithm : str
One of ('svd', 'PCA', 'ORPCA', 'ONMF'). By default 'svd',
lazy SVD decomposition from dask.
output_dimension : int
the number of significant components to keep. If None, keep all
(only valid for SVD)
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
num_chunks : int
the number of dask chunks to pass to the decomposition model.
More chunks require more memory, but should run faster. Will be
increased to contain at least output_dimension signals.
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not used in the
decomposition.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not used in the
decomposition.
reproject : bool
Reproject data on the learnt components (factors) after learning.
**kwargs
passed to the partial_fit/fit functions.
Notes
-----
Various algorithm parameters and their default values:
ONMF:
lambda1=1,
kappa=1,
robust=False,
store_r=False
batch_size=None
ORPCA:
fast=True,
lambda1=None,
lambda2=None,
method=None,
learning_rate=None,
init=None,
training_samples=None,
momentum=None
PCA:
batch_size=None,
copy=True,
white=False
"""
if bounds:
msg = (
"The `bounds` keyword is deprecated and will be removed "
"in v2.0. Since version > 1.3 this has no effect.")
warnings.warn(msg, VisibleDeprecationWarning)
explained_variance = None
explained_variance_ratio = None
_al_data = self._data_aligned_with_axes
nav_chunks = _al_data.chunks[:self.axes_manager.navigation_dimension]
sig_chunks = _al_data.chunks[self.axes_manager.navigation_dimension:]
num_chunks = 1 if num_chunks is None else num_chunks
blocksize = np.min([multiply(ar) for ar in product(*nav_chunks)])
nblocks = multiply([len(c) for c in nav_chunks])
if algorithm != "svd" and output_dimension is None:
raise ValueError("With the %s the output_dimension "
"must be specified" % algorithm)
if output_dimension and blocksize / output_dimension < num_chunks:
num_chunks = np.ceil(blocksize / output_dimension)
blocksize *= num_chunks
# LEARN
if algorithm == 'PCA':
from sklearn.decomposition import IncrementalPCA
obj = IncrementalPCA(n_components=output_dimension)
method = partial(obj.partial_fit, **kwargs)
reproject = True
elif algorithm == 'ORPCA':
from hyperspy.learn.rpca import ORPCA
kwg = {'fast': True}
kwg.update(kwargs)
obj = ORPCA(output_dimension, **kwg)
method = partial(obj.fit, iterating=True)
elif algorithm == 'ONMF':
from hyperspy.learn.onmf import ONMF
batch_size = kwargs.pop('batch_size', None)
obj = ONMF(output_dimension, **kwargs)
method = partial(obj.fit, batch_size=batch_size)
elif algorithm != "svd":
raise ValueError('algorithm not known')
original_data = self.data
try:
if normalize_poissonian_noise:
data = self._data_aligned_with_axes
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
nm = da.logical_not(
da.zeros(
self.axes_manager.navigation_shape[::-1],
chunks=nav_chunks)
if navigation_mask is None else to_array(
navigation_mask, chunks=nav_chunks))
sm = da.logical_not(
da.zeros(
self.axes_manager.signal_shape[::-1],
chunks=sig_chunks)
if signal_mask is None else to_array(
signal_mask, chunks=sig_chunks))
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
bH, aG = da.compute(
data.sum(axis=tuple(range(ndim))),
data.sum(axis=tuple(range(ndim, ndim + sdim))))
bH = da.where(sm, bH, 1)
aG = da.where(nm, aG, 1)
raG = da.sqrt(aG)
rbH = da.sqrt(bH)
coeff = raG[(..., ) + (None, ) * rbH.ndim] *\
rbH[(None, ) * raG.ndim + (...,)]
coeff = coeff.map_blocks(np.nan_to_num)  # map_blocks returns a new array; reassign so NaN/inf are actually cleaned
coeff = da.where(coeff == 0, 1, coeff)
data = data / coeff
self.data = data
# LEARN
if algorithm == "svd":
reproject = False
from dask.array.linalg import svd
try:
self._unfolded4decomposition = self.unfold()
# TODO: implement masking
if navigation_mask or signal_mask:
raise NotImplementedError(
"Masking is not yet implemented for lazy SVD."
)
U, S, V = svd(self.data)
factors = V.T
explained_variance = S ** 2 / self.data.shape[0]
loadings = U * S
finally:
if self._unfolded4decomposition is True:
self.fold()
self._unfolded4decomposition = False
else:
this_data = []
try:
for chunk in progressbar(
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask),
total=nblocks,
leave=True,
desc='Learn'):
this_data.append(chunk)
if len(this_data) == num_chunks:
thedata = np.concatenate(this_data, axis=0)
method(thedata)
this_data = []
if len(this_data):
thedata = np.concatenate(this_data, axis=0)
method(thedata)
except KeyboardInterrupt:
pass
# GET ALREADY CALCULATED RESULTS
if algorithm == 'PCA':
explained_variance = obj.explained_variance_
explained_variance_ratio = obj.explained_variance_ratio_
factors = obj.components_.T
elif algorithm == 'ORPCA':
_, _, U, S, V = obj.finish()
factors = U * S
loadings = V
explained_variance = S**2 / len(factors)
elif algorithm == 'ONMF':
factors, loadings = obj.finish()
loadings = loadings.T
# REPROJECT
if reproject:
if algorithm == 'PCA':
method = obj.transform
def post(a): return np.concatenate(a, axis=0)
elif algorithm == 'ORPCA':
method = obj.project
obj.R = []
def post(a): return obj.finish()[4]
elif algorithm == 'ONMF':
method = obj.project
def post(a): return np.concatenate(a, axis=1).T
_map = map(lambda thing: method(thing),
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask))
H = []
try:
for thing in progressbar(
_map, total=nblocks, desc='Project'):
H.append(thing)
except KeyboardInterrupt:
pass
loadings = post(H)
if explained_variance is not None and \
explained_variance_ratio is None:
explained_variance_ratio = \
explained_variance / explained_variance.sum()
# RESHUFFLE "blocked" LOADINGS
ndim = self.axes_manager.navigation_dimension
if algorithm != "svd": # Only needed for online algorithms
try:
loadings = _reshuffle_mixed_blocks(
loadings,
ndim,
(output_dimension,),
nav_chunks).reshape((-1, output_dimension))
except ValueError:
# In case the projection step was not finished, it's left
# as scrambled
pass
finally:
self.data = original_data
target = self.learning_results
target.decomposition_algorithm = algorithm
target.output_dimension = output_dimension
if algorithm != "svd":
target._object = obj
target.factors = factors
target.loadings = loadings
target.explained_variance = explained_variance
target.explained_variance_ratio = explained_variance_ratio
# Rescale the results if the noise was normalized
if normalize_poissonian_noise is True:
target.factors = target.factors * rbH.ravel()[:, np.newaxis]
target.loadings = target.loadings * raG.ravel()[:, np.newaxis]
def _reshuffle_mixed_blocks(array, ndim, sshape, nav_chunks):
"""Reshuffles dask block-shuffled array
Parameters
----------
array : np.ndarray
the array to reshuffle
ndim : int
the number of navigation (shuffled) dimensions
sshape : tuple of ints
the shape of the non-navigation (signal) part appended to each block
nav_chunks : tuple of tuples
the navigation chunk structure of the original dask array
"""
splits = np.cumsum([multiply(ar)
for ar in product(*nav_chunks)][:-1]).tolist()
if splits:
all_chunks = [
ar.reshape(shape + sshape)
for shape, ar in zip(
product(*nav_chunks), np.split(array, splits))
]
def split_stack_list(what, step, axis):
total = len(what)
if total != step:
return [
np.concatenate(
what[i:i + step], axis=axis)
for i in range(0, total, step)
]
else:
return np.concatenate(what, axis=axis)
for chunks, axis in zip(nav_chunks[::-1], range(ndim - 1, -1, -1)):
step = len(chunks)
all_chunks = split_stack_list(all_chunks, step, axis)
return all_chunks
else:
return array
| gpl-3.0 |
sriharshams/mlnd | smartcab/visuals.py | 17 | 7709 | ###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
###########################################
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import ast
def calculate_safety(data):
""" Calculates the safety rating of the smartcab during testing. """
good_ratio = data['good_actions'].sum() * 1.0 / \
(data['initial_deadline'] - data['final_deadline']).sum()
if good_ratio == 1: # Perfect driving
return ("A+", "green")
else: # Imperfect driving
if data['actions'].apply(lambda x: ast.literal_eval(x)[4]).sum() > 0: # Major accident
return ("F", "red")
elif data['actions'].apply(lambda x: ast.literal_eval(x)[3]).sum() > 0: # Minor accident
return ("D", "#EEC700")
elif data['actions'].apply(lambda x: ast.literal_eval(x)[2]).sum() > 0: # Major violation
return ("C", "#EEC700")
else: # Minor violation
minor = data['actions'].apply(lambda x: ast.literal_eval(x)[1]).sum()
if minor >= len(data)/2: # Minor violation in at least half of the trials
return ("B", "green")
else:
return ("A", "green")
def calculate_reliability(data):
""" Calculates the reliability rating of the smartcab during testing. """
success_ratio = data['success'].sum() * 1.0 / len(data)
if success_ratio == 1: # Always meets deadline
return ("A+", "green")
else:
if success_ratio >= 0.90:
return ("A", "green")
elif success_ratio >= 0.80:
return ("B", "green")
elif success_ratio >= 0.70:
return ("C", "#EEC700")
elif success_ratio >= 0.60:
return ("D", "#EEC700")
else:
return ("F", "red")
def plot_trials(csv):
""" Plots the data from logged metrics during a simulation."""
data = pd.read_csv(os.path.join("logs", csv))
if len(data) < 10:
print "Not enough data collected to create a visualization."
print "At least 20 trials are required."
return
# Create additional features
data['average_reward'] = (data['net_reward'] / (data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean()
data['reliability_rate'] = (data['success']*100).rolling(window=10, center=False).mean() # compute the rolling success (reliability) rate with window=10
data['good_actions'] = data['actions'].apply(lambda x: ast.literal_eval(x)[0])
data['good'] = (data['good_actions'] * 1.0 / \
(data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean()
data['minor'] = (data['actions'].apply(lambda x: ast.literal_eval(x)[1]) * 1.0 / \
(data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean()
data['major'] = (data['actions'].apply(lambda x: ast.literal_eval(x)[2]) * 1.0 / \
(data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean()
data['minor_acc'] = (data['actions'].apply(lambda x: ast.literal_eval(x)[3]) * 1.0 / \
(data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean()
data['major_acc'] = (data['actions'].apply(lambda x: ast.literal_eval(x)[4]) * 1.0 / \
(data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean()
data['epsilon'] = data['parameters'].apply(lambda x: ast.literal_eval(x)['e'])
data['alpha'] = data['parameters'].apply(lambda x: ast.literal_eval(x)['a'])
# Create training and testing subsets
training_data = data[data['testing'] == False]
testing_data = data[data['testing'] == True]
plt.figure(figsize=(12,8))
###############
### Average step reward plot
###############
ax = plt.subplot2grid((6,6), (0,3), colspan=3, rowspan=2)
ax.set_title("10-Trial Rolling Average Reward per Action")
ax.set_ylabel("Reward per Action")
ax.set_xlabel("Trial Number")
ax.set_xlim((10, len(training_data)))
# Create plot-specific data
step = training_data[['trial','average_reward']].dropna()
ax.axhline(xmin = 0, xmax = 1, y = 0, color = 'black', linestyle = 'dashed')
ax.plot(step['trial'], step['average_reward'])
###############
### Parameters Plot
###############
ax = plt.subplot2grid((6,6), (2,3), colspan=3, rowspan=2)
# Check whether the agent was expected to learn
if csv != 'sim_no-learning.csv':
ax.set_ylabel("Parameter Value")
ax.set_xlabel("Trial Number")
ax.set_xlim((1, len(training_data)))
ax.set_ylim((0, 1.05))
ax.plot(training_data['trial'], training_data['epsilon'], color='blue', label='Exploration factor')
ax.plot(training_data['trial'], training_data['alpha'], color='green', label='Learning factor')
ax.legend(bbox_to_anchor=(0.5,1.19), fancybox=True, ncol=2, loc='upper center', fontsize=10)
else:
ax.axis('off')
ax.text(0.52, 0.30, "Simulation completed\nwith learning disabled.", fontsize=24, ha='center', style='italic')
###############
### Bad Actions Plot
###############
actions = training_data[['trial','good', 'minor','major','minor_acc','major_acc']].dropna()
maximum = (1 - actions['good']).values.max()
ax = plt.subplot2grid((6,6), (0,0), colspan=3, rowspan=4)
ax.set_title("10-Trial Rolling Relative Frequency of Bad Actions")
ax.set_ylabel("Relative Frequency")
ax.set_xlabel("Trial Number")
ax.set_ylim((0, maximum + 0.01))
ax.set_xlim((10, len(training_data)))
ax.set_yticks(np.linspace(0, maximum+0.01, 10))
ax.plot(actions['trial'], (1 - actions['good']), color='black', label='Total Bad Actions', linestyle='dotted', linewidth=3)
ax.plot(actions['trial'], actions['minor'], color='orange', label='Minor Violation', linestyle='dashed')
ax.plot(actions['trial'], actions['major'], color='orange', label='Major Violation', linewidth=2)
ax.plot(actions['trial'], actions['minor_acc'], color='red', label='Minor Accident', linestyle='dashed')
ax.plot(actions['trial'], actions['major_acc'], color='red', label='Major Accident', linewidth=2)
ax.legend(loc='upper right', fancybox=True, fontsize=10)
###############
### Rolling Success-Rate plot
###############
ax = plt.subplot2grid((6,6), (4,0), colspan=4, rowspan=2)
ax.set_title("10-Trial Rolling Rate of Reliability")
ax.set_ylabel("Rate of Reliability")
ax.set_xlabel("Trial Number")
ax.set_xlim((10, len(training_data)))
ax.set_ylim((-5, 105))
ax.set_yticks(np.arange(0, 101, 20))
ax.set_yticklabels(['0%', '20%', '40%', '60%', '80%', '100%'])
# Create plot-specific data
trial = training_data.dropna()['trial']
rate = training_data.dropna()['reliability_rate']
# Rolling success rate
ax.plot(trial, rate, label="Reliability Rate", color='blue')
###############
### Test results
###############
ax = plt.subplot2grid((6,6), (4,4), colspan=2, rowspan=2)
ax.axis('off')
if len(testing_data) > 0:
safety_rating, safety_color = calculate_safety(testing_data)
reliability_rating, reliability_color = calculate_reliability(testing_data)
# Write success rate
ax.text(0.40, .9, "{} testing trials simulated.".format(len(testing_data)), fontsize=14, ha='center')
ax.text(0.40, 0.7, "Safety Rating:", fontsize=16, ha='center')
ax.text(0.40, 0.42, "{}".format(safety_rating), fontsize=40, ha='center', color=safety_color)
ax.text(0.40, 0.27, "Reliability Rating:", fontsize=16, ha='center')
ax.text(0.40, 0, "{}".format(reliability_rating), fontsize=40, ha='center', color=reliability_color)
else:
ax.text(0.36, 0.30, "Simulation completed\nwith testing disabled.", fontsize=20, ha='center', style='italic')
plt.tight_layout()
plt.show()
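# Usage sketch: the csv is read from ./logs/, e.g. plot_trials('sim_no-learning.csv') or
# plot_trials('sim_improved-learning.csv') (the latter file name is illustrative).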
| apache-2.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/examples/user_interfaces/embedding_in_qt.py | 3 | 4389 | #! /usr/bin/env python
# embedding_in_qt.py --- Simple Qt application embedding matplotlib canvases
#
# Copyright (C) 2005 Florent Rougon
#
# This file is an example program for matplotlib. It may be used and
# modified with no restriction; raw copies as well as modified versions
# may be distributed without limitation.
from __future__ import unicode_literals
import sys, os, random
from qt import *
from numpy import arange, sin, pi
from matplotlib.backends.backend_qtagg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
# This seems to be what PyQt expects, according to the examples shipped in
# its distribution.
TRUE = 1
FALSE = 0
progname = os.path.basename(sys.argv[0])
progversion = "0.1"
# Note: color-intensive applications may require a different color allocation
# strategy.
#QApplication.setColorSpec(QApplication.NormalColor)
app = QApplication(sys.argv)
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.fig.add_subplot(111)
# We want the axes cleared every time plot() is called
self.axes.hold(False)
self.compute_initial_figure()
FigureCanvas.__init__(self, self.fig)
self.reparent(parent, QPoint(0, 0))
FigureCanvas.setSizePolicy(self,
QSizePolicy.Expanding,
QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def sizeHint(self):
w, h = self.get_width_height()
return QSize(w, h)
def minimumSizeHint(self):
return QSize(10, 10)
class MyStaticMplCanvas(MyMplCanvas):
"""Simple canvas with a sine plot."""
def compute_initial_figure(self):
t = arange(0.0, 3.0, 0.01)
s = sin(2*pi*t)
self.axes.plot(t, s)
class MyDynamicMplCanvas(MyMplCanvas):
"""A canvas that updates itself every second with a new plot."""
def __init__(self, *args, **kwargs):
MyMplCanvas.__init__(self, *args, **kwargs)
timer = QTimer(self, "canvas update timer")
QObject.connect(timer, SIGNAL("timeout()"), self.update_figure)
timer.start(1000, FALSE)
def compute_initial_figure(self):
self.axes.plot([0, 1, 2, 3], [1, 2, 0, 4], 'r')
def update_figure(self):
# Build a list of 4 random integers between 0 and 10 (both inclusive)
l = [ random.randint(0, 10) for i in range(4) ]
self.axes.plot([0, 1, 2, 3], l, 'r')
self.draw()
class ApplicationWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self, None,
"application main window",
Qt.WType_TopLevel | Qt.WDestructiveClose)
self.file_menu = QPopupMenu(self)
self.file_menu.insertItem('&Quit', self.fileQuit, Qt.CTRL + Qt.Key_Q)
self.menuBar().insertItem('&File', self.file_menu)
self.help_menu = QPopupMenu(self)
self.menuBar().insertSeparator()
self.menuBar().insertItem('&Help', self.help_menu)
self.help_menu.insertItem('&About', self.about)
self.main_widget = QWidget(self, "Main widget")
l = QVBoxLayout(self.main_widget)
sc = MyStaticMplCanvas(self.main_widget, width=5, height=4, dpi=100)
dc = MyDynamicMplCanvas(self.main_widget, width=5, height=4, dpi=100)
l.addWidget(sc)
l.addWidget(dc)
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
self.statusBar().message("All hail matplotlib!", 2000)
def fileQuit(self):
qApp.exit(0)
def closeEvent(self, ce):
self.fileQuit()
def about(self):
QMessageBox.about(self, "About %s" % progname,
"""%(prog)s version %(version)s
Copyright \N{COPYRIGHT SIGN} 2005 Florent Rougon
This program is a simple example of a Qt application embedding matplotlib
canvases.
It may be used and modified with no restriction; raw copies as well as
modified versions may be distributed without limitation."""
% {"prog": progname, "version": progversion})
aw = ApplicationWindow()
aw.setCaption("%s" % progname)
qApp.setMainWidget(aw)
aw.show()
sys.exit(qApp.exec_loop())
| mit |
jereze/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader, or by setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
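# Added note (illustrative, not from the original example): with only the four
# uncommented entries above, the grid already has 3 * 2 * 2 * 2 = 24 candidate
# parameter settings, and GridSearchCV refits the pipeline once per candidate
# and per CV fold, so every extra uncommented line multiplies the run time.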
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/ipykernel/eventloops.py | 5 | 9098 | # encoding: utf-8
"""Event loop integration for the ZeroMQ-based kernels."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
import platform
import zmq
from distutils.version import LooseVersion as V
from traitlets.config.application import Application
from IPython.utils import io
def _use_appnope():
"""Should we use appnope for dealing with OS X app nap?
Checks if we are on OS X 10.9 or greater.
"""
return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9')
def _notify_stream_qt(kernel, stream):
from IPython.external.qt_for_kernel import QtCore
if _use_appnope() and kernel._darwin_app_nap:
from appnope import nope_scope as context
else:
from contextlib import contextmanager
@contextmanager
def context():
yield
def process_stream_events():
while stream.getsockopt(zmq.EVENTS) & zmq.POLLIN:
with context():
kernel.do_one_iteration()
fd = stream.getsockopt(zmq.FD)
notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Read, kernel.app)
notifier.activated.connect(process_stream_events)
# mapping of keys to loop functions
loop_map = {
'inline': None,
'nbagg': None,
'notebook': None,
'ipympl': None,
None : None,
}
def register_integration(*toolkitnames):
"""Decorator to register an event loop to integrate with the IPython kernel
    The decorator takes one or more names under which to register the event
    loop for the %gui magic.
You can provide alternative names for the same toolkit.
The decorated function should take a single argument, the IPython kernel
instance, arrange for the event loop to call ``kernel.do_one_iteration()``
at least every ``kernel._poll_interval`` seconds, and start the event loop.
:mod:`ipykernel.eventloops` provides and registers such functions
for a few common event loops.
"""
def decorator(func):
for name in toolkitnames:
loop_map[name] = func
return func
return decorator
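# Hypothetical usage sketch (added for illustration; `mytoolkit` and its API
# are assumptions, not part of ipykernel): a new GUI toolkit would be hooked
# in roughly like this, polling the kernel at least every
# `kernel._poll_interval` seconds before starting the toolkit's event loop.
#
#     @register_integration('mytoolkit')
#     def loop_mytoolkit(kernel):
#         app = mytoolkit.App()
#         timer = mytoolkit.Timer(int(1000 * kernel._poll_interval),
#                                 kernel.do_one_iteration)
#         timer.start()
#         app.mainloop()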
@register_integration('qt', 'qt4')
def loop_qt4(kernel):
"""Start a kernel with PyQt4 event loop integration."""
from IPython.lib.guisupport import get_app_qt4, start_event_loop_qt4
kernel.app = get_app_qt4([" "])
kernel.app.setQuitOnLastWindowClosed(False)
for s in kernel.shell_streams:
_notify_stream_qt(kernel, s)
start_event_loop_qt4(kernel.app)
@register_integration('qt5')
def loop_qt5(kernel):
"""Start a kernel with PyQt5 event loop integration."""
os.environ['QT_API'] = 'pyqt5'
return loop_qt4(kernel)
@register_integration('wx')
def loop_wx(kernel):
"""Start a kernel with wx event loop support."""
import wx
from IPython.lib.guisupport import start_event_loop_wx
if _use_appnope() and kernel._darwin_app_nap:
# we don't hook up App Nap contexts for Wx,
# just disable it outright.
from appnope import nope
nope()
doi = kernel.do_one_iteration
# Wx uses milliseconds
poll_interval = int(1000*kernel._poll_interval)
# We have to put the wx.Timer in a wx.Frame for it to fire properly.
# We make the Frame hidden when we create it in the main app below.
class TimerFrame(wx.Frame):
def __init__(self, func):
wx.Frame.__init__(self, None, -1)
self.timer = wx.Timer(self)
# Units for the timer are in milliseconds
self.timer.Start(poll_interval)
self.Bind(wx.EVT_TIMER, self.on_timer)
self.func = func
def on_timer(self, event):
self.func()
# We need a custom wx.App to create our Frame subclass that has the
# wx.Timer to drive the ZMQ event loop.
class IPWxApp(wx.App):
def OnInit(self):
self.frame = TimerFrame(doi)
self.frame.Show(False)
return True
# The redirect=False here makes sure that wx doesn't replace
# sys.stdout/stderr with its own classes.
kernel.app = IPWxApp(redirect=False)
# The import of wx on Linux sets the handler for signal.SIGINT
# to 0. This is a bug in wx or gtk. We fix by just setting it
# back to the Python default.
import signal
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
start_event_loop_wx(kernel.app)
@register_integration('tk')
def loop_tk(kernel):
"""Start a kernel with the Tk event loop."""
try:
from tkinter import Tk # Py 3
except ImportError:
from Tkinter import Tk # Py 2
doi = kernel.do_one_iteration
# Tk uses milliseconds
poll_interval = int(1000*kernel._poll_interval)
# For Tkinter, we create a Tk object and call its withdraw method.
class Timer(object):
def __init__(self, func):
self.app = Tk()
self.app.withdraw()
self.func = func
def on_timer(self):
self.func()
self.app.after(poll_interval, self.on_timer)
def start(self):
self.on_timer() # Call it once to get things going.
self.app.mainloop()
kernel.timer = Timer(doi)
kernel.timer.start()
@register_integration('gtk')
def loop_gtk(kernel):
"""Start the kernel, coordinating with the GTK event loop"""
from .gui.gtkembed import GTKEmbed
gtk_kernel = GTKEmbed(kernel)
gtk_kernel.start()
@register_integration('gtk3')
def loop_gtk3(kernel):
"""Start the kernel, coordinating with the GTK event loop"""
from .gui.gtk3embed import GTKEmbed
gtk_kernel = GTKEmbed(kernel)
gtk_kernel.start()
@register_integration('osx')
def loop_cocoa(kernel):
"""Start the kernel, coordinating with the Cocoa CFRunLoop event loop
via the matplotlib MacOSX backend.
"""
import matplotlib
    # Use LooseVersion (imported above as V) rather than raw string comparison.
    if V(matplotlib.__version__) < V('1.1.0'):
kernel.log.warn(
"MacOSX backend in matplotlib %s doesn't have a Timer, "
"falling back on Tk for CFRunLoop integration. Note that "
"even this won't work if Tk is linked against X11 instead of "
"Cocoa (e.g. EPD). To use the MacOSX backend in the kernel, "
"you must use matplotlib >= 1.1.0, or a native libtk."
)
return loop_tk(kernel)
from matplotlib.backends.backend_macosx import TimerMac, show
# scale interval for sec->ms
poll_interval = int(1000*kernel._poll_interval)
real_excepthook = sys.excepthook
def handle_int(etype, value, tb):
"""don't let KeyboardInterrupts look like crashes"""
if etype is KeyboardInterrupt:
io.raw_print("KeyboardInterrupt caught in CFRunLoop")
else:
real_excepthook(etype, value, tb)
# add doi() as a Timer to the CFRunLoop
def doi():
# restore excepthook during IPython code
sys.excepthook = real_excepthook
kernel.do_one_iteration()
# and back:
sys.excepthook = handle_int
t = TimerMac(poll_interval)
t.add_callback(doi)
t.start()
# but still need a Poller for when there are no active windows,
# during which time mainloop() returns immediately
poller = zmq.Poller()
if kernel.control_stream:
poller.register(kernel.control_stream.socket, zmq.POLLIN)
for stream in kernel.shell_streams:
poller.register(stream.socket, zmq.POLLIN)
while True:
try:
# double nested try/except, to properly catch KeyboardInterrupt
# due to pyzmq Issue #130
try:
# don't let interrupts during mainloop invoke crash_handler:
sys.excepthook = handle_int
show.mainloop()
sys.excepthook = real_excepthook
# use poller if mainloop returned (no windows)
# scale by extra factor of 10, since it's a real poll
poller.poll(10*poll_interval)
kernel.do_one_iteration()
except:
raise
except KeyboardInterrupt:
# Ctrl-C shouldn't crash the kernel
io.raw_print("KeyboardInterrupt caught in kernel")
finally:
# ensure excepthook is restored
sys.excepthook = real_excepthook
def enable_gui(gui, kernel=None):
"""Enable integration with a given GUI"""
if gui not in loop_map:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys())
raise ValueError(e)
if kernel is None:
if Application.initialized():
kernel = getattr(Application.instance(), 'kernel', None)
if kernel is None:
raise RuntimeError("You didn't specify a kernel,"
" and no IPython Application with a kernel appears to be running."
)
loop = loop_map[gui]
if loop and kernel.eventloop is not None and kernel.eventloop is not loop:
raise RuntimeError("Cannot activate multiple GUI eventloops")
kernel.eventloop = loop
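# Usage sketch (added for illustration): inside a running kernel an
# integration is typically activated with a call such as
#
#     enable_gui('qt')                      # hook the Qt event loop in
#     enable_gui('tk', kernel=my_kernel)    # or pass the kernel explicitly
#
# where `my_kernel` stands for whatever kernel instance is at hand.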
| gpl-3.0 |
BhallaLab/moose | moose-examples/neuroml/LIF/twoLIFxml_firing.py | 2 | 3087 | # -*- coding: utf-8 -*-
## all SI units
########################################################################################
## Plot the membrane potential for a leaky integrate and fire neuron with current injection
## Author: Aditya Gilra
## Creation Date: 2012-06-08
## Modification Date: 2012-06-08
########################################################################################
#import os
#os.environ['NUMPTHREADS'] = '1'
import sys
sys.path.append('../../../python')
## simulation parameters
SIMDT = 5e-5 # seconds
PLOTDT = 5e-5 # seconds
RUNTIME = 2.0 # seconds
injectI = 1e-8#2.5e-12 # Amperes
## moose imports
import moose
from moose.neuroml import *
from moose.utils import * # has setupTable(), resetSim() etc
import math
## import numpy and matplotlib in matlab style commands
from pylab import *
def create_twoLIFs():
NML = NetworkML({'temperature':37.0,'model_dir':'.'})
## Below returns populationDict = { 'populationname1':(cellname,{instanceid1:moosecell, ... }) , ... }
## and projectionDict = { 'projectionname1':(source,target,[(syn_name1,pre_seg_path,post_seg_path),...]) , ... }
(populationDict,projectionDict) = \
NML.readNetworkMLFromFile('twoLIFs.net.xml', {}, params={})
return populationDict,projectionDict
def run_twoLIFs():
## reset and run the simulation
print("Reinit MOOSE.")
    ## resetSim (from moose.utils) sets up the clocks and resets the simulation
resetSim(['/cells[0]'], SIMDT, PLOTDT, simmethod='ee')
print("Running now...")
moose.start(RUNTIME)
if __name__ == '__main__':
populationDict,projectionDict = create_twoLIFs()
## element returns the right element and error if not present
IF1Soma = moose.element(populationDict['LIFs'][1][0].path+'/soma_0')
IF1Soma.inject = injectI
IF2Soma = moose.element(populationDict['LIFs'][1][1].path+'/soma_0')
IF2Soma.inject = 0.0#injectI*2.0
#IF2Soma.inject = injectI
IF1vmTable = setupTable("vmTableIF1",IF1Soma,'Vm')
IF2vmTable = setupTable("vmTableIF2",IF2Soma,'Vm')
table_path = moose.element(IF1Soma.path+'/data').path
IF1spikesTable = moose.Table(table_path+'/spikesTable')
moose.connect(IF1Soma,'spikeOut',IF1spikesTable,'input') ## spikeGen gives spiketimes
## record Gk of the synapse on IF2
#print IF2Soma.children
IF2SynChanTable = moose.Table(table_path+'/synChanTable')
moose.connect(IF2SynChanTable,'requestOut',IF2Soma.path+'/exc_syn','getIk')
run_twoLIFs()
print(("Spiketimes :",IF1spikesTable.vector))
## plot the membrane potential of the neuron
timevec = arange(0.0,RUNTIME+PLOTDT/2.0,PLOTDT)
figure(facecolor='w')
plot(timevec, IF1vmTable.vector*1000,'r-')
xlabel('time(s)')
ylabel('Vm (mV)')
title('Vm of presynaptic IntFire')
figure(facecolor='w')
plot(timevec, IF2vmTable.vector*1000,'b-')
xlabel('time(s)')
ylabel('Vm (mV)')
title('Vm of postsynaptic IntFire')
figure(facecolor='w')
plot(timevec, IF2SynChanTable.vector*1e12,'b-')
xlabel('time(s)')
ylabel('Ik (pA)')
title('Ik entering postsynaptic IntFire')
show()
| gpl-3.0 |
cesans/mapache | doc/source/conf.py | 1 | 10103 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mapache documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 4 19:22:40 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import mock
sys.path.insert(0, os.path.abspath('../../'))
MOCK_MODULES = ['numpy', 'scipy', 'matplotlib', 'matplotlib.pylab', 'sklearn',
'sklearn.cluster', 'sklearn.utils', 'sklearn.gaussian_process']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.napoleon',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'mapache'
copyright = '2016, Carlos Sanchez'
author = 'Carlos Sanchez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'mapache v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'mapachedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mapache.tex', 'mapache Documentation',
'Carlos Sanchez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mapache', 'mapache Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mapache', 'mapache Documentation',
author, 'mapache', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
| bsd-3-clause |
harkrish1/sp17-i524 | project/S17-IO-3017/code/projectearth/kmeansplot.py | 14 | 3018 | import requests
import time
import dblayer
import plotly
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import random
from sklearn.cluster import KMeans
import testfile
NUM_CLUSTER = 3
def generate_color():
    # Three hex channels for '#rrggbb'; use range(3) explicitly (the original
    # range(NUM_CLUSTER) only worked because NUM_CLUSTER happens to equal 3).
    color = '#{:02x}{:02x}{:02x}'.format(*[random.randint(0, 255) for _ in range(3)])
return color
# Create random colors in list
color_list = []
for i in range(NUM_CLUSTER):
color_list.append(generate_color())
def showMagnitudesInCluster(data):
kmeans = KMeans(n_clusters=NUM_CLUSTER)
kmeans.fit(data)
labels = kmeans.labels_
centroids = kmeans.cluster_centers_
plot_data = []
for i in range(NUM_CLUSTER):
ds = data[np.where(labels == i)]
clustername = "Cluster " + str(i+1)
        trace = go.Scatter(x=ds[:, 0], y=ds[:, 1], mode='markers', showlegend=False, name=clustername, marker=dict(size=5, color=color_list[i]))
plot_data.append(trace)
# plot the centroids
trace = go.Scatter(x=centroids[i, 0], y=centroids[i, 1], mode='markers', marker=dict(size=10, color='black'))
plot_data.append(trace)
layout = go.Layout(title='Magnitude Vs. Depth - K-Means Clusters', titlefont=dict(family='Courier New, monospace',size=20,color='#7f7f7f'),
xaxis=dict(title='Depth of Earthquake', titlefont=dict(family='Courier New, monospace',size=18,color='#7f7f7f')),
yaxis=dict(title='Magnitude',titlefont=dict(family='Courier New, monospace',size=18,color='#7f7f7f'))
)
fig = go.Figure(data=plot_data, layout=layout)
div = plotly.offline.plot(fig, include_plotlyjs=True, output_type='div')
return div
def mkMag():
#### TME: Get start time
start_time = time.time()
####
sess = requests.Session()
dbobj = dblayer.classDBLayer()
projection = [
{"$project": {"_id": 0, "mag": "$properties.mag", "depth": {"$arrayElemAt": ["$geometry.coordinates", 2]}}}]
dframe_mag = pd.DataFrame(list(dbobj.doaggregate(projection)))
#### TME: Elapsed time taken to read data from MongoDB
fileobj = testfile.classFileWrite()
elapsed = time.time() - start_time
fileobj.writeline()
str1 = str(elapsed) + " secs required to read " + str(dframe_mag['depth'].count()) + " records from database."
fileobj.writelog("Reading Magnitude and Depth data")
fileobj.writelog(str1)
####
#### TME: Get start time
start_time = time.time()
####
div = showMagnitudesInCluster(dframe_mag.values)
response = """<html><title></title><head><meta charset=\"utf8\"> </head> <body>""" + div + """</body> </html>"""
#### TME: Elapsed time taken to cluster and plot data
elapsed = time.time() - start_time
fileobj.writeline()
str1 = "Applying K-Means clustering and plotting its output \n" + "Time taken: " + str(elapsed)
fileobj.writelog(str1)
fileobj.writeline()
fileobj.closefile()
dbobj.closedb()
return response
| apache-2.0 |
johannes-scharlach/pod-control | src/analysis.py | 1 | 16379 | """Analyze how well a system can be reduced by POD methods.
Evaluate pod.py and how well it fits a particular problem. This file contains
helper functions to compare reductions, create plots and create TeX tables.
Notes
-----
This file should also take care of profiling in the future.
"""
from __future__ import division, print_function
import random
import math
import numpy as np
from scipy import linalg
from matplotlib.pyplot import plot, subplot, legend, figure, semilogy
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import example2sys as e2s
import pod
import time
font_options = {}
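# NOTE (added): `Timer` is used as a context manager throughout this module but
# is neither imported nor defined in the file as shown.  The class below is a
# minimal sketch matching that usage (`with Timer(): ...` prints the elapsed
# wall-clock time); the original project may ship its own implementation.
class Timer(object):
    """Minimal timing context manager; prints the elapsed time on exit."""
    def __enter__(self):
        self._start = time.time()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        print("elapsed time: {:.3f} s".format(time.time() - self._start))
        return False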
def _systemsToReduce(k_bal_trunc, k_cont_trunc):
red_sys = []
for k in k_bal_trunc:
if k:
with_k_str = "\nwith k = {}".format(k)
else:
with_k_str = ""
red_sys.append({"name": "balanced truncation" + with_k_str,
"shortname": "BT",
"reduction": "truncation_square_root_trans_matrix",
"k": k})
for k in k_cont_trunc:
with_k_str = "\nwith k = {}".format(k)
red_sys.append({"name": "controllability truncation" + with_k_str,
"shortname": "CT",
"reduction": "controllability_truncation",
"k": k})
return red_sys
def _relativeErrors(Y, Yhat, min_error=0.):
diff = Y - Yhat
Y_above_min = np.where(abs(diff) <= min_error,
np.copysign(np.inf, diff),
np.copysign(Y, diff))
err = diff / Y_above_min
return diff, err
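# Clarifying note (added): in `_relativeErrors` above, entries whose absolute
# error is within `min_error` get a +/-inf denominator, so their relative error
# collapses to 0; everywhere else `diff / copysign(Y, diff)` yields the usual
# non-negative relative error |Y - Yhat| / |Y|.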
def reducedAnalysis1D(unred_sys, k=10, k2=28,
T0=0., T=1., number_of_steps=100):
print("REDUCTIONS\n--------------")
k_bal_trunc = [None, k]
k_cont_trunc = [k2] * (k2 is not None) + [k]
red_sys = _systemsToReduce(k_bal_trunc, k_cont_trunc)
red_sys = reduce(unred_sys[0]["sys"], red_sys)
print("===============\nEVALUATIONS\n===============")
timeSteps = list(np.linspace(T0, T, number_of_steps))
systems = unred_sys + red_sys
for system in systems:
print(system["name"])
with Timer():
system["Y"] = system["sys"](timeSteps)
print("===============\nERRORS\n===============")
norm_order = np.inf
Y = systems[0]["Y"]
for system in systems:
print(system["name"], "has order", system["sys"].order)
system["diff"], system["rel_eps"] = \
zip(*[_relativeErrors(y, yhat, system.get("error_bound", 0.))
for y, yhat in zip(Y, system["Y"])])
system["eps"] = [linalg.norm(diff, ord=norm_order)
for diff in system["diff"]]
print("and a maximal error of {}".format(max(system["eps"])))
print("and an error at t=T of {}".format(system["eps"][-1]))
print("==============\nPLOTS\n==============")
figure(1)
for system in systems:
plot(timeSteps, system["Y"], label=system["name"])
legend(loc="lower right")
figure(2)
for system in systems[1:4]:
subplot(1, 2, 1)
plot(timeSteps, system["eps"], label=system["name"])
legend(loc="upper left")
for system in systems[4:]:
subplot(1, 2, 2)
plot(timeSteps, system["eps"], label=system["name"])
legend(loc="upper left")
markers = ['o', 'v', '*', 'x', 'd']
figure(3)
for system, marker in zip(systems[1:], markers):
sv = list(system["sys"].hsv)
semilogy(range(len(sv)), sv,
marker=marker, label=system["name"])
legend(loc="lower left")
return systems
def reducedAnalysis2D(unred_sys, control, k=10, k2=None,
T0=0., T=1., L=1., number_of_steps=100,
picture_destination=
"../plots/plot_{}_t{:.2f}_azim_{}.png"):
print("REDUCTIONS\n--------------")
k_bal_trunc = [None, k]
k_cont_trunc = [k2] * (k2 is not None) + [k]
red_sys = _systemsToReduce(k_bal_trunc, k_cont_trunc)
red_sys = reduce(unred_sys[0]["sys"], red_sys)
print("============\nEVALUATIONS\n===============")
timeSteps = list(np.linspace(T0, T, number_of_steps))
systems = unred_sys + red_sys
for system in systems:
print(system["name"])
with Timer():
system["Y"] = system["sys"](timeSteps)
print("===============\nERRORS\n===============")
norm_order = np.inf
Y = systems[0]["Y"]
for system in systems:
print(system["name"], "has order", system["sys"].order)
system["diff"], system["rel_eps"] = \
zip(*[_relativeErrors(y, yhat, system.get("error_bound", 0.))
for y, yhat in zip(Y, system["Y"])])
system["eps"] = [linalg.norm(diff, ord=norm_order)
for diff in system["diff"]]
print("and a maximal error of {}".format(max(system["eps"])))
print("and an error at t=T of {}".format(system["eps"][-1]))
print("==============\nPLOTS\n==============")
draw_figures = raw_input("Do you want to draw figures? (y/N) ")
if draw_figures == "y":
figure(2)
for system in systems[1:]:
plot(timeSteps, system["eps"], label=system["name"])
legend(loc="upper left")
fig = figure()
number_of_outputs = len(Y[0])
X, Y = [], []
for i in range(len(timeSteps)):
X.append([timeSteps[i] for _ in range(number_of_outputs)])
Y.append([j*L/(number_of_outputs-1)
for j in range(number_of_outputs)])
axes = []
for system in range(len(systems)):
axes.append(fig.add_subplot(221+system+10*(len(systems) > 4),
projection='3d'))
Z = []
for i in range(len(timeSteps)):
Z.append(list(systems[system]["Y"][i]))
axes[-1].plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap=cm.coolwarm,
linewidth=0, antialiased=False)
axes[-1].set_title(systems[system]["name"])
axes[-1].set_xlabel("t")
axes[-1].set_ylabel("l")
axes[-1].set_zlabel("temperature")
save_figures = raw_input("Do you want to save the figures? (y/N) ")
if save_figures == "y":
for ii in xrange(360, 0, -10):
for ax in axes:
ax.azim = ii
fig.savefig(picture_destination.format(control, T,
axes[0].azim))
return systems
def controllableHeatSystemComparison(N=1000, k=None, k2=None,
r=0.05, T0=0., T=1., L=1.,
number_of_steps=100,
control="sin",
integrator="dopri5",
integrator_options={}):
if k is None:
k = max(1, int(N/50))
print("SETUP\n====================")
unred_sys = [{"name": "Controllable heat equation"}]
print(unred_sys[0]["name"])
with Timer():
unred_sys[0]["sys"] = e2s.controllableHeatSystem(N=N, L=L,
control=control)
unred_sys[0]["sys"].integrator = integrator
unred_sys[0]["sys"].integrator_options = integrator_options
pic_path = "../plots/controllable_heat_{}_t{:.2f}_azim_{}.png"
reducedAnalysis2D(unred_sys, control, k, k2, T0, T, L, number_of_steps,
picture_destination=pic_path)
def optionPricingComparison(N=1000, k=None,
option="put", r=0.05, T=1., K=10., L=None,
integrator="dopri5", integrator_options={}):
if k is None:
k = max(1, int(N/50))
if L is None:
L = 3 * K
print("SETUP\n====================")
unred_sys = [{"name": ("Heat equation for {} option pricing" +
" with n = {}").format(option, N)}]
print(unred_sys[0]["name"])
with Timer():
unred_sys[0]["sys"] = e2s.optionPricing(N=N, option=option,
r=r, T=T, K=K, L=L)
unred_sys[0]["sys"].integrator = integrator
unred_sys[0]["sys"].integrator_options = integrator_options
print("REDUCTIONS\n--------------")
k_bal_trunc = [None, k]
k_cont_trunc = [k]
red_sys = _systemsToReduce(k_bal_trunc, k_cont_trunc)
red_sys = reduce(unred_sys[0]["sys"], red_sys)
print("============\nEVALUATIONS\n===============")
timeSteps = list(np.linspace(0, T, 30))
systems = unred_sys + red_sys
for system in systems:
print(system["name"])
with Timer():
system["Y"] = system["sys"](timeSteps)
print("===============\nERRORS\n===============")
norm_order = np.inf
Y = systems[0]["Y"]
for system in systems:
print(system["name"], "has order", system["sys"].order)
system["eps"] = [0.] + [linalg.norm(y-yhat, ord=norm_order)
for y, yhat in zip(Y[1:], system["Y"][1:])]
print("and a maximal error of", max(system["eps"]))
print("==============\nPLOTS\n==============")
fig = figure(1)
N2 = int(1.5*K*N/L)
X, Y = [], []
for i in range(len(timeSteps)):
X.append([timeSteps[i] for _ in range(N2)])
Y.append([j*L/N for j in range(N2)])
axes = []
for system in range(6):
axes.append(fig.add_subplot(221+system, projection='3d'))
Z = []
for i in range(len(timeSteps)):
Z.append(list(systems[system]["Y"][i])[:N2])
axes[-1].plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
axes[-1].set_title(systems[system]["name"], **font_options)
axes[-1].set_xlabel("t", **font_options)
axes[-1].set_ylabel("K", **font_options)
axes[-1].set_zlabel("Lambda(K)", **font_options)
for ax in axes:
ax.azim = 26
fig.savefig("../plots/{}_option_azim_{}.png".format(option, axes[0].azim))
figure(2)
for system in systems[1:]:
plot(timeSteps, system["eps"], label=system["name"])
legend(loc="upper left")
def thermalRCNetworkComparison(R=1e90, C=1e87, n=100, k=10, k2=28, r=3,
T0=0., T=1., omega=math.pi, number_of_steps=100,
control="sin", input_scale=1.,
integrator="dopri5",
integrator_options={}):
u = lambda t, x=None: np.array([e2s.simple_functions[control](omega*t)])
print("===============\nSETUP\n===============")
unred_sys = [{"name": "Thermal RC Netwok with n = {}".format(n)}]
print(unred_sys[0]["name"])
with Timer():
C0, unred_sys[0]["sys"] = e2s.thermalRCNetwork(R, C, n, r, u,
input_scale=input_scale)
unred_sys[0]["sys"].integrator = integrator
unred_sys[0]["sys"].integrator_options = integrator_options
reducedAnalysis1D(unred_sys, k, k2, T0, T, number_of_steps)
def loadHeat(k=10, k2=28, T0=0., T=1., number_of_steps=100,
control="sin", omega=math.pi, control_scale=1.,
all_state_vars=False,
integrator="dopri5",
integrator_options={}):
u = lambda t, x=None: np.array([e2s.simple_functions[control](omega*t) *
control_scale])
unred_sys = [{"name": "Heat equation from\nthe SLICOT benchmarks",
"shortname": "Heat eq"}]
print(unred_sys[0]["name"])
with Timer():
unred_sys[0]["sys"] = e2s.example2sys("heat-cont.mat")
unred_sys[0]["sys"].control = u
unred_sys[0]["sys"].integrator = integrator
unred_sys[0]["sys"].integrator_options = integrator_options
if all_state_vars:
unred_sys[0]["sys"].C = np.eye(unred_sys[0]["sys"].order)
return unred_sys
def compareHeat(k=10, k2=28, T0=0., T=10., number_of_steps=300,
control="sin", omega=math.pi, control_scale=1.,
integrator="dopri5",
integrator_options={}):
unred_sys = loadHeat(k, k2, T0, T, number_of_steps,
control, omega, control_scale,
False,
integrator, integrator_options)
systems = reducedAnalysis1D(unred_sys, k, k2, T0, T, number_of_steps)
return systems
def compareHeatStates(k=10, k2=37, T0=0., T=10., number_of_steps=300,
control="sin", omega=math.pi, control_scale=1.,
integrator="dopri5",
integrator_options={}):
unred_sys = loadHeat(k, k2, T0, T, number_of_steps,
control, omega, control_scale,
True,
integrator, integrator_options)
L = 1.
pic_path = "../plots/slicot_heat_{}_t{:.2f}_azim_{}.png"
systems = reducedAnalysis2D(unred_sys, control, k, k2, T0, T, L,
number_of_steps,
picture_destination=pic_path)
return systems
def reduce(sys, red_sys):
for system in red_sys:
print(system["name"])
with Timer():
system["sys"] = \
pod.lss(sys,
reduction=system.get("reduction", None),
k=system.get("k", None))
system["error_bound"] = system["sys"].hsv[0] * \
np.finfo(float).eps * sys.order
return red_sys
def tableFormat(systems, eps=True, rel_eps=False, solving_time=False,
hankel_norm=False, min_tol=False):
th = ("Reduction"
" & Order")
tb_template = ("\\\\\midrule\n{:7s}"
" & \\num{{{:3d}}}")
if (eps + rel_eps) == 2:
th += (" & \\multicolumn{2}{r|}{\\specialcell[r]{Max. \\& Rel. Error"
"\\\\at $0\\leq t \\leq T$}}"
" & \\multicolumn{2}{r|}{\\specialcell[r]{Max. \\& Rel. Error"
"\\\\at $t=T$}}")
tb_template += (" & \\num{{{:15.10e}}} & \\num{{{:15.10e}}}"
" & \\num{{{:15.10e}}} & \\num{{{:15.10e}}}")
elif eps:
th += (" & \\specialcell[r]{Max. Error\\\\at $0\\leq t \\leq T$}"
" & \\specialcell[r]{Max. Error\\\\at $t=T$}")
tb_template += " & \\num{{{:15.10e}}} & \\num{{{:15.10e}}}"
elif rel_eps:
th += (" & \\specialcell[r]{Rel. Error\\\\at $0\\leq t \\leq T$}"
" & \\specialcell[r]{Rel. Error\\\\at $t=T$}")
tb_template += " & \\num{{{:15.10e}}} & \\num{{{:15.10e}}}"
if solving_time:
th += " & \\specialcell[r]{Solving Time}"
tb_template += " & \\SI{{{:8.3e}}}{{\\second}}"
if hankel_norm:
th += " & Hankel Norm"
tb_template += " & \\num{{{:15.10e}}}"
if min_tol:
th += " & Minimal Tolerance"
tb_template += " & \\num{{{:15.10e}}}"
tb = []
for system in systems:
results = [
system.get("shortname", "Original"),
system["sys"].order
]
if eps:
try:
results.append(max(system["eps"]))
except KeyError:
results.append(0.)
if rel_eps:
try:
results.append(max(map(max, system["rel_eps"])))
except KeyError:
results.append(0.)
if eps:
try:
results.append(system["eps"][-1])
except KeyError:
results.append(0.)
if rel_eps:
try:
results.append(max(system["rel_eps"][-1]))
except KeyError:
results.append(0.)
if solving_time:
results.append(0.)
if hankel_norm:
results.append(system["sys"].hsv[0])
if min_tol:
try:
results.append(system["error_bound"])
except KeyError:
results.append(0.)
tb.append(tb_template.format(*results))
table = th
for line in tb:
table += line
print(table)
| mit |
aetilley/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
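# Clarifying note (added; the formula lives in scikit-learn, not in this test):
# "balanced" class weights are n_samples / (n_classes * bincount(y)).  For the
# unbalanced case above there are 6 samples in 3 classes with counts (3, 1, 2),
# giving 6/(3*3) = 2/3, 6/(3*1) = 2 and 6/(3*2) = 1, exactly the values checked.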
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
weixuanfu2016/tpot | tpot/config/classifier_sparse.py | 3 | 3726 | # -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson ([email protected])
- Weixuan Fu ([email protected])
- Daniel Angell ([email protected])
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
classifier_config_sparse = {
'tpot.builtins.OneHotEncoder': {
'minimum_fraction': [0.05, 0.1, 0.15, 0.2, 0.25]
},
'sklearn.neighbors.KNeighborsClassifier': {
'n_neighbors': range(1, 101),
'weights': ["uniform", "distance"],
'p': [1, 2]
},
'sklearn.ensemble.RandomForestClassifier': {
'n_estimators': [100],
'criterion': ["gini", "entropy"],
'max_features': np.arange(0.05, 1.01, 0.05),
'min_samples_split': range(2, 21),
'min_samples_leaf': range(1, 21),
'bootstrap': [True, False]
},
'sklearn.feature_selection.SelectFwe': {
'alpha': np.arange(0, 0.05, 0.001),
'score_func': {
'sklearn.feature_selection.f_classif': None
}
},
'sklearn.feature_selection.SelectPercentile': {
'percentile': range(1, 100),
'score_func': {
'sklearn.feature_selection.f_classif': None
}
},
'sklearn.feature_selection.VarianceThreshold': {
'threshold': np.arange(0.05, 1.01, 0.05)
},
'sklearn.feature_selection.RFE': {
'step': np.arange(0.05, 1.01, 0.05),
'estimator': {
'sklearn.ensemble.ExtraTreesClassifier': {
'n_estimators': [100],
'criterion': ['gini', 'entropy'],
'max_features': np.arange(0.05, 1.01, 0.05)
}
}
},
'sklearn.feature_selection.SelectFromModel': {
'threshold': np.arange(0, 1.01, 0.05),
'estimator': {
'sklearn.ensemble.ExtraTreesClassifier': {
'n_estimators': [100],
'criterion': ['gini', 'entropy'],
'max_features': np.arange(0.05, 1.01, 0.05)
}
}
},
'sklearn.linear_model.LogisticRegression': {
'penalty': ["l1", "l2"],
'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.],
'dual': [True, False]
},
'sklearn.naive_bayes.BernoulliNB': {
'alpha': [1e-3, 1e-2, 1e-1, 1., 10., 100.],
'fit_prior': [True, False]
},
'sklearn.naive_bayes.MultinomialNB': {
'alpha': [1e-3, 1e-2, 1e-1, 1., 10., 100.],
'fit_prior': [True, False]
},
'sklearn.svm.LinearSVC': {
'penalty': ["l1", "l2"],
'loss': ["hinge", "squared_hinge"],
'dual': [True, False],
'tol': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.]
},
'xgboost.XGBClassifier': {
'n_estimators': [100],
'max_depth': range(1, 11),
'learning_rate': [1e-3, 1e-2, 1e-1, 0.5, 1.],
'subsample': np.arange(0.05, 1.01, 0.05),
'min_child_weight': range(1, 21),
'nthread': [1]
}
}
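# Usage sketch (added for illustration; treat the exact argument values as
# assumptions based on TPOT's documented `config_dict` parameter):
#
#     from tpot import TPOTClassifier
#     tpot = TPOTClassifier(config_dict='TPOT sparse')   # selects this dict
#     tpot.fit(X_train_sparse, y_train)
#
# Passing `classifier_config_sparse` itself as `config_dict` works as well.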
| lgpl-3.0 |
mdhaber/scipy | scipy/interpolate/_rbfinterp.py | 7 | 16523 | """Module for RBF interpolation."""
import warnings
from itertools import combinations_with_replacement
import numpy as np
from numpy.linalg import LinAlgError
from scipy.spatial import KDTree
from scipy.special import comb
from scipy.linalg.lapack import dgesv # type: ignore[attr-defined]
from ._rbfinterp_pythran import _build_system, _evaluate, _polynomial_matrix
__all__ = ["RBFInterpolator"]
# These RBFs are implemented.
_AVAILABLE = {
"linear",
"thin_plate_spline",
"cubic",
"quintic",
"multiquadric",
"inverse_multiquadric",
"inverse_quadratic",
"gaussian"
}
# The shape parameter does not need to be specified when using these RBFs.
_SCALE_INVARIANT = {"linear", "thin_plate_spline", "cubic", "quintic"}
# For RBFs that are conditionally positive definite of order m, the interpolant
# should include polynomial terms with degree >= m - 1. Define the minimum
# degrees here. These values are from Chapter 8 of Fasshauer's "Meshfree
# Approximation Methods with MATLAB". The RBFs that are not in this dictionary
# are positive definite and do not need polynomial terms.
_NAME_TO_MIN_DEGREE = {
"multiquadric": 0,
"linear": 0,
"thin_plate_spline": 1,
"cubic": 1,
"quintic": 2
}
def _monomial_powers(ndim, degree):
"""Return the powers for each monomial in a polynomial.
Parameters
----------
ndim : int
Number of variables in the polynomial.
degree : int
Degree of the polynomial.
Returns
-------
(nmonos, ndim) int ndarray
Array where each row contains the powers for each variable in a
monomial.
"""
nmonos = comb(degree + ndim, ndim, exact=True)
out = np.zeros((nmonos, ndim), dtype=int)
count = 0
for deg in range(degree + 1):
for mono in combinations_with_replacement(range(ndim), deg):
# `mono` is a tuple of variables in the current monomial with
# multiplicity indicating power (e.g., (0, 1, 1) represents x*y**2)
for var in mono:
out[count, var] += 1
count += 1
return out
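# Worked example (added for clarity): for ndim=2 and degree=1 the loops above
# visit the monomials 1, x and y, so _monomial_powers(2, 1) returns
# [[0, 0], [1, 0], [0, 1]], i.e. comb(1 + 2, 2) = 3 rows, as sized by `nmonos`.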
def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers):
"""Build and solve the RBF interpolation system of equations.
Parameters
----------
y : (P, N) float ndarray
Data point coordinates.
d : (P, S) float ndarray
Data values at `y`.
smoothing : (P,) float ndarray
Smoothing parameter for each data point.
kernel : str
Name of the RBF.
epsilon : float
Shape parameter.
powers : (R, N) int ndarray
The exponents for each monomial in the polynomial.
Returns
-------
coeffs : (P + R, S) float ndarray
Coefficients for each RBF and monomial.
shift : (N,) float ndarray
Domain shift used to create the polynomial matrix.
scale : (N,) float ndarray
Domain scaling used to create the polynomial matrix.
"""
lhs, rhs, shift, scale = _build_system(
y, d, smoothing, kernel, epsilon, powers
)
_, _, coeffs, info = dgesv(lhs, rhs, overwrite_a=True, overwrite_b=True)
if info < 0:
raise ValueError(f"The {-info}-th argument had an illegal value.")
elif info > 0:
msg = "Singular matrix."
nmonos = powers.shape[0]
if nmonos > 0:
pmat = _polynomial_matrix((y - shift)/scale, powers)
rank = np.linalg.matrix_rank(pmat)
if rank < nmonos:
msg = (
"Singular matrix. The matrix of monomials evaluated at "
"the data point coordinates does not have full column "
f"rank ({rank}/{nmonos})."
)
raise LinAlgError(msg)
return shift, scale, coeffs
class RBFInterpolator:
"""Radial basis function (RBF) interpolation in N dimensions.
Parameters
----------
y : (P, N) array_like
Data point coordinates.
d : (P, ...) array_like
Data values at `y`.
neighbors : int, optional
If specified, the value of the interpolant at each evaluation point
will be computed using only this many nearest data points. All the data
points are used by default.
smoothing : float or (P,) array_like, optional
Smoothing parameter. The interpolant perfectly fits the data when this
is set to 0. For large values, the interpolant approaches a least
squares fit of a polynomial with the specified degree. Default is 0.
kernel : str, optional
Type of RBF. This should be one of
- 'linear' : ``-r``
- 'thin_plate_spline' : ``r**2 * log(r)``
- 'cubic' : ``r**3``
- 'quintic' : ``-r**5``
- 'multiquadric' : ``-sqrt(1 + r**2)``
- 'inverse_multiquadric' : ``1/sqrt(1 + r**2)``
- 'inverse_quadratic' : ``1/(1 + r**2)``
- 'gaussian' : ``exp(-r**2)``
Default is 'thin_plate_spline'.
epsilon : float, optional
Shape parameter that scales the input to the RBF. If `kernel` is
'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to
1 and can be ignored because it has the same effect as scaling the
smoothing parameter. Otherwise, this must be specified.
degree : int, optional
Degree of the added polynomial. For some RBFs the interpolant may not
be well-posed if the polynomial degree is too small. Those RBFs and
their corresponding minimum degrees are
- 'multiquadric' : 0
- 'linear' : 0
- 'thin_plate_spline' : 1
- 'cubic' : 1
- 'quintic' : 2
The default value is the minimum degree for `kernel` or 0 if there is
no minimum degree. Set this to -1 for no added polynomial.
Notes
-----
An RBF is a scalar valued function in N-dimensional space whose value at
:math:`x` can be expressed in terms of :math:`r=||x - c||`, where :math:`c`
is the center of the RBF.
An RBF interpolant for the vector of data values :math:`d`, which are from
locations :math:`y`, is a linear combination of RBFs centered at :math:`y`
plus a polynomial with a specified degree. The RBF interpolant is written
as
.. math::
f(x) = K(x, y) a + P(x) b,
where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y`
evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of
monomials, which span polynomials with the specified degree, evaluated at
:math:`x`. The coefficients :math:`a` and :math:`b` are the solution to the
linear equations
.. math::
(K(y, y) + \\lambda I) a + P(y) b = d
and
.. math::
P(y)^T a = 0,
where :math:`\\lambda` is a non-negative smoothing parameter that controls
how well we want to fit the data. The data are fit exactly when the
smoothing parameter is 0.
The above system is uniquely solvable if the following requirements are
met:
- :math:`P(y)` must have full column rank. :math:`P(y)` always has full
column rank when `degree` is -1 or 0. When `degree` is 1,
:math:`P(y)` has full column rank if the data point locations are not
all collinear (N=2), coplanar (N=3), etc.
- If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline',
'cubic', or 'quintic', then `degree` must not be lower than the
minimum value listed above.
- If `smoothing` is 0, then each data point location must be distinct.
When using an RBF that is not scale invariant ('multiquadric',
'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an appropriate
shape parameter must be chosen (e.g., through cross validation). Smaller
values for the shape parameter correspond to wider RBFs. The problem can
become ill-conditioned or singular when the shape parameter is too small.
The memory required to solve for the RBF interpolation coefficients
increases quadratically with the number of data points, which can become
impractical when interpolating more than about a thousand data points.
To overcome memory limitations for large interpolation problems, the
`neighbors` argument can be specified to compute an RBF interpolant for
each evaluation point using only the nearest data points.
.. versionadded:: 1.7.0
See Also
--------
NearestNDInterpolator
LinearNDInterpolator
CloughTocher2DInterpolator
References
----------
.. [1] Fasshauer, G., 2007. Meshfree Approximation Methods with Matlab.
World Scientific Publishing Co.
.. [2] http://amadeus.math.iit.edu/~fass/603_ch3.pdf
.. [3] Wahba, G., 1990. Spline Models for Observational Data. SIAM.
.. [4] http://pages.stat.wisc.edu/~wahba/stat860public/lect/lect8/lect8.pdf
Examples
--------
Demonstrate interpolating scattered data to a grid in 2-D.
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import RBFInterpolator
>>> from scipy.stats.qmc import Halton
>>> rng = np.random.default_rng()
>>> xobs = 2*Halton(2, seed=rng).random(100) - 1
>>> yobs = np.sum(xobs, axis=1)*np.exp(-6*np.sum(xobs**2, axis=1))
>>> xgrid = np.mgrid[-1:1:50j, -1:1:50j]
>>> xflat = xgrid.reshape(2, -1).T
>>> yflat = RBFInterpolator(xobs, yobs)(xflat)
>>> ygrid = yflat.reshape(50, 50)
>>> fig, ax = plt.subplots()
>>> ax.pcolormesh(*xgrid, ygrid, vmin=-0.25, vmax=0.25, shading='gouraud')
>>> p = ax.scatter(*xobs.T, c=yobs, s=50, ec='k', vmin=-0.25, vmax=0.25)
>>> fig.colorbar(p)
>>> plt.show()
"""
def __init__(self, y, d,
neighbors=None,
smoothing=0.0,
kernel="thin_plate_spline",
epsilon=None,
degree=None):
y = np.asarray(y, dtype=float, order="C")
if y.ndim != 2:
raise ValueError("`y` must be a 2-dimensional array.")
ny, ndim = y.shape
d_dtype = complex if np.iscomplexobj(d) else float
d = np.asarray(d, dtype=d_dtype, order="C")
if d.shape[0] != ny:
raise ValueError(
f"Expected the first axis of `d` to have length {ny}."
)
d_shape = d.shape[1:]
d = d.reshape((ny, -1))
# If `d` is complex, convert it to a float array with twice as many
# columns. Otherwise, the LHS matrix would need to be converted to
# complex and take up 2x more memory than necessary.
d = d.view(float)
if np.isscalar(smoothing):
smoothing = np.full(ny, smoothing, dtype=float)
else:
smoothing = np.asarray(smoothing, dtype=float, order="C")
if smoothing.shape != (ny,):
raise ValueError(
"Expected `smoothing` to be a scalar or have shape "
f"({ny},)."
)
kernel = kernel.lower()
if kernel not in _AVAILABLE:
raise ValueError(f"`kernel` must be one of {_AVAILABLE}.")
if epsilon is None:
if kernel in _SCALE_INVARIANT:
epsilon = 1.0
else:
raise ValueError(
"`epsilon` must be specified if `kernel` is not one of "
f"{_SCALE_INVARIANT}."
)
else:
epsilon = float(epsilon)
min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1)
if degree is None:
degree = max(min_degree, 0)
else:
degree = int(degree)
if degree < -1:
raise ValueError("`degree` must be at least -1.")
elif degree < min_degree:
warnings.warn(
f"`degree` should not be below {min_degree} when `kernel` "
f"is '{kernel}'. The interpolant may not be uniquely "
"solvable, and the smoothing parameter may have an "
"unintuitive effect.",
UserWarning
)
if neighbors is None:
nobs = ny
else:
# Make sure the number of nearest neighbors used for interpolation
# does not exceed the number of observations.
neighbors = int(min(neighbors, ny))
nobs = neighbors
powers = _monomial_powers(ndim, degree)
# The polynomial matrix must have full column rank in order for the
# interpolant to be well-posed, which is not possible if there are
# fewer observations than monomials.
if powers.shape[0] > nobs:
raise ValueError(
f"At least {powers.shape[0]} data points are required when "
f"`degree` is {degree} and the number of dimensions is {ndim}."
)
if neighbors is None:
shift, scale, coeffs = _build_and_solve_system(
y, d, smoothing, kernel, epsilon, powers
)
# Make these attributes private since they do not always exist.
self._shift = shift
self._scale = scale
self._coeffs = coeffs
else:
self._tree = KDTree(y)
self.y = y
self.d = d
self.d_shape = d_shape
self.d_dtype = d_dtype
self.neighbors = neighbors
self.smoothing = smoothing
self.kernel = kernel
self.epsilon = epsilon
self.powers = powers
def __call__(self, x):
"""Evaluate the interpolant at `x`.
Parameters
----------
x : (Q, N) array_like
Evaluation point coordinates.
Returns
-------
(Q, ...) ndarray
Values of the interpolant at `x`.
"""
x = np.asarray(x, dtype=float, order="C")
if x.ndim != 2:
raise ValueError("`x` must be a 2-dimensional array.")
nx, ndim = x.shape
if ndim != self.y.shape[1]:
raise ValueError(
"Expected the second axis of `x` to have length "
f"{self.y.shape[1]}."
)
if self.neighbors is None:
out = _evaluate(
x, self.y, self.kernel, self.epsilon, self.powers, self._shift,
self._scale, self._coeffs
)
else:
# Get the indices of the k nearest observation points to each
# evaluation point.
_, yindices = self._tree.query(x, self.neighbors)
if self.neighbors == 1:
# `KDTree` squeezes the output when neighbors=1.
yindices = yindices[:, None]
# Multiple evaluation points may have the same neighborhood of
# observation points. Make the neighborhoods unique so that we only
# compute the interpolation coefficients once for each
# neighborhood.
yindices = np.sort(yindices, axis=1)
yindices, inv = np.unique(yindices, return_inverse=True, axis=0)
# `inv` tells us which neighborhood will be used by each evaluation
# point. Now we find which evaluation points will be using each
# neighborhood.
xindices = [[] for _ in range(len(yindices))]
for i, j in enumerate(inv):
xindices[j].append(i)
out = np.empty((nx, self.d.shape[1]), dtype=float)
for xidx, yidx in zip(xindices, yindices):
# `yidx` are the indices of the observations in this
# neighborhood. `xidx` are the indices of the evaluation points
# that are using this neighborhood.
xnbr = x[xidx]
ynbr = self.y[yidx]
dnbr = self.d[yidx]
snbr = self.smoothing[yidx]
shift, scale, coeffs = _build_and_solve_system(
ynbr, dnbr, snbr, self.kernel, self.epsilon, self.powers,
)
out[xidx] = _evaluate(
xnbr, ynbr, self.kernel, self.epsilon, self.powers, shift,
scale, coeffs
)
out = out.view(self.d_dtype)
out = out.reshape((nx,) + self.d_shape)
return out
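# Illustrative sketch (added note, not part of the SciPy source). For large
# point sets, passing ``neighbors`` keeps memory bounded because only a small
# local system is solved per evaluation neighborhood, e.g.:
#
#     rng = np.random.default_rng(0)
#     yobs = rng.uniform(-1, 1, (50000, 2))
#     dobs = np.sum(yobs, axis=1)
#     interp = RBFInterpolator(yobs, dobs, neighbors=50)
#     xeval = rng.uniform(-1, 1, (1000, 2))
#     deval = interp(xeval)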
| bsd-3-clause |
QUANTAXIS/QUANTAXIS | QUANTAXIS/QAIndicator/hurst.py | 2 | 4264 |
import numpy as np
import math
import os
import sys
import matplotlib.pyplot as plt
class RSanalysis:
'''Performs RS analysis on data stored in a List()'''
def __init__(self):
pass
def run(self, series, exponent=None):
'''
:type series: List
:type exponent: int
:rtype: float
'''
try:
return self.calculateHurst(series, exponent)
except Exception as e:
print(" Error: %s" % e)
    def bestExponent(self, seriesLength):
        '''
        :type seriesLength: int
        :rtype: int
        '''
        i = 0
        cont = True
        while(cont):
            if(int(seriesLength/int(math.pow(2, i))) <= 1):
                cont = False
            else:
                i += 1
        return int(i-1)
def mean(self, series, start, limit):
'''
:type start: int
:type limit: int
:rtype: float
'''
return float(np.mean(series[start:limit]))
def sumDeviation(self, deviation):
'''
:type deviation: list()
:rtype: list()
'''
return np.cumsum(deviation)
def deviation(self, series, start, limit, mean):
'''
:type start: int
:type limit: int
        :type mean: float
:rtype: list()
'''
d = []
for x in range(start, limit):
d.append(float(series[x] - mean))
return d
    def standardDeviation(self, series, start, limit):
        '''
        :type start: int
        :type limit: int
        :rtype: float
        '''
        return float(np.std(series[start:limit]))
def calculateHurst(self, series, exponent=None):
'''
:type series: List
:type exponent: int
:rtype: float
'''
rescaledRange = list()
sizeRange = list()
rescaledRangeMean = list()
if(exponent is None):
exponent = self.bestExponent(len(series))
for i in range(0, exponent):
partsNumber = int(math.pow(2, i))
size = int(len(series)/partsNumber)
sizeRange.append(size)
rescaledRange.append(0)
rescaledRangeMean.append(0)
for x in range(0, partsNumber):
start = int(size*(x))
limit = int(size*(x+1))
                deviationAccumulative = self.sumDeviation(self.deviation(
                    series, start, limit, self.mean(series, start, limit)))
                deviationsDifference = float(
                    max(deviationAccumulative) - min(deviationAccumulative))
                standardDeviation = self.standardDeviation(
                    series, start, limit)
                if(deviationsDifference != 0 and standardDeviation != 0):
                    rescaledRange[i] += (deviationsDifference /
                                         standardDeviation)
y = 0
for x in rescaledRange:
rescaledRangeMean[y] = x/int(math.pow(2, y))
y = y+1
# log calculation
rescaledRangeLog = list()
sizeRangeLog = list()
for i in range(0, exponent):
rescaledRangeLog.append(math.log(rescaledRangeMean[i], 10))
sizeRangeLog.append(math.log(sizeRange[i], 10))
slope, intercept = np.polyfit(sizeRangeLog, rescaledRangeLog, 1)
ablineValues = [slope * i + intercept for i in sizeRangeLog]
plt.plot(sizeRangeLog, rescaledRangeLog, '--')
plt.plot(sizeRangeLog, ablineValues, 'b')
plt.title(slope)
# graphic dimension settings
limitUp = 0
if(max(sizeRangeLog) > max(rescaledRangeLog)):
limitUp = max(sizeRangeLog)
else:
limitUp = max(rescaledRangeLog)
limitDown = 0
if(min(sizeRangeLog) > min(rescaledRangeLog)):
limitDown = min(rescaledRangeLog)
else:
limitDown = min(sizeRangeLog)
plt.gca().set_xlim(limitDown, limitUp)
plt.gca().set_ylim(limitDown, limitUp)
print("Hurst exponent: " + str(slope))
plt.show()
return slope
def quit(self):
raise SystemExit()
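# Illustrative sketch (added for clarity; not part of the original module).
# It estimates the Hurst exponent of a plain random walk, which should come
# out close to 0.5. The function name and defaults are arbitrary.
def _example_random_walk(n=1024, seed=0):
    rng = np.random.RandomState(seed)
    walk = np.cumsum(rng.randn(n)).tolist()
    # run() also draws the log-log R/S regression via matplotlib
    return RSanalysis().run(walk)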
if __name__ == "__main__":
    RSanalysis().run([float(x) for x in sys.argv[1:]])
| mit |
anizz-rocks/HR-Data-Analysis_kaggleData | HR Analysis.py | 1 | 4638 |
# coding: utf-8
# <h1> Human Resource Analysis
# In[91]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
from sklearn.tree import DecisionTreeClassifier as dtclf
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.ensemble import AdaBoostClassifier as adaBoost
from sklearn.ensemble import GradientBoostingClassifier as GrdBoost
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.linear_model import LogisticRegression as Logistic
# In[92]:
path = "HR_comma_sep.csv"
data=pd.read_csv(path)
data.columns
# In[93]:
dept = {'sales' : 1 , 'marketing': 2,'technical' : 3 , 'support': 4,'product_mng' : 5 , 'IT': 6,'hr' : 7 , 'management': 8,'accounting' : 9,'RandD':10 }
salary = {'low':0,'medium':1,'high':2}
df=data.replace({'sales':dept})
df=df.replace({'salary':salary})
data = df
# In[94]:
#Data Describe
data_Info = df.info()
df.head(5)
# <h2> Distribution of Independent Variables
# In[101]:
plt.figure(figsize=(9, 8))
sns.distplot(df['satisfaction_level'], color='r', bins=10, hist_kws={'alpha': 0.4});
print(df['satisfaction_level'].describe())
plt.figure(figsize=(9, 8))
sns.distplot(df['last_evaluation'], color='b', bins=10, hist_kws={'alpha': 0.4});
print(df['last_evaluation'].describe())
plt.figure(figsize=(9, 8))
sns.distplot(df['time_spend_company'], color='y', bins=10, hist_kws={'alpha': 0.4});
print(df['time_spend_company'].describe())
plt.figure(figsize=(9, 8))
sns.distplot(df['number_project'], color='c', bins=10, hist_kws={'alpha': 0.4});
print(df['number_project'].describe())
plt.figure(figsize=(9, 8))
sns.distplot(df['salary'], color='c', bins=10, hist_kws={'alpha': 0.4});
print(df['salary'].describe())
plt.figure(figsize=(9, 8))
sns.distplot(df['promotion_last_5years'], color='c', bins=10, hist_kws={'alpha': 0.4});
print(df['promotion_last_5years'].describe())
plt.figure(figsize=(9, 8))
sns.distplot(df['average_montly_hours'], color='c', bins=10, hist_kws={'alpha': 0.4});
print(df['average_montly_hours'].describe())
plt.figure(figsize=(9, 8))
sns.distplot(df['Work_accident'], color='c', bins=10, hist_kws={'alpha': 0.4});
print(df['Work_accident'].describe())
plt.figure(figsize=(9, 8))
sns.distplot(df['sales'], color='c', bins=10, hist_kws={'alpha': 0.4});
print(df['sales'].describe())
plt.figure(figsize=(9, 8))
sns.distplot(df['left'], color='c', bins=10, hist_kws={'alpha': 0.4});
print(df['left'].describe())
# In[100]:
df_num = df.select_dtypes(include = ['float64', 'int64'])
df_num.head()
df_num.hist(figsize=(16, 20), bins=50, xlabelsize=8, ylabelsize=8);
# <h2> Check for Multicollinearity
# In[23]:
g = sns.pairplot(data)
g.savefig("Mult_colinear.png")
# In[5]:
X = data[['satisfaction_level', 'last_evaluation', 'number_project',
'average_montly_hours', 'time_spend_company', 'Work_accident',
'promotion_last_5years', 'sales', 'salary']]
Y = data[['left']]
# <h2> Feature Extraction </h2>
# <h3> SelectKBest </h3>
# In[102]:
# feature extraction
test = SelectKBest(score_func=chi2, k=9)
fit = test.fit(X,Y)
np.set_printoptions(precision=3)
print(fit.scores_)
feature_vect = fit.transform(X)
feature_vect
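# Added sketch (not in the original notebook): pairing the chi2 scores with
# the column names of `X` makes the ranking readable.
feature_scores = pd.Series(fit.scores_, index=X.columns).sort_values(ascending=False)
print(feature_scores)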
# In[103]:
X_train, X_test, y_train, y_test = train_test_split(feature_vect,Y, test_size=0.40, random_state=42)
# <h2> Decision Tree
# In[9]:
model = dtclf()
model.fit(X_train , y_train)
# In[10]:
y_pred = model.predict(X_test)
confusion_matrix(y_test, y_pred)
# In[11]:
accuracy_score(y_test, y_pred)
# In[12]:
f1_score(y_test, y_pred,average='macro')
# <h2> AdaBoost with a Decision Tree base estimator (an ensemble approach)
# In[13]:
Model = adaBoost(n_estimators=100,base_estimator=dtclf(),learning_rate=0.98)
Model.fit(X_train , y_train)
y_pred = Model.predict(X_test)
# In[14]:
confusion_matrix(y_test, y_pred)
# In[15]:
accuracy_score(y_test, y_pred)
# In[16]:
f1_score(y_test, y_pred,average='macro')
# <h2> Logistic Regression
# In[17]:
Model = Logistic(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None,
solver='liblinear', max_iter=100, multi_class='ovr', verbose=0, warm_start=False, n_jobs=1)
Model.fit(X_train , y_train)
y_pred = Model.predict(X_test)
# In[18]:
confusion_matrix(y_test, y_pred)
# In[19]:
accuracy_score(y_test, y_pred)
# In[20]:
f1_score(y_test, y_pred,average='macro')
| gpl-3.0 |
metaml/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/__init__.py | 69 | 28184 | """
This is an object-orient plotting library.
A procedural interface is provided by the companion pylab module,
which may be imported directly, e.g::
from pylab import *
or using ipython::
ipython -pylab
For the most part, direct use of the object-oriented library is
encouraged when programming rather than working interactively. The
exceptions are the pylab commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.backends.backend_qt4agg.show`, and
:func:`~pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pylab
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
:mod:`matplotlib.patches`
defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
a global dictionary of default configuration settings. It is
    initialized by code which may be overridden by a matplotlibrc
file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pylab (if pylab is imported).
matplotlib is written by John D. Hunter (jdh2358 at gmail.com) and a
host of others.
"""
from __future__ import generators
__version__ = '0.98.5.2'
__revision__ = '$Revision: 6660 $'
__date__ = '$Date: 2008-12-18 06:10:51 -0600 (Thu, 18 Dec 2008) $'
import os, re, shutil, subprocess, sys, warnings
import distutils.sysconfig
import distutils.version
NEWCONFIG = False
# Needed for toolkit setuptools support
if 0:
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
pass # must not have setuptools
if not hasattr(sys, 'argv'): # for modpython
sys.argv = ['modpython']
"""
Manage user customizations through a rc file.
The default file location is given in the following order
- environment variable MATPLOTLIBRC
- HOME/.matplotlib/matplotlibrc if HOME is defined
- PATH/matplotlibrc where PATH is the return value of
get_data_path()
"""
import sys, os, tempfile
from rcsetup import defaultParams, validate_backend, validate_toolbar
from rcsetup import validate_cairo_format
major, minor1, minor2, s, tmp = sys.version_info
_python24 = major>=2 and minor1>=4
# the havedate check was a legacy from old matplotlib which preceded
# datetime support
_havedate = True
#try:
# import pkg_resources # pkg_resources is part of setuptools
#except ImportError: _have_pkg_resources = False
#else: _have_pkg_resources = True
if not _python24:
raise ImportError('matplotlib requires Python 2.4 or later')
import numpy
nn = numpy.__version__.split('.')
if not (int(nn[0]) >= 1 and int(nn[1]) >= 1):
raise ImportError(
'numpy 1.1 or later is required; you have %s' % numpy.__version__)
def is_string_like(obj):
if hasattr(obj, 'shape'): return 0
try: obj + ''
except (TypeError, ValueError): return 0
return 1
def _is_writable_dir(p):
"""
    p is a string pointing to a putative writable dir -- return True if p
    is such a string, else False
"""
try: p + '' # test is string like
except TypeError: return False
try:
t = tempfile.TemporaryFile(dir=p)
t.write('1')
t.close()
except OSError: return False
else: return True
class Verbose:
"""
A class to handle reporting. Set the fileo attribute to any file
instance to handle the output. Default is sys.stdout
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying')
vald = dict( [(level, i) for i,level in enumerate(levels)])
# parse the verbosity from the command line; flags look like
# --verbose-silent or --verbose-helpful
_commandLineVerbose = None
for arg in sys.argv[1:]:
if not arg.startswith('--verbose-'): continue
_commandLineVerbose = arg[10:]
def __init__(self):
self.set_level('silent')
self.fileo = sys.stdout
def set_level(self, level):
'set the verbosity to one of the Verbose.levels strings'
if self._commandLineVerbose is not None:
level = self._commandLineVerbose
if level not in self.levels:
raise ValueError('Illegal verbose string "%s". Legal values are %s'%(level, self.levels))
self.level = level
def set_fileo(self, fname):
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if fname in std:
self.fileo = std[fname]
else:
try:
fileo = file(fname, 'w')
except IOError:
raise ValueError('Verbose object could not open log file "%s" for writing.\nCheck your matplotlibrc verbose.fileo setting'%fname)
else:
self.fileo = fileo
def report(self, s, level='helpful'):
"""
print message s to self.fileo if self.level>=level. Return
value indicates whether a message was issued
"""
if self.ge(level):
print >>self.fileo, s
return True
return False
def wrap(self, fmt, func, level='helpful', always=True):
"""
return a callable function that wraps func and reports it
output through the verbose handler if current verbosity level
is higher than level
if always is True, the report will occur on every function
call; otherwise only on the first time the function is called
"""
assert callable(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if (always or not wrapper._spoke):
spoke = self.report(fmt%ret, level)
if not wrapper._spoke: wrapper._spoke = spoke
return ret
wrapper._spoke = False
wrapper.__doc__ = func.__doc__
return wrapper
def ge(self, level):
'return true if self.level is >= level'
return self.vald[self.level]>=self.vald[level]
verbose=Verbose()
def checkdep_dvipng():
try:
s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[1]
v = line.split()[-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_ghostscript():
try:
if sys.platform == 'win32':
command_args = ['gswin32c', '--version']
else:
command_args = ['gs', '--version']
s = subprocess.Popen(command_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
v = s.stdout.read()[:-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_tex():
try:
s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[0]
pattern = '3\.1\d+'
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def checkdep_pdftops():
try:
s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stderr:
if 'version' in line:
v = line.split()[-1]
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def compare_versions(a, b):
"return True if a is greater than or equal to b"
if a:
a = distutils.version.LooseVersion(a)
b = distutils.version.LooseVersion(b)
if a>=b: return True
else: return False
else: return False
def checkdep_ps_distiller(s):
if not s:
return False
flag = True
gs_req = '7.07'
gs_sugg = '7.07'
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later '
'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
'unless ghostscript-%s or later is installed on your system') % gs_req)
if s == 'xpdf':
pdftops_req = '3.0'
pdftops_req_alt = '0.9' # poppler version numbers, ugh
pdftops_v = checkdep_pdftops()
if compare_versions(pdftops_v, pdftops_req):
pass
elif compare_versions(pdftops_v, pdftops_req_alt) and not \
compare_versions(pdftops_v, '1.0'):
pass
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req)
if flag:
return s
else:
return False
def checkdep_usetex(s):
if not s:
return False
tex_req = '3.1415'
gs_req = '7.07'
gs_sugg = '7.07'
dvipng_req = '1.5'
flag = True
tex_v = checkdep_tex()
if compare_versions(tex_v, tex_req): pass
else:
flag = False
warnings.warn(('matplotlibrc text.usetex option can not be used '
'unless TeX-%s or later is '
'installed on your system') % tex_req)
dvipng_v = checkdep_dvipng()
if compare_versions(dvipng_v, dvipng_req): pass
else:
flag = False
warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg '
'backend unless dvipng-1.5 or later is '
'installed on your system')
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later is '
'recommended for use with the text.usetex '
'option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc text.usetex can not be used '
'unless ghostscript-%s or later is '
'installed on your system') % gs_req)
return flag
def _get_home():
"""Find user's home directory if possible.
Otherwise raise error.
:see: http://mail.python.org/pipermail/python-list/2005-February/263921.html
"""
path=''
try:
path=os.path.expanduser("~")
except:
pass
if not os.path.isdir(path):
for evar in ('HOME', 'USERPROFILE', 'TMP'):
try:
path = os.environ[evar]
if os.path.isdir(path):
break
except: pass
if path:
return path
else:
raise RuntimeError('please define environment variable $HOME')
get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
def _get_configdir():
"""
Return the string representing the configuration dir.
default is HOME/.matplotlib. you can override this with the
MPLCONFIGDIR environment variable
"""
configdir = os.environ.get('MPLCONFIGDIR')
if configdir is not None:
if not _is_writable_dir(configdir):
raise RuntimeError('Could not write to MPLCONFIGDIR="%s"'%configdir)
return configdir
h = get_home()
p = os.path.join(get_home(), '.matplotlib')
if os.path.exists(p):
if not _is_writable_dir(p):
raise RuntimeError("'%s' is not a writable dir; you must set %s/.matplotlib to be a writable dir. You can also set environment variable MPLCONFIGDIR to any writable directory where you want matplotlib data stored "% (h, h))
else:
if not _is_writable_dir(h):
raise RuntimeError("Failed to create %s/.matplotlib; consider setting MPLCONFIGDIR to a writable directory for matplotlib configuration data"%h)
os.mkdir(p)
return p
get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory')
return path
path = os.sep.join([os.path.dirname(__file__), 'mpl-data'])
if os.path.isdir(path): return path
# setuptools' namespace_packages may highjack this init file
# so need to try something known to be in matplotlib, not basemap
import matplotlib.afm
path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data'])
if os.path.isdir(path): return path
# py2exe zips pure python, so still need special check
if getattr(sys,'frozen',None):
path = os.path.join(os.path.split(sys.path[0])[0], 'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming we need to step up one more directory
path = os.path.join(os.path.split(os.path.split(sys.path[0])[0])[0],
'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming sys.path[0] is a dir not a exe
path = os.path.join(sys.path[0], 'mpl-data')
if os.path.isdir(path): return path
raise RuntimeError('Could not find the matplotlib data files')
def _get_data_path_cached():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,
always=False)
def get_example_data(fname):
"""
return a filehandle to one of the example files in mpl-data/example
*fname*
the name of one of the files in mpl-data/example
"""
datadir = os.path.join(get_data_path(), 'example')
fullpath = os.path.join(datadir, fname)
if not os.path.exists(fullpath):
raise IOError('could not find matplotlib example file "%s" in data directory "%s"'%(
fname, datadir))
return file(fullpath, 'rb')
def get_py2exe_datafiles():
datapath = get_data_path()
head, tail = os.path.split(datapath)
d = {}
for root, dirs, files in os.walk(datapath):
# Need to explicitly remove cocoa_agg files or py2exe complains
        # NOTE I don't know why, but do as previous version
if 'Matplotlib.nib' in files:
files.remove('Matplotlib.nib')
files = [os.path.join(root, filename) for filename in files]
root = root.replace(tail, 'mpl-data')
root = root[root.index('mpl-data'):]
d[root] = files
return d.items()
def matplotlib_fname():
"""
Return the path to the rc file
Search order:
* current working dir
* environ var MATPLOTLIBRC
* HOME/.matplotlib/matplotlibrc
* MATPLOTLIBDATA/matplotlibrc
"""
oldname = os.path.join( os.getcwd(), '.matplotlibrc')
if os.path.exists(oldname):
print >> sys.stderr, """\
WARNING: Old rc filename ".matplotlibrc" found in working dir
and renamed to new default rc file name "matplotlibrc"
(no leading "dot"). """
shutil.move('.matplotlibrc', 'matplotlibrc')
home = get_home()
oldname = os.path.join( home, '.matplotlibrc')
if os.path.exists(oldname):
configdir = get_configdir()
newname = os.path.join(configdir, 'matplotlibrc')
print >> sys.stderr, """\
WARNING: Old rc filename "%s" found and renamed to
new default rc file name "%s"."""%(oldname, newname)
shutil.move(oldname, newname)
fname = os.path.join( os.getcwd(), 'matplotlibrc')
if os.path.exists(fname): return fname
if 'MATPLOTLIBRC' in os.environ:
path = os.environ['MATPLOTLIBRC']
if os.path.exists(path):
fname = os.path.join(path, 'matplotlibrc')
if os.path.exists(fname):
return fname
fname = os.path.join(get_configdir(), 'matplotlibrc')
if os.path.exists(fname): return fname
path = get_data_path() # guaranteed to exist or raise
fname = os.path.join(path, 'matplotlibrc')
if not os.path.exists(fname):
warnings.warn('Could not find matplotlibrc; using defaults')
return fname
_deprecated_map = {
'text.fontstyle': 'font.style',
'text.fontangle': 'font.style',
'text.fontvariant': 'font.variant',
'text.fontweight': 'font.weight',
'text.fontsize': 'font.size',
'tick.size' : 'tick.major.size',
}
class RcParams(dict):
"""
A dictionary object including validation
validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`
"""
validate = dict([ (key, converter) for key, (default, converter) in \
defaultParams.iteritems() ])
def __setitem__(self, key, val):
try:
if key in _deprecated_map.keys():
alt = _deprecated_map[key]
warnings.warn('%s is deprecated in matplotlibrc. Use %s \
instead.'% (key, alt))
key = alt
cval = self.validate[key](val)
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError('%s is not a valid rc parameter.\
See rcParams.keys() for a list of valid parameters.'%key)
def rc_params(fail_on_error=False):
'Return the default params updated from the values in the rc file'
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
warnings.warn(message)
return ret
cnt = 0
rc_temp = {}
for line in file(fname):
cnt += 1
strippedline = line.split('#',1)[0].strip()
if not strippedline: continue
tup = strippedline.split(':',1)
if len(tup) !=2:
warnings.warn('Illegal line #%d\n\t%s\n\tin file "%s"'%\
(cnt, line, fname))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d'%(fname,cnt))
rc_temp[key] = (val, line, cnt)
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
verbose.set_level(ret['verbose.level'])
verbose.set_fileo(ret['verbose.fileo'])
for key, (val, line, cnt) in rc_temp.iteritems():
if key in defaultParams:
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
else:
print >> sys.stderr, """
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source
distribution""" % (key, cnt, fname)
if ret['datapath'] is None:
ret['datapath'] = get_data_path()
if not ret['text.latex.preamble'] == ['']:
verbose.report("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
"""% '\n'.join(ret['text.latex.preamble']), 'helpful')
verbose.report('loaded rc file %s'%fname)
return ret
# this is the instance used by the matplotlib classes
rcParams = rc_params()
rcParamsDefault = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
def rc(group, **kwargs):
"""
Set the current rc params. Group is the grouping for the rc, eg.
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, eg. (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, eg::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. Eg, you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations.
Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default
rc params after changes.
"""
aliases = {
'lw' : 'linewidth',
'ls' : 'linestyle',
'c' : 'color',
'fc' : 'facecolor',
'ec' : 'edgecolor',
'mew' : 'markeredgewidth',
'aa' : 'antialiased',
}
if is_string_like(group):
group = (group,)
for g in group:
for k,v in kwargs.items():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
if key not in rcParams:
raise KeyError('Unrecognized key "%s" for group "%s" and name "%s"' %
(key, g, name))
rcParams[key] = v
def rcdefaults():
"""
Restore the default rc params - the ones that were created at
matplotlib load time.
"""
rcParams.update(rcParamsDefault)
if NEWCONFIG:
#print "importing from reorganized config system!"
try:
from config import rcParams, rcdefaults, mplConfig, save_config
verbose.set_level(rcParams['verbose.level'])
verbose.set_fileo(rcParams['verbose.fileo'])
except:
from config import rcParams, rcdefaults
_use_error_msg = """ This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
"""
def use(arg, warn=True):
"""
Set the matplotlib backend to one of the known backends.
The argument is case-insensitive. For the Cairo backend,
the argument can have an extension to indicate the type of
output. Example:
use('cairo.pdf')
will specify a default of pdf output generated by Cairo.
Note: this function must be called *before* importing pylab for
the first time; or, if you are not using pylab, it must be called
before importing matplotlib.backends. If warn is True, a warning
    is issued if you try and call this after pylab or pyplot have been
loaded. In certain black magic use cases, eg
pyplot.switch_backends, we are doing the reloading necessary to
make the backend switch work (in some cases, eg pure image
    backends) so one can set warn=False to suppress the warnings
"""
if 'matplotlib.backends' in sys.modules:
if warn: warnings.warn(_use_error_msg)
return
arg = arg.lower()
if arg.startswith('module://'):
name = arg
else:
be_parts = arg.split('.')
name = validate_backend(be_parts[0])
rcParams['backend'] = name
if name == 'cairo' and len(be_parts) > 1:
rcParams['cairo.format'] = validate_cairo_format(be_parts[1])
def get_backend():
"Returns the current backend"
return rcParams['backend']
def interactive(b):
"""
Set interactive mode to boolean b.
If b is True, then draw after every plotting command, eg, after xlabel
"""
rcParams['interactive'] = b
def is_interactive():
'Return true if plot mode is interactive'
b = rcParams['interactive']
return b
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
# Now allow command line to override
# Allow command line access to the backend with -d (matlab compatible
# flag)
for s in sys.argv[1:]:
if s.startswith('-d') and len(s) > 2: # look for a -d flag
try:
use(s[2:])
except (KeyError, ValueError):
pass
# we don't want to assume all -d flags are backends, eg -debug
verbose.report('matplotlib version %s'%__version__)
verbose.report('verbose.level %s'%verbose.level)
verbose.report('interactive is %s'%rcParams['interactive'])
verbose.report('units is %s'%rcParams['units'])
verbose.report('platform is %s'%sys.platform)
verbose.report('loaded modules: %s'%sys.modules.keys(), 'debug')
| agpl-3.0 |
jakejhansen/minesweeper_solver | evolutionary/generateplots.py | 1 | 1317 | import matplotlib.pyplot as plt
import os
import pickle
try:
    with open('results.pkl', 'rb') as f:
        results = pickle.load(f)
except Exception:
    print("Failed to load checkpoint")
    raise
fig = plt.figure()
plt.plot(results['steps'], results['mean_pop_rewards'])
plt.plot(results['steps'], results['test_rewards'])
plt.xlabel('Environment steps')
plt.ylabel('Reward')
plt.legend(['Mean population reward', 'Test reward'])
plt.tight_layout()
plt.grid()
plt.savefig(os.path.join('progress1.pdf'))
plt.close(fig)
fig = plt.figure(figsize=(4, 8))
plt.subplot(3, 1, 1)
plt.plot(results['steps'], results['mean_pop_rewards'])
plt.ylabel('Mean population reward')
plt.grid()
plt.subplot(3, 1, 2)
plt.plot(results['steps'], results['test_rewards'])
plt.ylabel('Test reward')
plt.grid()
plt.subplot(3, 1, 3)
plt.plot(results['steps'], results['weight_norm'])
plt.ylabel('Weight norm')
plt.xlabel('Environment steps')
plt.tight_layout()
plt.grid()
plt.savefig(os.path.join('progress2.pdf'))
plt.close(fig)
try:
fig = plt.figure()
plt.plot(results['steps'], results['win_rate'])
plt.xlabel('Environment steps')
plt.ylabel('Win rate')
plt.tight_layout()
plt.grid()
    plt.savefig(os.path.join('progress3.pdf'))
plt.close(fig)
except KeyError:
print('No win rate logged')
| mit |
yask123/scikit-learn | sklearn/utils/tests/test_multiclass.py | 128 | 12853 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
        # sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
    # Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
    # We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
YinongLong/scikit-learn | sklearn/metrics/classification.py | 1 | 71650 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# Bernardo Stein <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # We can't have more than one value on y_type => The set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
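# For instance (illustrative note, not part of the scikit-learn source):
#   _weighted_sum(np.array([1, 0, 1]), None, normalize=True)   -> 0.666...
#   _weighted_sum(np.array([1, 0, 1]), np.array([2, 1, 2]))    -> 4
# which is how accuracy_score turns per-sample scores into a single metric.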
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the fraction of correctly classified
samples (float), else return the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(sample_weight, y_true, y_pred)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
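# e.g. (illustrative): labels = ['ant', 'bird', 'cat'] gives
# label_to_ind == {'ant': 0, 'bird': 1, 'cat': 2}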
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
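# coo_matrix sums duplicate (row, col) entries when densified, so each
# (true, pred) pair accumulates its sample weights into a single cell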
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None, weights=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
weights : str, optional
Weighting type to calculate the score. None means no weighting;
"linear" means linear weighting; "quadratic" means quadratic weighting.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596.
<http://www.mitpressjournals.org/doi/abs/10.1162/coli.07-034-R2#.V0J1MJMrIWo>`_
.. [3] `Wikipedia entry for the Cohen's kappa.
<https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
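Examples
--------
A small illustrative sketch with the default (unweighted) kappa; the value
follows from p_o = 0.75 and p_e = 0.5 for these labels:
>>> from sklearn.metrics import cohen_kappa_score
>>> cohen_kappa_score([0, 1, 1, 0], [0, 1, 1, 1])  # doctest: +SKIP
0.5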
"""
confusion = confusion_matrix(y1, y2, labels=labels)
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
if weights is None:
w_mat = np.ones([n_classes, n_classes], dtype=np.int)
w_mat.flat[:: n_classes + 1] = 0
elif weights == "linear" or weights == "quadratic":
w_mat = np.zeros([n_classes, n_classes], dtype=np.int)
w_mat += np.arange(n_classes)
if weights == "linear":
w_mat = np.abs(w_mat - w_mat.T)
else:
w_mat = (w_mat - w_mat.T) ** 2
else:
raise ValueError("Unknown kappa weighting type.")
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare the set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
mean_yt = np.average(y_true, weights=sample_weight)
mean_yp = np.average(y_pred, weights=sample_weight)
y_true_u_cent = y_true - mean_yt
y_pred_u_cent = y_pred - mean_yp
cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight)
var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight)
var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight)
mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contributions of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
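For instance (an illustrative sketch; the warning machinery is bypassed by
passing an empty ``warn_for``):
>>> import numpy as np
>>> _prf_divide(np.array([2.0, 0.0]), np.array([4.0, 0.0]),
... 'precision', 'predicted', None, ())  # doctest: +SKIP
array([ 0.5, 0. ])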
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
else:
raise ValueError("Target is %s but average='binary'. Please "
"choose another average setting." % y_type)
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average), UserWarning)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
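# e.g. (illustrative): with y_true = [0, 2, 2] and y_pred = [0, 2, 1],
# tp_bins below is [0, 2] and bincount(tp_bins, minlength=3) gives [1, 0, 1]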
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pred
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
target_names = ['%s' % l for l in labels]
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array, shape = [n_labels], optional (default=None)
Integer array of labels. If not provided, labels will be inferred
from y_true and y_pred.
.. versionadded:: 0.18
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
classes : array, shape = [n_labels], optional
(deprecated) Integer array of labels. This parameter has been
renamed to ``labels`` in version 0.18 and will be removed in 0.20.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes only the individual
labels.
The Hamming loss is upper-bounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
if classes is not None:
warnings.warn("'classes' was renamed to 'labels' in version 0.18 and "
"will be removed in 0.20.", DeprecationWarning)
labels = classes
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * len(labels) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
labels : array-like, optional (default=None)
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In the multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multiclass margin is calculated according
to the Crammer-Singer method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
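# The mask below is True for every class except the true one; the margin is
# then the true class's decision value minus the largest remaining value
# (the Crammer-Singer multiclass margin).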
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
.. [1] `Wikipedia entry for the Brier score.
<https://en.wikipedia.org/wiki/Brier_score>`_
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
assert_all_finite(y_true)
assert_all_finite(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
| bsd-3-clause |
gengliangwang/spark | python/pyspark/pandas/tests/test_repr.py | 15 | 7832 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option, option_context
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class ReprTest(PandasOnSparkTestCase):
max_display_count = 23
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("display.max_rows", ReprTest.max_display_count)
@classmethod
def tearDownClass(cls):
reset_option("display.max_rows")
super().tearDownClass()
def test_repr_dataframe(self):
psdf = ps.range(ReprTest.max_display_count)
self.assertTrue("Showing only the first" not in repr(psdf))
self.assert_eq(repr(psdf), repr(psdf.to_pandas()))
psdf = ps.range(ReprTest.max_display_count + 1)
self.assertTrue("Showing only the first" in repr(psdf))
self.assertTrue(
repr(psdf).startswith(repr(psdf.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psdf = ps.range(ReprTest.max_display_count + 1)
self.assert_eq(repr(psdf), repr(psdf.to_pandas()))
def test_repr_series(self):
psser = ps.range(ReprTest.max_display_count).id
self.assertTrue("Showing only the first" not in repr(psser))
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.range(ReprTest.max_display_count + 1).id
self.assertTrue("Showing only the first" in repr(psser))
self.assertTrue(
repr(psser).startswith(repr(psser.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psser = ps.range(ReprTest.max_display_count + 1).id
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.range(ReprTest.max_display_count).id.rename()
self.assertTrue("Showing only the first" not in repr(psser))
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.range(ReprTest.max_display_count + 1).id.rename()
self.assertTrue("Showing only the first" in repr(psser))
self.assertTrue(
repr(psser).startswith(repr(psser.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psser = ps.range(ReprTest.max_display_count + 1).id.rename()
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count)]
).to_series()
self.assertTrue("Showing only the first" not in repr(psser))
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
).to_series()
self.assertTrue("Showing only the first" in repr(psser))
self.assertTrue(
repr(psser).startswith(repr(psser.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psser = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
).to_series()
self.assert_eq(repr(psser), repr(psser.to_pandas()))
def test_repr_indexes(self):
psidx = ps.range(ReprTest.max_display_count).index
self.assertTrue("Showing only the first" not in repr(psidx))
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
psidx = ps.range(ReprTest.max_display_count + 1).index
self.assertTrue("Showing only the first" in repr(psidx))
self.assertTrue(
repr(psidx).startswith(
repr(psidx.to_pandas().to_series().head(ReprTest.max_display_count).index)
)
)
with option_context("display.max_rows", None):
psidx = ps.range(ReprTest.max_display_count + 1).index
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
psidx = ps.MultiIndex.from_tuples([(100 * i, i) for i in range(ReprTest.max_display_count)])
self.assertTrue("Showing only the first" not in repr(psidx))
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
psidx = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
)
self.assertTrue("Showing only the first" in repr(psidx))
self.assertTrue(
repr(psidx).startswith(
repr(psidx.to_pandas().to_frame().head(ReprTest.max_display_count).index)
)
)
with option_context("display.max_rows", None):
psidx = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
)
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
def test_html_repr(self):
psdf = ps.range(ReprTest.max_display_count)
self.assertTrue("Showing only the first" not in psdf._repr_html_())
self.assertEqual(psdf._repr_html_(), psdf.to_pandas()._repr_html_())
psdf = ps.range(ReprTest.max_display_count + 1)
self.assertTrue("Showing only the first" in psdf._repr_html_())
with option_context("display.max_rows", None):
psdf = ps.range(ReprTest.max_display_count + 1)
self.assertEqual(psdf._repr_html_(), psdf.to_pandas()._repr_html_())
def test_repr_float_index(self):
psdf = ps.DataFrame(
{"a": np.random.rand(ReprTest.max_display_count)},
index=np.random.rand(ReprTest.max_display_count),
)
self.assertTrue("Showing only the first" not in repr(psdf))
self.assert_eq(repr(psdf), repr(psdf.to_pandas()))
self.assertTrue("Showing only the first" not in repr(psdf.a))
self.assert_eq(repr(psdf.a), repr(psdf.a.to_pandas()))
self.assertTrue("Showing only the first" not in repr(psdf.index))
self.assert_eq(repr(psdf.index), repr(psdf.index.to_pandas()))
self.assertTrue("Showing only the first" not in psdf._repr_html_())
self.assertEqual(psdf._repr_html_(), psdf.to_pandas()._repr_html_())
psdf = ps.DataFrame(
{"a": np.random.rand(ReprTest.max_display_count + 1)},
index=np.random.rand(ReprTest.max_display_count + 1),
)
self.assertTrue("Showing only the first" in repr(psdf))
self.assertTrue("Showing only the first" in repr(psdf.a))
self.assertTrue("Showing only the first" in repr(psdf.index))
self.assertTrue("Showing only the first" in psdf._repr_html_())
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_repr import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
kw217/omim | tools/python/city_radius.py | 53 | 4375 | import sys, os, math
import matplotlib.pyplot as plt
from optparse import OptionParser
cities = []
def strip(s):
return s.strip('\t\n ')
def load_data(path):
global cities
f = open(path, 'r')
lines = f.readlines()
    f.close()
for l in lines:
if l.startswith('#'):
continue
data = l.split('|')
if len(data) < 6:
continue
item = {}
item['name'] = strip(data[0])
item['population'] = int(strip(data[1]))
item['region'] = strip(data[2])
item['width'] = float(strip(data[3]))
item['height'] = float(strip(data[4]))
item['square'] = float(data[5])
cities.append(item)
# build plot
print "Cities count: %d" % len(cities)
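# The fit below models a city's linear size as an empirical power law of its
# population, size ~ mult * population ** (1 / base); findBest() scans a
# (base, mult) grid against the measured width/height/sqrt(area) columns to
# minimise the average absolute error.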
def formula(popul, base = 32, mult = 0.5):
#return math.exp(math.log(popul, base)) * mult
    return math.pow(popul, 1.0 / base) * mult
def avgDistance(approx, data):
dist = 0
for x in xrange(len(data)):
dist += math.fabs(approx[x] - data[x])
return dist / float(len(data))
def findBest(popul, data, minBase = 5, maxBase = 100, stepBase = 0.1, minMult = 0.01, maxMult = 1, stepMult = 0.01):
# try to find best parameters
base = minBase
minDist = -1
bestMult = minMult
bestBase = base
while base <= maxBase:
print "%.02f%% best mult: %f, best base: %f, best dist: %f" % (100 * (base - minBase) / (maxBase - minBase), bestMult, bestBase, minDist)
mult = minMult
while mult <= maxMult:
approx = []
for p in popul:
approx.append(formula(p, base, mult))
dist = avgDistance(approx, data)
if minDist < 0 or minDist > dist:
minDist = dist
bestBase = base
bestMult = mult
mult += stepMult
base += stepBase
return (bestBase, bestMult)
def process_data(steps_count, base, mult, bestFind = False, dataFlag = 0):
avgData = []
maxData = []
sqrData = []
population = []
maxPopulation = 0
minPopulation = -1
for city in cities:
p = city['population']
w = city['width']
h = city['height']
s = city['square']
population.append(p)
if p > maxPopulation:
maxPopulation = p
if minPopulation < 0 or p < minPopulation:
minPopulation = p
maxData.append(max([w, h]))
avgData.append((w + h) * 0.5)
sqrData.append(math.sqrt(s))
bestBase = base
bestMult = mult
if bestFind:
d = maxData
if dataFlag == 1:
d = avgData
elif dataFlag == 2:
d = sqrData
bestBase, bestMult = findBest(population, d)
print "Finished\n\nBest mult: %f, Best base: %f" % (bestMult, bestBase)
approx = []
population2 = []
v = minPopulation
step = (maxPopulation - minPopulation) / float(steps_count)
for i in xrange(0, steps_count):
approx.append(formula(v, bestBase, bestMult))
population2.append(v)
v += step
plt.plot(population, avgData, 'bo', population, maxData, 'ro', population, sqrData, 'go', population2, approx, 'y')
plt.axis([minPopulation, maxPopulation, 0, 100])
plt.xscale('log')
plt.show()
if __name__ == "__main__":
if len(sys.argv) < 3:
print 'city_radius.py <data_file> <steps>'
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename", default="city_popul_sqr.data",
help="source data file", metavar="path")
parser.add_option("-s", "--scan",
dest="best", default=False, action="store_true",
help="scan best values of mult and base")
parser.add_option('-m', "--mult",
dest='mult', default=1,
help='multiplier value')
parser.add_option('-b', '--base',
dest='base', default=3.6,
help="base value")
parser.add_option('-d', '--data',
default=0, dest='data',
help="Dataset to use on best values scan: 0 - max, 1 - avg, 2 - sqr")
(options, args) = parser.parse_args()
load_data(options.filename)
process_data(1000, float(options.base), float(options.mult), options.best, int(options.data))
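# Example invocations (illustrative; the data file must exist locally):
# python city_radius.py -f city_popul_sqr.data -b 3.6 -m 1
# python city_radius.py -s -d 1 # grid-search the best (base, mult) on avg data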
| apache-2.0 |
datachand/h2o-3 | h2o-py/tests/testdir_scikit_grid/pyunit_scal_pca_rf_grid.py | 5 | 1751 | import sys
sys.path.insert(1, "../../")
import h2o, tests
def scale_pca_rf_pipe():
from h2o.transforms.preprocessing import H2OScaler
from h2o.transforms.decomposition import H2OPCA
from h2o.estimators.random_forest import H2ORandomForestEstimator
from sklearn.pipeline import Pipeline
from sklearn.grid_search import RandomizedSearchCV
from h2o.cross_validation import H2OKFold
from h2o.model.regression import h2o_r2_score
from sklearn.metrics.scorer import make_scorer
from scipy.stats import randint
iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
# build transformation pipeline using sklearn's Pipeline and H2O transforms
pipe = Pipeline([("standardize", H2OScaler()),
("pca", H2OPCA(n_components=2)),
("rf", H2ORandomForestEstimator(seed=42,ntrees=50))])
params = {"standardize__center": [True, False], # Parameters to test
"standardize__scale": [True, False],
"pca__n_components": randint(2, iris[1:].shape[1]),
"rf__ntrees": randint(50,60),
"rf__max_depth": randint(4,8),
"rf__min_rows": randint(5,10),}
custom_cv = H2OKFold(iris, n_folds=5, seed=42)
random_search = RandomizedSearchCV(pipe, params,
n_iter=5,
scoring=make_scorer(h2o_r2_score),
cv=custom_cv,
random_state=42,
n_jobs=1)
random_search.fit(iris[1:],iris[0])
print random_search.best_estimator_
if __name__ == "__main__":
tests.run_test(sys.argv, scale_pca_rf_pipe)
| apache-2.0 |
rs2/pandas | pandas/tests/extension/test_string.py | 2 | 2671 | import string
import numpy as np
import pytest
import pandas as pd
from pandas.core.arrays.string_ import StringArray, StringDtype
from pandas.tests.extension import base
@pytest.fixture
def dtype():
return StringDtype()
@pytest.fixture
def data():
strings = np.random.choice(list(string.ascii_letters), size=100)
while strings[0] == strings[1]:
strings = np.random.choice(list(string.ascii_letters), size=100)
return StringArray._from_sequence(strings)
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return StringArray._from_sequence([pd.NA, "A"])
@pytest.fixture
def data_for_sorting():
return StringArray._from_sequence(["B", "C", "A"])
@pytest.fixture
def data_missing_for_sorting():
return StringArray._from_sequence(["B", pd.NA, "A"])
@pytest.fixture
def na_value():
return pd.NA
@pytest.fixture
def data_for_grouping():
return StringArray._from_sequence(["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"])
class TestDtype(base.BaseDtypeTests):
pass
class TestInterface(base.BaseInterfaceTests):
pass
class TestConstructors(base.BaseConstructorsTests):
pass
class TestReshaping(base.BaseReshapingTests):
pass
class TestGetitem(base.BaseGetitemTests):
pass
class TestSetitem(base.BaseSetitemTests):
pass
class TestMissing(base.BaseMissingTests):
pass
class TestNoReduce(base.BaseNoReduceTests):
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
if op_name in ["min", "max"]:
return None
s = pd.Series(data)
with pytest.raises(TypeError):
getattr(s, op_name)(skipna=skipna)
class TestMethods(base.BaseMethodsTests):
@pytest.mark.skip(reason="returns nullable")
def test_value_counts(self, all_data, dropna):
return super().test_value_counts(all_data, dropna)
class TestCasting(base.BaseCastingTests):
pass
class TestComparisonOps(base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
result = getattr(s, op_name)(other)
expected = getattr(s.astype(object), op_name)(other).astype("boolean")
self.assert_series_equal(result, expected)
def test_compare_scalar(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
self._compare_other(s, data, op_name, "abc")
class TestParsing(base.BaseParsingTests):
pass
class TestPrinting(base.BasePrintingTests):
pass
class TestGroupBy(base.BaseGroupbyTests):
pass
| bsd-3-clause |
jseabold/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
alexsavio/scikit-learn | examples/plot_digits_pipe.py | 70 | 1813 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
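# Illustrative follow-up (not in the original example): the winning parameters
# can be inspected directly after the fit, e.g.
# print(estimator.best_params_) # chosen pca__n_components and logistic__C
# print(estimator.best_score_) # cross-validated score of the best pipeline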
| bsd-3-clause |
alexandrucoman/bcbio-nextgen-vm | bcbiovm/provider/azure/azure_provider.py | 1 | 8597 | """Azure Cloud Provider for bcbiovm."""
# pylint: disable=no-self-use
import os
from bcbiovm import log as loggig
from bcbiovm.common import constant
from bcbiovm.common import exception
from bcbiovm.common import objects
from bcbiovm.common import utils as common_utils
from bcbiovm.provider import base
from bcbiovm.provider.common import bootstrap as common_bootstrap
from bcbiovm.provider.azure import storage as azure_storage
LOG = loggig.get_logger(__name__)
class AzureProvider(base.BaseCloudProvider):
"""Azure Provider for bcbiovm.
:ivar flavors: A dictionary with all the flavors available for
the current cloud provider.
Example:
::
flavors = {
"m3.large": Flavor(cpus=2, memory=3500),
"m3.xlarge": Flavor(cpus=4, memory=3500),
"m3.2xlarge": Flavor(cpus=8, memory=3500),
}
"""
# More information regarding Azure instances types can be found on the
# following link: https://goo.gl/mEjiC5
flavors = {
"ExtraSmall": objects.Flavor(cpus=1, memory=768),
"Small": objects.Flavor(cpus=1, memory=1792),
"Medium": objects.Flavor(cpus=2, memory=3584),
"Large": objects.Flavor(cpus=4, memory=7168),
"ExtraLarge": objects.Flavor(cpus=8, memory=14336),
# General Purpose: For websites, small-to-medium databases,
# and other everyday applications.
"A0": objects.Flavor(cpus=1, memory=768), # 0.75 GB
"A1": objects.Flavor(cpus=1, memory=1792), # 1.75 GB
"A2": objects.Flavor(cpus=2, memory=3584), # 3.50 GB
"A3": objects.Flavor(cpus=4, memory=7168), # 7.00 GB
"A4": objects.Flavor(cpus=8, memory=14336), # 14.00 GB
# Memory Intensive: For large databases, SharePoint server farms,
# and high-throughput applications.
"A5": objects.Flavor(cpus=2, memory=14336), # 14.00 GB
"A6": objects.Flavor(cpus=4, memory=28672), # 28.00 GB
"A7": objects.Flavor(cpus=8, memory=57344), # 56.00 GB
# Network optimized: Ideal for Message Passing Interface (MPI)
# applications, high-performance clusters,
# modeling and simulations and other compute
# or network intensive scenarios.
"A8": objects.Flavor(cpus=8, memory=57344), # 56.00 GB
"A9": objects.Flavor(cpus=16, memory=114688), # 112.00 GB
# Compute Intensive: For high-performance clusters, modeling
# and simulations, video encoding, and other
# compute or network intensive scenarios.
"A10": objects.Flavor(cpus=8, memory=57344), # 56.00 GB
"A11": objects.Flavor(cpus=16, memory=114688), # 112.00 GB
# Optimized compute: 60% faster CPUs, more memory, and local SSD
# General Purpose: For websites, small-to-medium databases,
# and other everyday applications.
"D1": objects.Flavor(cpus=1, memory=3584), # 3.50 GB
"D2": objects.Flavor(cpus=2, memory=7168), # 7.00 GB
"D3": objects.Flavor(cpus=4, memory=14336), # 14.00 GB
"D4": objects.Flavor(cpus=8, memory=28672), # 28.00 GB
# Memory Intensive: For large databases, SharePoint server farms,
# and high-throughput applications.
"D11": objects.Flavor(cpus=2, memory=14336), # 14.00 GB
"D12": objects.Flavor(cpus=4, memory=28672), # 28.00 GB
"D13": objects.Flavor(cpus=8, memory=57344), # 56.00 GB
"D14": objects.Flavor(cpus=16, memory=114688), # 112.00 GB
}
_STORAGE = {"AzureBlob": azure_storage.AzureBlob}
def __init__(self):
super(AzureProvider, self).__init__(name=constant.PROVIDER.AZURE)
self._biodata_template = ("https://bcbio.blob.core.windows.net/biodata"
"/prepped/{build}/{build}-{target}.tar.gz")
def get_storage_manager(self, name="AzureBlob"):
"""Return a cloud provider specific storage manager.
:param name: The name of the required storage manager.
"""
return self._STORAGE.get(name)()
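    # Illustrative usage sketch (not part of the original class): the returned
    # manager exposes the calls used in upload_biodata() below, e.g.
    #   manager = AzureProvider().get_storage_manager("AzureBlob")
    #   file_info = manager.parse_remote(biodata_url)
    #   manager.upload(path=archive, filename=file_info.blob,
    #                  container=file_info.container, context=context)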
def information(self, config, cluster):
"""
Get all the information available for this provider.
The returned information will be used to create a status report
regarding the bcbio instances.
:config: elasticluster config file
:cluster: cluster name
"""
raise exception.NotSupported(feature="Method information",
context="Azure provider")
    def colect_data(self, config, cluster, rawdir):
        """Collect from each instance the files which contain
        information regarding resource consumption.
:param config: elasticluster config file
:param cluster: cluster name
:param rawdir: directory where to copy raw collectl data files.
Notes:
The current workflow is the following:
- establish a SSH connection with the instance
- get information regarding the `collectl` files
- copy to local the files which contain new information
        These files will be used to generate system statistics
from bcbio runs. The statistics will contain information regarding
CPU, memory, network, disk I/O usage.
"""
raise exception.NotSupported(feature="Method collect_data",
context="Azure provider")
def resource_usage(self, bcbio_log, rawdir):
"""Generate system statistics from bcbio runs.
Parse the files obtained by the :meth colect_data: and put the
information in :class pandas.DataFrame:.
:param bcbio_log: local path to bcbio log file written by the run
:param rawdir: directory to put raw data files
:return: a tuple with two dictionaries, the first contains
an instance of :pandas.DataFrame: for each host and
the second one contains information regarding the
hardware configuration
:type return: tuple
"""
raise exception.NotSupported(feature="Method resource_usage",
context="Azure provider")
def bootstrap(self, config, cluster, reboot):
"""Install or update the bcbio-nextgen code and the tools
with the latest version available.
:param config: elasticluster config file
:param cluster: cluster name
:param reboot: whether to upgrade and restart the host OS
"""
bootstrap = common_bootstrap.Bootstrap(provider=self, config=config,
cluster_name=cluster,
reboot=reboot)
return bootstrap.run()
def upload_biodata(self, genome, target, source, context):
"""Upload biodata for a specific genome build and target to a storage
manager.
:param genome: Genome which should be uploaded.
        :param target: The piece of the genome that should be uploaded.
:param source: A list of directories which contain the information
that should be uploaded.
:param context: A dictionary that may contain useful information
for the cloud provider (credentials, headers etc).
"""
storage_manager = self.get_storage_manager()
biodata = self._biodata_template.format(build=genome, target=target)
        # Compress before entering the try block so the finally clause below
        # never references an unbound `archive` if compression fails.
        archive = common_utils.compress(source)
        try:
            file_info = storage_manager.parse_remote(biodata)
if storage_manager.exists(file_info.container, file_info.blob,
context):
LOG.info("The %(biodata)r build already exist",
{"biodata": file_info.blob})
return
LOG.info("Upload pre-prepared genome data: %(genome)s, "
"%(target)s:", {"genome": genome, "target": target})
storage_manager.upload(path=archive, filename=file_info.blob,
container=file_info.container,
context=context)
finally:
if os.path.exists(archive):
os.remove(archive)
| mit |
lucabaldini/ximpol | ximpol/config/cyg_x1_spherical_corona.py | 1 | 5178 | #!/usr/bin/env python
#
# Copyright (C) 2015--2016, the ximpol team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy
import sys
import os
import scipy
import scipy.signal
from ximpol.srcmodel.roi import xPointSource, xROIModel
from ximpol.srcmodel.spectrum import power_law
from ximpol.srcmodel.polarization import constant
from ximpol.core.spline import xInterpolatedUnivariateSpline
from ximpol.core.spline import xInterpolatedUnivariateSplineLinear
from ximpol import XIMPOL_CONFIG
from ximpol.utils.logging_ import logger
#Not sure that I have the right ra and dec for the source, check!!
CYGX1_RA = 7.56
CYGX1_DEC = 6.59
MIN_ENERGY = 0.1
MAX_ENERGY = 15.
#These are the files corresponding to the first version of the model
#POL_DEGREE_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'ascii', 'CygX1_poldegree_model.txt')
#POL_ANGLE_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'ascii', 'CygX1_polangle_model.txt')
#FLUX_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'ascii', 'CygX1_flux_model.txt')
#
model_type = 'spherical'
#These are the files sent on June 8, 2016, for 63 degree inclination
#POL_DEGREE_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'ascii', '%s_corona/cyg_x1_pol_degree_%s_corona_model_more_data.txt'%(model_type,model_type))
#POL_ANGLE_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'ascii', '%s_corona/cyg_x1_pol_angle_%s_corona_model_more_data.txt'%(model_type,model_type))
#FLUX_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'ascii', '%s_corona/cyg_x1_flux_%s_corona_model_more_data.txt'%(model_type,model_type))
#For 40 degree inclination
POL_DEGREE_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'ascii', '%s_corona/cyg_x1_pol_degree_%s_corona_model_40_inclination.txt'%(model_type,model_type))
POL_ANGLE_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'ascii', '%s_corona/cyg_x1_pol_angle_%s_corona_model_40_inclination.txt'%(model_type,model_type))
FLUX_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'ascii', '%s_corona/cyg_x1_flux_%s_corona_model_40_inclination.txt'%(model_type,model_type))
def polarization_degree(E, t, ra, dec):
return pol_degree_spline(E)
def polarization_angle(E, t, ra, dec):
return pol_angle_spline(E)
def energy_spectrum(E, t):
return _energy_spectrum(E)
# Build the polarization degree as a function of the energy.
_energy, _pol_degree = numpy.loadtxt(POL_DEGREE_FILE_PATH, unpack=True)
# Convert the polarization degree from per cent to a fraction.
_pol_degree /= 100.
#_pol_degree = _pol_degree
#print "Here are the energy and pol degree values"
#print _energy
#print
#print _pol_degree
# Filter the data points to reduce the noise.
#_pol_degree = scipy.signal.wiener(_pol_degree, 5)
_mask = (_energy >= MIN_ENERGY)*(_energy <= MAX_ENERGY)
_energy = _energy[_mask]
_pol_degree = _pol_degree[_mask]
fmt = dict(xname='Energy', yname='Polarization degree')
pol_degree_spline = xInterpolatedUnivariateSpline(_energy, _pol_degree, k=1, **fmt)
# Build the polarization angle as a function of the energy.
_energy, _pol_angle = numpy.loadtxt(POL_ANGLE_FILE_PATH, unpack=True)
# The model file lists the polarization angle in degrees; convert it to radians.
_pol_angle = numpy.deg2rad(_pol_angle)
#_pol_angle = _pol_angle
#print "Here are the energy and pol angle values"
#print _energy
#print
#print _pol_angle
# Filter the data points to reduce the noise.
#_pol_angle = scipy.signal.wiener(_pol_angle, 2)
_mask = (_energy >= MIN_ENERGY)*(_energy <= MAX_ENERGY)
_energy = _energy[_mask]
_pol_angle = _pol_angle[_mask]
fmt = dict(xname='Energy', yname='Polarization angle [rad]')
pol_angle_spline = xInterpolatedUnivariateSpline(_energy, _pol_angle, k=1, **fmt)
#"""
#put together the flux for the source starting from the txt file
_energy, _flux = numpy.loadtxt(FLUX_FILE_PATH, unpack=True)
_mask = (_energy >= MIN_ENERGY)*(_energy <= MAX_ENERGY)
_energy = _energy[_mask]
_flux = _flux[_mask]
fmt = dict(xname='Energy', xunits='keV', yname='Flux',
yunits='cm$^{-2}$ s$^{-1}$ keV$^{-1}$')
_energy_spectrum = xInterpolatedUnivariateSpline(_energy, _flux, **fmt)
#"""
ROI_MODEL = xROIModel(CYGX1_RA, CYGX1_DEC)
src = xPointSource('Cyg-X1', ROI_MODEL.ra, ROI_MODEL.dec, energy_spectrum,
polarization_degree, polarization_angle)
ROI_MODEL.add_source(src)
if __name__ == '__main__':
print(ROI_MODEL)
from ximpol.utils.matplotlib_ import pyplot as plt
fig = plt.figure('Spectrum')
_energy_spectrum.plot(show=False)
fig = plt.figure('Polarization angle')
pol_angle_spline.plot(show=False)
fig = plt.figure('Polarization degree')
pol_degree_spline.plot(show=False)
plt.show()
| gpl-3.0 |
kaslusimoes/SummerSchool2016 | python/almostnewsimulation.py | 1 | 7049 | #! /bin/env python2
# coding: utf-8
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import random as rd
import os
from pickle import dump, load
class Data:
def __init__(self):
self.m_list1 = []
self.m_list2 = []
N = 100
M = 100
MAX = N + M + 1
MAX_EDGE = 380
MAX_DEG = 450
ITERATIONS = 50000
S1 = 0.
T1 = 1.
S2 = 0.
T2 = 1.
beta = 0.5
NUMGRAPH = 10
NSIM = 10
NAME = "finetuning"
# initial fraction of cooperators
p1, p2 = .5, .5
# number of cooperators
cc1, cc2 = 0, 0
# fraction of cooperators
r1, r2 = np.zeros(ITERATIONS + 1, dtype=np.float), np.zeros(ITERATIONS + 1, dtype=np.float)
payoff = np.array(
[
[1, S1],
[T1, 0]
]
, dtype=np.float, ndmin=2)
payoff2 = np.array(
[
[1, S2],
[T2, 0]
]
, dtype=np.float, ndmin=2)
def interaction(x, y):
if x < N:
return payoff[g.node[x]['strategy']][g.node[y]['strategy']]
else:
return payoff2[g.node[x]['strategy']][g.node[y]['strategy']]
def change_prob(x, y):
return 1. / (1 + np.exp(-beta * (y - x)))
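# change_prob is the Fermi imitation rule from evolutionary game theory: the
# probability that a node copies a neighbour grows smoothly with the payoff
# difference (y - x); beta sets the selection intensity (beta -> 0 gives random
# imitation with probability 1/2, large beta approaches always copying the
# better-scoring strategy).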
def complete():
return nx.complete_bipartite_graph(N, M)
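# random() builds a random bipartite graph: nodes 0..N-1 form one class,
# N..N+M-1 the other, and distinct cross edges are added uniformly at random
# until MAX_EDGE edges exist.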
def random():
g = nx.Graph()
g.add_nodes_from(np.arange(0, N + M, 1, dtype=np.int))
while g.number_of_edges() < MAX_EDGE:
a, b = rd.randint(0, N - 1), rd.randint(N, N + M - 1)
if b not in g[a]:
g.add_edge(a, b)
return g
def set_initial_strategy(g):
global cc1, cc2
coop = range(0, int(p1 * N), 1) + range(N, int(p2 * M) + N, 1)
cc1 = int(p1 * N)
defect = set(range(0, N + M, 1)) - set(coop)
cc2 = int(p2 * M)
coop = dict(zip(coop, len(coop) * [0]))
defect = dict(zip(defect, len(defect) * [1]))
nx.set_node_attributes(g, 'strategy', coop)
nx.set_node_attributes(g, 'strategy', defect)
def fitness(x):
ret = 0
for i in g.neighbors(x):
ret += interaction(x, i)
return ret
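# simulate() runs asynchronous imitation dynamics: each step picks a node
# (alternating between the two bipartite classes), then a same-class node two
# hops away (a random neighbour of a random neighbour), and copies that node's
# strategy with the Fermi probability above; r1 and r2 record the cooperator
# fraction of each class after every step.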
def simulate():
global cc1, cc2
it = 0
while it < ITERATIONS:
it += 1
if it % 2:
a = rd.randint(0, N - 1)
else:
a = rd.randint(N, N + M - 1)
if len(g.neighbors(a)) == 0:
it -= 1
continue
b = g.neighbors(a)[rd.randint(0, len(g.neighbors(a)) - 1)]
b = g.neighbors(b)[rd.randint(0, len(g.neighbors(b)) - 1)]
if a == b:
it -= 1
continue
assert (a < N and b < N) or (a >= N and b >= N)
if g.node[a]['strategy'] != g.node[b]['strategy']:
fa, fb = fitness(a), fitness(b)
l = np.random.random()
p = change_prob(fa, fb)
if l <= p:
if a < N:
if g.node[a]['strategy'] == 0:
cc1 -= 1
else:
cc1 += 1
else:
if g.node[a]['strategy'] == 0:
cc2 -= 1
else:
cc2 += 1
nx.set_node_attributes(g, 'strategy', { a:g.node[b]['strategy'] })
r1[it] = float(cc1) / N
r2[it] = float(cc2) / M
nbins = 20
T1range = T2range = np.linspace(1, 2, nbins)
S1range = S2range = np.linspace(-1, 0, nbins)
mag1 = np.zeros((nbins, nbins), dtype=np.float)
mag2 = np.zeros((nbins, nbins), dtype=np.float)
for G in xrange(NUMGRAPH):
g = random()
i = 0
data = Data()
for S1 in S1range:
S2 = S1
j = 0
for T1 in T1range:
global payoff, payoff2
T2 = T1
payoff = np.array([
[1, S1],
[T1, 0]], dtype=np.float, ndmin=2)
payoff2 = np.array([
[1, S2],
[T2, 0]], dtype=np.float, ndmin=2)
for SS in xrange(NSIM):
mag1 = np.zeros((nbins, nbins), dtype=np.float)
mag2 = np.zeros((nbins, nbins), dtype=np.float)
set_initial_strategy(g)
simulate()
mag1[i][j] = np.mean(r1[-1000:])
mag2[i][j] = np.mean(r2[-1000:])
data.m_list1.append((S1, T1, S2, T2, mag1))
data.m_list2.append((S1, T1, S2, T2, mag2))
j += 1
i += 1
f = open('random graph {1} {0}.grph'.format(G, NAME), 'w')
dump(data,f,2)
f.close()
print("Finished Graph {0}".format(G))
g = complete()
i = 0
data = Data()
for S1 in S1range:
j = 0
for T1 in T1range:
global payoff, payoff2
for S2 in S2range:
for T2 in T2range:
payoff = np.array([
[1, S1],
[T1, 0]], dtype=np.float, ndmin=2)
payoff2 = np.array([
[1, S2],
[T2, 0]], dtype=np.float, ndmin=2)
for SS in xrange(NSIM):
mag1 = np.zeros((nbins, nbins), dtype=np.float)
mag2 = np.zeros((nbins, nbins), dtype=np.float)
set_initial_strategy(g)
simulate()
mag1[i][j] = np.mean(r1[-1000:])
mag2[i][j] = np.mean(r2[-1000:])
data.m_list1.append((S1, T1, S2, T2, mag1))
data.m_list2.append((S1, T1, S2, T2, mag2))
j += 1
i += 1
f = open('complete graph {1} {0}.grph'.format(G, NAME), 'w')
dump(data,f,2)
f.close()
print("Finished Graph {0}".format(G))
# p = './graphs/'
# sc_graphs = []
# for _,_,c in os.walk(p):
# for a,x in enumerate(c):
# pp = os.path.join(p,x)
# f = open(pp, 'r')
# g = load(f)
# sc_graphs.append(g)
# for G, g in sc_graphs:
# i = 0
# data = Data()
# for S1 in S1range:
# j = 0
# for T1 in T1range:
# global payoff, payoff2
# for S2 in S2range:
# for T2 in T2range:
# payoff = np.array([
# [1, S1],
# [T1, 0]], dtype=np.float, ndmin=2)
# payoff2 = np.array([
# [1, S2],
# [T2, 0]], dtype=np.float, ndmin=2)
# for SS in xrange(NSIM):
# mag1 = np.zeros((nbins, nbins), dtype=np.float)
# mag2 = np.zeros((nbins, nbins), dtype=np.float)
# set_initial_strategy(g)
# simulate()
# mag1[i][j] = np.mean(r1[-1000:])
# mag2[i][j] = np.mean(r2[-1000:])
# data.m_list1.append((S1, T1, S2, T2, mag1))
# data.m_list2.append((S1, T1, S2, T2, mag2))
# j += 1
# i += 1
# f = open('scalefree graph {1} {0}.grph'.format(G, NAME), 'w')
# dump(data,f,2)
# f.close()
# print("Finished Graph {0}".format(G))
| apache-2.0 |
clarkfitzg/xray | xray/test/test_variable.py | 2 | 35943 | from collections import namedtuple
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from xray import Variable, Dataset, DataArray
from xray.core import indexing
from xray.core.variable import (Coordinate, as_variable, _as_compatible_data)
from xray.core.indexing import (NumpyIndexingAdapter, PandasIndexAdapter,
LazilyIndexedArray)
from xray.core.pycompat import PY3, OrderedDict
from . import TestCase, source_ndarray
class VariableSubclassTestCases(object):
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(['time'], data, {'foo': 'bar'})
self.assertEqual(v.dims, ('time',))
self.assertArrayEqual(v.values, data)
self.assertEqual(v.dtype, float)
self.assertEqual(v.shape, (10,))
self.assertEqual(v.size, 10)
self.assertEqual(v.nbytes, 80)
self.assertEqual(v.ndim, 1)
self.assertEqual(len(v), 10)
self.assertEqual(v.attrs, {'foo': u'bar'})
def test_attrs(self):
v = self.cls(['time'], 0.5 * np.arange(10))
self.assertEqual(v.attrs, {})
attrs = {'foo': 'bar'}
v.attrs = attrs
self.assertEqual(v.attrs, attrs)
self.assertIsInstance(v.attrs, OrderedDict)
v.attrs['foo'] = 'baz'
self.assertEqual(v.attrs['foo'], 'baz')
def test_getitem_dict(self):
v = self.cls(['x'], np.random.randn(5))
actual = v[{'x': 0}]
expected = v[0]
self.assertVariableIdentical(expected, actual)
def assertIndexedLikeNDArray(self, variable, expected_value0,
expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
self.assertEqual(variable[0].shape, ())
self.assertEqual(variable[0].ndim, 0)
self.assertEqual(variable[0].size, 1)
# test identity
self.assertTrue(variable.equals(variable.copy()))
self.assertTrue(variable.identical(variable.copy()))
# check value is equal for both ndarray and Variable
self.assertEqual(variable.values[0], expected_value0)
self.assertEqual(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
self.assertEqual(type(variable.values[0]), type(expected_value0))
self.assertEqual(type(variable[0].values), type(expected_value0))
else:
self.assertEqual(variable.values[0].dtype, expected_dtype)
self.assertEqual(variable[0].values.dtype, expected_dtype)
def test_index_0d_int(self):
for value, dtype in [(0, np.int_),
(np.int32(0), np.int32)]:
x = self.cls(['x'], [value])
self.assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_),
(np.float32(0.5), np.float32)]:
x = self.cls(['x'], [value])
self.assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
for value, dtype in [('foo', np.dtype('U3' if PY3 else 'S3')),
(u'foo', np.dtype('U3'))]:
x = self.cls(['x'], [value])
self.assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(['x'], [d])
self.assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(['x'], [np.datetime64(d)])
self.assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
x = self.cls(['x'], pd.DatetimeIndex([d]))
self.assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(['x'], [np.timedelta64(td)])
self.assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
x = self.cls(['x'], pd.to_timedelta([td]))
self.assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
def test_index_0d_not_a_time(self):
d = np.datetime64('NaT')
x = self.cls(['x'], [d])
self.assertIndexedLikeNDArray(x, d, None)
def test_index_0d_object(self):
class HashableItemWrapper(object):
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return '%s(item=%r)' % (type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls('x', [item])
self.assertIndexedLikeNDArray(x, item)
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range('2011-09-01', periods=10)
for dates in [date_range, date_range.values,
date_range.to_pydatetime()]:
expected = self.cls('t', dates)
for times in [[expected[i] for i in range(10)],
[expected[i:(i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)]]:
actual = Variable.concat(times, 't')
self.assertEqual(expected.dtype, actual.dtype)
self.assertArrayEqual(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls('time', pd.date_range('2000-01-01', periods=5))
expected = np.datetime64('2000-01-01T00Z', 'ns')
self.assertEqual(x[0].values, expected)
def test_datetime64_conversion(self):
times = pd.date_range('2000-01-01', periods=3)
for values, preserve_source in [
(times, False),
(times.values, True),
(times.values.astype('datetime64[s]'), False),
(times.to_pydatetime(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
if preserve_source and self.cls is Variable:
self.assertTrue(same_source)
else:
self.assertFalse(same_source)
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, False),
(times.values, True),
(times.values.astype('timedelta64[s]'), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
if preserve_source and self.cls is Variable:
self.assertTrue(same_source)
else:
self.assertFalse(same_source)
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls('x', data)
self.assertEqual(actual.dtype, data.dtype)
def test_pandas_data(self):
v = self.cls(['x'], pd.Series([0, 1, 2], index=[3, 2, 1]))
self.assertVariableIdentical(v, v[[0, 1, 2]])
v = self.cls(['x'], pd.Index([0, 1, 2]))
self.assertEqual(v[0].values, v.values[0])
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
v = self.cls(['x'], x)
# unary ops
self.assertVariableIdentical(v, +v)
self.assertVariableIdentical(v, abs(v))
self.assertArrayEqual((-v).values, -x)
        # binary ops with numbers
self.assertVariableIdentical(v, v + 0)
self.assertVariableIdentical(v, 0 + v)
self.assertVariableIdentical(v, v * 1)
self.assertArrayEqual((v > 2).values, x > 2)
self.assertArrayEqual((0 == v).values, 0 == x)
self.assertArrayEqual((v - 1).values, x - 1)
self.assertArrayEqual((1 - v).values, 1 - x)
# binary ops with numpy arrays
self.assertArrayEqual((v * x).values, x ** 2)
self.assertArrayEqual((x * v).values, x ** 2)
self.assertArrayEqual(v - y, v - 1)
self.assertArrayEqual(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(['x'], x, {'units': 'meters'})
self.assertVariableIdentical(v, +v2)
# binary ops with all variables
self.assertArrayEqual(v + v, 2 * v)
w = self.cls(['x'], y, {'foo': 'bar'})
self.assertVariableIdentical(v + w, self.cls(['x'], x + y))
self.assertArrayEqual((v * w).values, x * y)
# something complicated
self.assertArrayEqual((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
self.assertEqual(float, (+v).dtype)
self.assertEqual(float, (+v).values.dtype)
self.assertEqual(float, (0 + v).dtype)
self.assertEqual(float, (0 + v).values.dtype)
# check types of returned data
self.assertIsInstance(+v, Variable)
self.assertNotIsInstance(+v, Coordinate)
self.assertIsInstance(0 + v, Variable)
self.assertNotIsInstance(0 + v, Coordinate)
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(['x'], x)
actual = v.sum()
expected = Variable((), 10)
self.assertVariableIdentical(expected, actual)
self.assertIs(type(actual), Variable)
def test_array_interface(self):
x = np.arange(5)
v = self.cls(['x'], x)
self.assertArrayEqual(np.asarray(v), x)
# test patched in methods
self.assertArrayEqual(v.astype(float), x.astype(float))
self.assertVariableIdentical(v.argsort(), v)
self.assertVariableIdentical(v.clip(2, 3), self.cls('x', x.clip(2, 3)))
# test ufuncs
self.assertVariableIdentical(np.sin(v), self.cls(['x'], np.sin(x)))
self.assertIsInstance(np.sin(v), Variable)
self.assertNotIsInstance(np.sin(v), Coordinate)
def example_1d_objects(self):
for data in [range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
pd.date_range('2000-01-01', periods=3),
np.array(['a', 'b', 'c'], dtype=object)]:
yield (self.cls('x', data), data)
def test___array__(self):
for v, data in self.example_1d_objects():
self.assertArrayEqual(v.values, np.asarray(data))
self.assertArrayEqual(np.asarray(v), np.asarray(data))
self.assertEqual(v[0].values, np.asarray(data)[0])
self.assertEqual(np.asarray(v[0]), np.asarray(data)[0])
def test_equals_all_dtypes(self):
for v, _ in self.example_1d_objects():
v2 = v.copy()
self.assertTrue(v.equals(v2))
self.assertTrue(v.identical(v2))
self.assertTrue(v[0].equals(v2[0]))
self.assertTrue(v[0].identical(v2[0]))
self.assertTrue(v[:2].equals(v2[:2]))
self.assertTrue(v[:2].identical(v2[:2]))
def test_eq_all_dtypes(self):
# ensure that we don't choke on comparisons for which numpy returns
# scalars
expected = self.cls('x', 3 * [False])
for v, _ in self.example_1d_objects():
actual = 'z' == v
self.assertVariableIdentical(expected, actual)
actual = ~('z' != v)
self.assertVariableIdentical(expected, actual)
def test_concat(self):
x = np.arange(5)
y = np.arange(5, 10)
v = self.cls(['a'], x)
w = self.cls(['a'], y)
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat([v, w], 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
with self.assertRaisesRegexp(ValueError, 'inconsistent dimensions'):
Variable.concat([v, Variable(['c'], y)], 'b')
# test indexers
actual = Variable.concat([v, w], indexers=[range(0, 10, 2), range(1, 10, 2)], dim='a')
expected = Variable('a', np.array([x, y]).ravel(order='F'))
self.assertVariableIdentical(expected, actual)
# test concatenating along a dimension
v = Variable(['time', 'x'], np.random.random((10, 8)))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:6], v[6:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:1], v[1:]], 'time'))
# test dimension order
self.assertVariableIdentical(v, Variable.concat([v[:, :5], v[:, 5:]], 'x'))
with self.assertRaisesRegexp(ValueError, 'same number of dimensions'):
Variable.concat([v[:, 0], v[:, 1:]], 'x')
def test_concat_attrs(self):
# different or conflicting attributes should be removed
v = self.cls('a', np.arange(5), {'foo': 'bar'})
w = self.cls('a', np.ones(5))
expected = self.cls('a', np.concatenate([np.arange(5), np.ones(5)]))
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 2
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 'bar'
expected.attrs['foo'] = 'bar'
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
def test_concat_fixed_len_str(self):
# regression test for #217
for kind in ['S', 'U']:
x = self.cls('animal', np.array(['horse'], dtype=kind))
y = self.cls('animal', np.array(['aardvark'], dtype=kind))
actual = Variable.concat([x, y], 'animal')
expected = Variable(
'animal', np.array(['horse', 'aardvark'], dtype=kind))
self.assertVariableEqual(expected, actual)
def test_concat_number_strings(self):
# regression test for #305
a = self.cls('x', ['0', '1', '2'])
b = self.cls('x', ['3', '4'])
actual = Variable.concat([a, b], dim='x')
expected = Variable('x', np.arange(5).astype(str).astype(object))
self.assertVariableIdentical(expected, actual)
self.assertEqual(expected.dtype, object)
self.assertEqual(type(expected.values[0]), str)
def test_copy(self):
v = self.cls('x', 0.5 * np.arange(10), {'foo': 'bar'})
for deep in [True, False]:
w = v.copy(deep=deep)
self.assertIs(type(v), type(w))
self.assertVariableIdentical(v, w)
self.assertEqual(v.dtype, w.dtype)
if self.cls is Variable:
if deep:
self.assertIsNot(source_ndarray(v.values),
source_ndarray(w.values))
else:
self.assertIs(source_ndarray(v.values),
source_ndarray(w.values))
self.assertVariableIdentical(v, copy(v))
class TestVariable(TestCase, VariableSubclassTestCases):
cls = staticmethod(Variable)
def setUp(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def test_data_and_values(self):
v = Variable(['time', 'x'], self.d)
self.assertArrayEqual(v.data, self.d)
self.assertArrayEqual(v.values, self.d)
self.assertIs(source_ndarray(v.values), self.d)
with self.assertRaises(ValueError):
# wrong size
v.values = np.random.random(5)
d2 = np.random.random((10, 3))
v.values = d2
self.assertIs(source_ndarray(v.values), d2)
d3 = np.random.random((10, 3))
v.data = d3
self.assertIs(source_ndarray(v.data), d3)
def test_numpy_same_methods(self):
v = Variable([], np.float32(0.0))
self.assertEqual(v.item(), 0)
self.assertIs(type(v.item()), float)
v = Coordinate('x', np.arange(5))
self.assertEqual(2, v.searchsorted(2))
def test_datetime64_conversion_scalar(self):
expected = np.datetime64('2000-01-01T00:00:00Z', 'ns')
for values in [
np.datetime64('2000-01-01T00Z'),
pd.Timestamp('2000-01-01T00'),
datetime(2000, 1, 1),
]:
v = Variable([], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertEqual(v.values, expected)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
def test_timedelta64_conversion_scalar(self):
expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, 'ns')
for values in [
np.timedelta64(1, 'D'),
pd.Timedelta('1 day'),
timedelta(days=1),
]:
v = Variable([], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertEqual(v.values, expected)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
def test_0d_str(self):
v = Variable([], u'foo')
self.assertEqual(v.dtype, np.dtype('U3'))
self.assertEqual(v.values, 'foo')
v = Variable([], np.string_('foo'))
self.assertEqual(v.dtype, np.dtype('S3'))
self.assertEqual(v.values, bytes('foo', 'ascii') if PY3 else 'foo')
def test_0d_datetime(self):
v = Variable([], pd.Timestamp('2000-01-01'))
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertEqual(v.values, np.datetime64('2000-01-01T00Z', 'ns'))
def test_0d_timedelta(self):
for td in [pd.to_timedelta('1s'), np.timedelta64(1, 's')]:
v = Variable([], td)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertEqual(v.values, np.timedelta64(10 ** 9, 'ns'))
def test_equals_and_identical(self):
d = np.random.rand(10, 3)
d[0, 0] = np.nan
v1 = Variable(('dim1', 'dim2'), data=d,
attrs={'att1': 3, 'att2': [1, 2, 3]})
v2 = Variable(('dim1', 'dim2'), data=d,
attrs={'att1': 3, 'att2': [1, 2, 3]})
self.assertTrue(v1.equals(v2))
self.assertTrue(v1.identical(v2))
v3 = Variable(('dim1', 'dim3'), data=d)
self.assertFalse(v1.equals(v3))
v4 = Variable(('dim1', 'dim2'), data=d)
self.assertTrue(v1.equals(v4))
self.assertFalse(v1.identical(v4))
v5 = deepcopy(v1)
v5.values[:] = np.random.rand(10, 3)
self.assertFalse(v1.equals(v5))
self.assertFalse(v1.equals(None))
self.assertFalse(v1.equals(d))
self.assertFalse(v1.identical(None))
self.assertFalse(v1.identical(d))
def test_broadcast_equals(self):
v1 = Variable((), np.nan)
v2 = Variable(('x'), [np.nan, np.nan])
self.assertTrue(v1.broadcast_equals(v2))
self.assertFalse(v1.equals(v2))
self.assertFalse(v1.identical(v2))
v3 = Variable(('x'), [np.nan])
self.assertTrue(v1.broadcast_equals(v3))
self.assertFalse(v1.equals(v3))
self.assertFalse(v1.identical(v3))
self.assertFalse(v1.broadcast_equals(None))
v4 = Variable(('x'), [np.nan] * 3)
self.assertFalse(v2.broadcast_equals(v4))
def test_as_variable(self):
data = np.arange(10)
expected = Variable('x', data)
self.assertVariableIdentical(expected, as_variable(expected))
ds = Dataset({'x': expected})
self.assertVariableIdentical(expected, as_variable(ds['x']))
self.assertNotIsInstance(ds['x'], Variable)
self.assertIsInstance(as_variable(ds['x']), Variable)
self.assertIsInstance(as_variable(ds['x'], strict=False), DataArray)
FakeVariable = namedtuple('FakeVariable', 'values dims')
fake_xarray = FakeVariable(expected.values, expected.dims)
self.assertVariableIdentical(expected, as_variable(fake_xarray))
xarray_tuple = (expected.dims, expected.values)
self.assertVariableIdentical(expected, as_variable(xarray_tuple))
with self.assertRaisesRegexp(TypeError, 'cannot convert arg'):
as_variable(tuple(data))
with self.assertRaisesRegexp(TypeError, 'cannot infer .+ dimensions'):
as_variable(data)
actual = as_variable(data, key='x')
self.assertVariableIdentical(expected, actual)
actual = as_variable(0)
expected = Variable([], 0)
self.assertVariableIdentical(expected, actual)
def test_repr(self):
v = Variable(['time', 'x'], [[1, 2, 3], [4, 5, 6]], {'foo': 'bar'})
expected = dedent("""
<xray.Variable (time: 2, x: 3)>
array([[1, 2, 3],
[4, 5, 6]])
Attributes:
foo: bar
""").strip()
self.assertEqual(expected, repr(v))
def test_repr_lazy_data(self):
v = Variable('x', LazilyIndexedArray(np.arange(2e5)))
self.assertIn('200000 values with dtype', repr(v))
self.assertIsInstance(v._data, LazilyIndexedArray)
def test_items(self):
data = np.random.random((10, 11))
v = Variable(['x', 'y'], data)
# test slicing
self.assertVariableIdentical(v, v[:])
self.assertVariableIdentical(v, v[...])
self.assertVariableIdentical(Variable(['y'], data[0]), v[0])
self.assertVariableIdentical(Variable(['x'], data[:, 0]), v[:, 0])
self.assertVariableIdentical(Variable(['x', 'y'], data[:3, :2]),
v[:3, :2])
# test array indexing
x = Variable(['x'], np.arange(10))
y = Variable(['y'], np.arange(11))
self.assertVariableIdentical(v, v[x.values])
self.assertVariableIdentical(v, v[x])
self.assertVariableIdentical(v[:3], v[x < 3])
self.assertVariableIdentical(v[:, 3:], v[:, y >= 3])
self.assertVariableIdentical(v[:3, 3:], v[x < 3, y >= 3])
self.assertVariableIdentical(v[:3, :2], v[x[:3], y[:2]])
self.assertVariableIdentical(v[:3, :2], v[range(3), range(2)])
# test iteration
for n, item in enumerate(v):
self.assertVariableIdentical(Variable(['y'], data[n]), item)
with self.assertRaisesRegexp(TypeError, 'iteration over a 0-d'):
iter(Variable([], 0))
# test setting
v.values[:] = 0
self.assertTrue(np.all(v.values == 0))
# test orthogonal setting
v[range(10), range(11)] = 1
self.assertArrayEqual(v.values, np.ones((10, 11)))
def test_isel(self):
v = Variable(['time', 'x'], self.d)
self.assertVariableIdentical(v.isel(time=slice(None)), v)
self.assertVariableIdentical(v.isel(time=0), v[0])
self.assertVariableIdentical(v.isel(time=slice(0, 3)), v[:3])
self.assertVariableIdentical(v.isel(x=0), v[:, 0])
with self.assertRaisesRegexp(ValueError, 'do not exist'):
v.isel(not_a_dim=0)
def test_index_0d_numpy_string(self):
        # regression test to verify our workaround for indexing 0d strings
v = Variable([], np.string_('asdf'))
self.assertVariableIdentical(v[()], v)
def test_transpose(self):
v = Variable(['time', 'x'], self.d)
v2 = Variable(['x', 'time'], self.d.T)
self.assertVariableIdentical(v, v2.transpose())
self.assertVariableIdentical(v.transpose(), v.T)
x = np.random.randn(2, 3, 4, 5)
w = Variable(['a', 'b', 'c', 'd'], x)
w2 = Variable(['d', 'b', 'c', 'a'], np.einsum('abcd->dbca', x))
self.assertEqual(w2.shape, (5, 3, 4, 2))
self.assertVariableIdentical(w2, w.transpose('d', 'b', 'c', 'a'))
self.assertVariableIdentical(w, w2.transpose('a', 'b', 'c', 'd'))
w3 = Variable(['b', 'c', 'd', 'a'], np.einsum('abcd->bcda', x))
self.assertVariableIdentical(w, w3.transpose('a', 'b', 'c', 'd'))
def test_squeeze(self):
v = Variable(['x', 'y'], [[1]])
self.assertVariableIdentical(Variable([], 1), v.squeeze())
self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze('x'))
self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze(['x']))
self.assertVariableIdentical(Variable(['x'], [1]), v.squeeze('y'))
self.assertVariableIdentical(Variable([], 1), v.squeeze(['x', 'y']))
v = Variable(['x', 'y'], [[1, 2]])
self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze())
self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze('x'))
with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
v.squeeze('y')
def test_get_axis_num(self):
v = Variable(['x', 'y', 'z'], np.random.randn(2, 3, 4))
self.assertEqual(v.get_axis_num('x'), 0)
self.assertEqual(v.get_axis_num(['x']), (0,))
self.assertEqual(v.get_axis_num(['x', 'y']), (0, 1))
self.assertEqual(v.get_axis_num(['z', 'y', 'x']), (2, 1, 0))
with self.assertRaisesRegexp(ValueError, 'not found in array dim'):
v.get_axis_num('foobar')
def test_expand_dims(self):
v = Variable(['x'], [0, 1])
actual = v.expand_dims(['x', 'y'])
expected = Variable(['x', 'y'], [[0], [1]])
self.assertVariableIdentical(actual, expected)
actual = v.expand_dims(['y', 'x'])
self.assertVariableIdentical(actual, expected.T)
actual = v.expand_dims(OrderedDict([('x', 2), ('y', 2)]))
expected = Variable(['x', 'y'], [[0, 0], [1, 1]])
self.assertVariableIdentical(actual, expected)
v = Variable(['foo'], [0, 1])
actual = v.expand_dims('foo')
expected = v
self.assertVariableIdentical(actual, expected)
with self.assertRaisesRegexp(ValueError, 'must be a superset'):
v.expand_dims(['z'])
def test_broadcasting_math(self):
x = np.random.randn(2, 3)
v = Variable(['a', 'b'], x)
# 1d to 2d broadcasting
self.assertVariableIdentical(
v * v,
Variable(['a', 'b'], np.einsum('ab,ab->ab', x, x)))
self.assertVariableIdentical(
v * v[0],
Variable(['a', 'b'], np.einsum('ab,b->ab', x, x[0])))
self.assertVariableIdentical(
v[0] * v,
Variable(['b', 'a'], np.einsum('b,ab->ba', x[0], x)))
self.assertVariableIdentical(
v[0] * v[:, 0],
Variable(['b', 'a'], np.einsum('b,a->ba', x[0], x[:, 0])))
# higher dim broadcasting
y = np.random.randn(3, 4, 5)
w = Variable(['b', 'c', 'd'], y)
self.assertVariableIdentical(
v * w, Variable(['a', 'b', 'c', 'd'],
np.einsum('ab,bcd->abcd', x, y)))
self.assertVariableIdentical(
w * v, Variable(['b', 'c', 'd', 'a'],
np.einsum('bcd,ab->bcda', y, x)))
self.assertVariableIdentical(
v * w[0], Variable(['a', 'b', 'c', 'd'],
np.einsum('ab,cd->abcd', x, y[0])))
def test_broadcasting_failures(self):
a = Variable(['x'], np.arange(10))
b = Variable(['x'], np.arange(5))
c = Variable(['x', 'x'], np.arange(100).reshape(10, 10))
with self.assertRaisesRegexp(ValueError, 'mismatched lengths'):
a + b
with self.assertRaisesRegexp(ValueError, 'duplicate dimensions'):
a + c
def test_inplace_math(self):
x = np.arange(5)
v = Variable(['x'], x)
v2 = v
v2 += 1
self.assertIs(v, v2)
# since we provided an ndarray for data, it is also modified in-place
self.assertIs(source_ndarray(v.values), x)
self.assertArrayEqual(v.values, np.arange(5) + 1)
with self.assertRaisesRegexp(ValueError, 'dimensions cannot change'):
v += Variable('y', np.arange(5))
def test_reduce(self):
v = Variable(['x', 'y'], self.d, {'ignored': 'attributes'})
self.assertVariableIdentical(v.reduce(np.std, 'x'),
Variable(['y'], self.d.std(axis=0)))
self.assertVariableIdentical(v.reduce(np.std, axis=0),
v.reduce(np.std, dim='x'))
self.assertVariableIdentical(v.reduce(np.std, ['y', 'x']),
Variable([], self.d.std(axis=(0, 1))))
self.assertVariableIdentical(v.reduce(np.std),
Variable([], self.d.std()))
self.assertVariableIdentical(
v.reduce(np.mean, 'x').reduce(np.std, 'y'),
Variable([], self.d.mean(axis=0).std()))
self.assertVariableIdentical(v.mean('x'), v.reduce(np.mean, 'x'))
with self.assertRaisesRegexp(ValueError, 'cannot supply both'):
v.mean(dim='x', axis=0)
def test_reduce_funcs(self):
v = Variable('x', np.array([1, np.nan, 2, 3]))
self.assertVariableIdentical(v.mean(), Variable([], 2))
self.assertVariableIdentical(v.mean(skipna=True), Variable([], 2))
self.assertVariableIdentical(v.mean(skipna=False), Variable([], np.nan))
self.assertVariableIdentical(np.mean(v), Variable([], 2))
self.assertVariableIdentical(v.prod(), Variable([], 6))
self.assertVariableIdentical(v.var(), Variable([], 2.0 / 3))
if LooseVersion(np.__version__) < '1.9':
with self.assertRaises(NotImplementedError):
v.median()
else:
self.assertVariableIdentical(v.median(), Variable([], 2))
v = Variable('x', [True, False, False])
self.assertVariableIdentical(v.any(), Variable([], True))
self.assertVariableIdentical(v.all(dim='x'), Variable([], False))
v = Variable('t', pd.date_range('2000-01-01', periods=3))
with self.assertRaises(NotImplementedError):
v.max(skipna=True)
self.assertVariableIdentical(
v.max(), Variable([], pd.Timestamp('2000-01-03')))
def test_reduce_keep_attrs(self):
_attrs = {'units': 'test', 'long_name': 'testing'}
v = Variable(['x', 'y'], self.d, _attrs)
# Test dropped attrs
vm = v.mean()
self.assertEqual(len(vm.attrs), 0)
self.assertEqual(vm.attrs, OrderedDict())
# Test kept attrs
vm = v.mean(keep_attrs=True)
self.assertEqual(len(vm.attrs), len(_attrs))
self.assertEqual(vm.attrs, _attrs)
def test_count(self):
expected = Variable([], 3)
actual = Variable(['x'], [1, 2, 3, np.nan]).count()
self.assertVariableIdentical(expected, actual)
v = Variable(['x'], np.array(['1', '2', '3', np.nan], dtype=object))
actual = v.count()
self.assertVariableIdentical(expected, actual)
actual = Variable(['x'], [True, False, True]).count()
self.assertVariableIdentical(expected, actual)
self.assertEqual(actual.dtype, int)
expected = Variable(['x'], [2, 3])
actual = Variable(['x', 'y'], [[1, 0, np.nan], [1, 1, 1]]).count('y')
self.assertVariableIdentical(expected, actual)
class TestCoordinate(TestCase, VariableSubclassTestCases):
cls = staticmethod(Coordinate)
def test_init(self):
with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'):
Coordinate((), 0)
def test_to_index(self):
data = 0.5 * np.arange(10)
v = Coordinate(['time'], data, {'foo': 'bar'})
self.assertTrue(pd.Index(data, name='time').identical(v.to_index()))
def test_data(self):
x = Coordinate('x', np.arange(3.0))
# data should be initially saved as an ndarray
self.assertIs(type(x._data), np.ndarray)
self.assertEqual(float, x.dtype)
self.assertArrayEqual(np.arange(3), x)
self.assertEqual(float, x.values.dtype)
# after inspecting x.values, the Coordinate value will be saved as an Index
self.assertIsInstance(x._data, PandasIndexAdapter)
with self.assertRaisesRegexp(TypeError, 'cannot be modified'):
x[:] = 0
def test_name(self):
coord = Coordinate('x', [10.0])
self.assertEqual(coord.name, 'x')
with self.assertRaises(AttributeError):
coord.name = 'y'
class TestAsCompatibleData(TestCase):
def test_unchanged_types(self):
types = (np.asarray, PandasIndexAdapter, indexing.LazilyIndexedArray)
for t in types:
for data in [np.arange(3),
pd.date_range('2000-01-01', periods=3),
pd.date_range('2000-01-01', periods=3).values]:
x = t(data)
self.assertIs(source_ndarray(x),
source_ndarray(_as_compatible_data(x)))
def test_converted_types(self):
for input_array in [[[0, 1, 2]], pd.DataFrame([[0, 1, 2]])]:
actual = _as_compatible_data(input_array)
self.assertArrayEqual(np.asarray(input_array), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.asarray(input_array).dtype, actual.dtype)
def test_masked_array(self):
original = np.ma.MaskedArray(np.arange(5))
expected = np.arange(5)
actual = _as_compatible_data(original)
self.assertArrayEqual(expected, actual)
self.assertEqual(np.dtype(int), actual.dtype)
original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
expected = np.arange(5.0)
expected[-1] = np.nan
actual = _as_compatible_data(original)
self.assertArrayEqual(expected, actual)
self.assertEqual(np.dtype(float), actual.dtype)
def test_datetime(self):
expected = np.datetime64('2000-01-01T00Z')
actual = _as_compatible_data(expected)
self.assertEqual(expected, actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
expected = np.array([np.datetime64('2000-01-01T00Z')])
actual = _as_compatible_data(expected)
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
expected = np.array([np.datetime64('2000-01-01T00Z', 'ns')])
actual = _as_compatible_data(expected)
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
self.assertIs(expected, source_ndarray(np.asarray(actual)))
expected = np.datetime64('2000-01-01T00Z', 'ns')
actual = _as_compatible_data(datetime(2000, 1, 1))
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
| apache-2.0 |
anne-urai/RT_RDK | graphicalModels/examples/huey_p_newton.py | 7 | 1513 | """
n-body particle inference
=========================
Dude.
"""
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
import daft
pgm = daft.PGM([5.4, 2.0], origin=[0.65, 0.35])
kx, ky = 1.5, 1.
nx, ny = kx + 3., ky + 0.
hx, hy, dhx = kx - 0.5, ky + 1., 1.
pgm.add_node(daft.Node("dyn", r"$\theta_{\mathrm{dyn}}$", hx + 0. * dhx, hy + 0.))
pgm.add_node(daft.Node("ic", r"$\theta_{\mathrm{I.C.}}$", hx + 1. * dhx, hy + 0.))
pgm.add_node(daft.Node("sun", r"$\theta_{\odot}$", hx + 2. * dhx, hy + 0.))
pgm.add_node(daft.Node("bg", r"$\theta_{\mathrm{bg}}$", hx + 3. * dhx, hy + 0.))
pgm.add_node(daft.Node("Sigma", r"$\Sigma^2$", hx + 4. * dhx, hy + 0.))
pgm.add_plate(daft.Plate([kx - 0.5, ky - 0.6, 2., 1.1], label=r"model points $k$"))
pgm.add_node(daft.Node("xk", r"$x_k$", kx + 0., ky + 0.))
pgm.add_edge("dyn", "xk")
pgm.add_edge("ic", "xk")
pgm.add_node(daft.Node("yk", r"$y_k$", kx + 1., ky + 0.))
pgm.add_edge("sun", "yk")
pgm.add_edge("xk", "yk")
pgm.add_plate(daft.Plate([nx - 0.5, ny - 0.6, 2., 1.1], label=r"data points $n$"))
pgm.add_node(daft.Node("sigman", r"$\sigma^2_n$", nx + 1., ny + 0., observed=True))
pgm.add_node(daft.Node("Yn", r"$Y_n$", nx + 0., ny + 0., observed=True))
pgm.add_edge("bg", "Yn")
pgm.add_edge("Sigma", "Yn")
pgm.add_edge("Sigma", "Yn")
pgm.add_edge("yk", "Yn")
pgm.add_edge("sigman", "Yn")
# Render and save.
pgm.render()
pgm.figure.savefig("huey_p_newton.pdf")
pgm.figure.savefig("huey_p_newton.png", dpi=150)
| mit |
totalgood/nlpia | src/nlpia/embedders.py | 1 | 4047 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""model_poly_tsne
Run nlpia.data.download() to download GBs of models like W2V and the LSAmodel used here
Computes a TSNE embedding for the tweet LSA model and then fits a 2nd degree polynomial to that embedding.
"""
from __future__ import print_function, unicode_literals, division, absolute_import
from builtins import (bytes, dict, int, list, object, range, str, # noqa
ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
from future import standard_library
from past.builtins import basestring
standard_library.install_aliases() # noqa
import os
import gc
import pandas as pd
from tqdm import tqdm
from gensim.models import LsiModel, TfidfModel
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# from sklearn.svm import SVR
from sklearn.manifold import TSNE
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
from nlpia.constants import BIGDATA_PATH
from nlpia.data.loaders import read_csv
import sklearn.metrics.pairwise
np = pd.np
def positive_projection(x, y, max_norm=1.0):
proj = max_norm - float(np.dot(x, y))
if proj < 1e-15:
print(x, y, proj)
return max(proj, 0.0) ** 0.5
def positive_distances(X, metric='cosine'):
X = X.values if (hasattr(X, 'values') and not callable(X.values)) else X
metric = getattr(sklearn.metrics.pairwise, metric + '_distances') if isinstance(metric, basestring) else metric
distances = metric(X)
distances[distances < 0] = 0.0
return distances
def bent_distances(X, y, weight=1.0, metric='cosine'):
y = np.array(y).reshape((len(X), 1))
distances = positive_distances(X, metric=metric)
distances += weight * sklearn.metrics.pairwise.euclidean_distances(np.matrix(y).T)
return distances
def train_tsne(training_size=2000, metric='cosine', n_components=3, perplexity=100, angle=.12):
    # adjust this downward to see if it affects accuracy
np = pd.np
tweets = read_csv(os.path.join(BIGDATA_PATH, 'tweets.csv.gz'))
tweets = tweets[tweets.isbot >= 0]
gc.collect() # reclaim RAM released above
# labels3 = tweets.isbot.apply(lambda x: int(x * 3))
labels = tweets.isbot.apply(lambda x: int(x * 2))
lsa = LsiModel.load(os.path.join(BIGDATA_PATH, 'lsa_tweets_5589798_2003588x200.pkl'))
tfidf = TfidfModel(id2word=lsa.id2word, dictionary=lsa.id2word)
bows = np.array([lsa.id2word.doc2bow(txt.split()) for txt in tweets.text])
# tfidfs = tfidf[bows]
X = pd.DataFrame([pd.Series(dict(v)) for v in tqdm(lsa[tfidf[bows]], total=len(bows))], index=tweets.index)
mask = ~X.isnull().any(axis=1)
mask.index = tweets.index
# >>> sum(~mask)
# 99
# >>> tweets.loc[mask.argmin()]
# isbot 0.17
# strict 13
# user b'CrisParanoid:'
# text b'#sad again'
# Name: 571, dtype: object
X = X[mask]
y = tweets.isbot[mask]
labels = labels[mask]
test_size = 1.0 - training_size if training_size < 1 else float(len(X) - training_size) / len(X)
Xindex, Xindex_test, yindex, yindex_test = train_test_split(X.index.values, y.index.values, test_size=test_size)
X, Xtest, y, ytest = X.loc[Xindex], X.loc[Xindex_test], y.loc[yindex], y.loc[yindex_test]
labels_test = labels.loc[yindex_test]
labels = labels.loc[yindex]
tsne = TSNE(metric='precomputed', n_components=n_components, angle=angle, perplexity=perplexity)
tsne = tsne.fit(positive_distances(X.values, metric=metric))
return tsne, X, Xtest, y, ytest
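# Hypothetical usage sketch (added for illustration; not part of the original
# module, and thresholding `y` into 0/1 labels for plotting is an assumption):
#   tsne, X, Xtest, y, ytest = train_tsne(training_size=2000, metric='cosine')
#   ax = plot_embedding(tsne, labels=(y > 0.5).astype(int), index=X.index)
#   ax.figure.savefig('tweet_tsne.png')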
def embedding_correlation(Xtest, ytest):
pass
def plot_embedding(tsne, labels, index=None):
labels = labels.values if (hasattr(labels, 'values') and not callable(labels.values)) else labels
colors = np.array(list('gr'))[labels]
df = pd.DataFrame(tsne.embedding_, columns=list('xy'), index=index)
return df.plot(kind='scatter', x='x', y='y', c=colors)
| mit |
wisfern/vnpy | vnpy/trader/gateway/tkproGateway/TradeApi/utils.py | 4 | 2891 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from collections import namedtuple
import pandas as pd
def _to_date(row):
date = int(row['DATE'])
return pd.datetime(year=date // 10000, month=date // 100 % 100, day=date % 100)
def _to_datetime(row):
date = int(row['DATE'])
time = int(row['TIME']) // 1000
return pd.datetime(year=date // 10000, month=date // 100 % 100, day=date % 100,
hour=time // 10000, minute=time // 100 % 100, second=time % 100)
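# Note added for clarity (not in the original source): DATE is packed as an
# integer YYYYMMDD and TIME as HHMMSSmmm, so e.g. a row with DATE=20170214 and
# TIME=93059000 decodes to pd.datetime(2017, 2, 14, 9, 30, 59) through the
# integer division/modulo arithmetic in _to_date/_to_datetime above.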
def _to_dataframe(cloumset, index_func=None, index_column=None):
df = pd.DataFrame(cloumset)
if index_func:
df.index = df.apply(index_func, axis=1)
elif index_column:
df.index = df[index_column]
del df.index.name
return df
def _error_to_str(error):
if error:
if 'message' in error:
return str(error['error']) + "," + error['message']
else:
return str(error['error']) + ","
else:
return ","
def to_obj(class_name, data):
try:
if isinstance(data, (list, tuple)):
result = []
for d in data:
result.append(namedtuple(class_name, list(d.keys()))(*list(d.values())))
return result
elif type(data) == dict:
result = namedtuple(class_name, list(data.keys()))(*list(data.values()))
return result
else:
return data
except Exception as e:
print(class_name, data, e)
return data
def extract_result(cr, format="", index_column=None, class_name=""):
"""
format supports pandas, obj.
"""
err = _error_to_str(cr['error']) if 'error' in cr else None
if 'result' in cr:
if format == "pandas":
if index_column:
return (_to_dataframe(cr['result'], None, index_column), err)
if 'TIME' in cr['result']:
return (_to_dataframe(cr['result'], _to_datetime), err)
elif 'DATE' in cr['result']:
return (_to_dataframe(cr['result'], _to_date), err)
else:
return (_to_dataframe(cr['result']), err)
elif format == "obj" and cr['result'] and class_name:
r = cr['result']
if isinstance(r, (list, tuple)):
result = []
for d in r:
result.append(namedtuple(class_name, list(d.keys()))(*list(d.values())))
elif isinstance(r, dict):
result = namedtuple(class_name, list(r.keys()))(*list(r.values()))
else:
result = r
return (result, err)
else:
return (cr['result'], err)
else:
return (None, err)
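# Hypothetical usage sketch (added for illustration; 'cr' stands for a decoded
# response dict and the column/class names below are made-up examples):
#   df, err = extract_result(cr, format="pandas", index_column="symbol")
#   orders, err = extract_result(cr, format="obj", class_name="Order")
# When no 'result' key is present the first element is None, and 'err', when an
# 'error' key exists, is the "<code>,<message>" string built by _error_to_str.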
| mit |
Archman/felapps | setup.py | 1 | 3327 | #!/usr/bin/env python
"""FELApps project (felapps)"""
def readme():
with open('README.rst') as f:
return f.read()
from setuptools import find_packages, setup
import os
import glob
appName = "felapps"
appVersion = "2.0.0"
appDescription = "High-level applications for FEL commissioning."
appLong_description = readme() + '\n\n'
appPlatform = ["Linux"]
appAuthor = "Tong Zhang"
appAuthor_email = "[email protected]"
appLicense = "MIT"
appUrl = "http://archman.github.io/felapps/"
appKeywords = "FEL HLA high-level python wxpython"
requiredpackages = ['numpy','scipy','matplotlib','pyepics','h5py',
'pyrpn','beamline','lmfit'] # install_requires
appScriptsName = ['imageviewer',
'imageviewer.py',
'felformula',
'felformula.py',
'cornalyzer',
'cornalyzer.py',
'dataworkshop',
'dataworkshop.py',
'appdrawer',
'wxmpv',
'runfelapps',
'runfelapps.py',
'update-felapps-menu',
]
#'matchwizard',
ScriptsRoot = 'scripts'
appScripts = [os.path.join(ScriptsRoot,scriptname) for scriptname in appScriptsName]
setup(name = appName,
version = appVersion,
description = appDescription,
long_description = appLong_description,
platforms = appPlatform,
author = appAuthor,
author_email = appAuthor_email,
license = appLicense,
url = appUrl,
keywords = appKeywords,
scripts = appScripts,
#install_requires = requiredpackages,
classifiers = ['Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Physics'],
test_suite = 'nose.collector',
tests_require = ['nose'],
packages = find_packages(exclude=['contrib','tests*']),
#packages = ['felapps'],
#package_dir = {'felapps': 'felapps'},
#package_data = {'felapps': ['configs/imageviewer.xml',
# 'configs/udefs.py'],
# '': ['requirements.txt'],
# }
data_files = [
('share/felapps', ['felapps/configs/imageviewer.xml']),
('share/felapps', ['felapps/configs/udefs.py']),
('share/felapps', ['requirements.txt']),
('share/icons/hicolor/16x16/apps', glob.glob("launchers/icons/short/16/*.png")),
('share/icons/hicolor/32x32/apps', glob.glob("launchers/icons/short/32/*.png")),
('share/icons/hicolor/48x48/apps', glob.glob("launchers/icons/short/48/*.png")),
('share/icons/hicolor/128x128/apps', glob.glob("launchers/icons/short/128/*.png")),
('share/icons/hicolor/256x256/apps', glob.glob("launchers/icons/short/256/*.png")),
('share/icons/hicolor/512x512/apps', glob.glob("launchers/icons/short/512/*.png")),
('share/applications', glob.glob("launchers/*.desktop")),
('share/applications', glob.glob("launchers/*.directory")),
],
)
| mit |
mjgrav2001/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
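# Illustrative addition (not part of the original module): a by-hand version of
# the computation above for labels_true=[0, 0, 1, 1], labels_pred=[0, 0, 1, 2];
# the helper name is hypothetical and only meant to make the formula concrete.
def _adjusted_rand_worked_example():
    """Recompute ARI for a tiny labeling; returns 4/7 ~= 0.57 as in the doctest."""
    contingency = np.array([[2, 0, 0],   # class 0 -> all samples in cluster 0
                            [0, 1, 1]])  # class 1 -> split over clusters 1 and 2
    sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))  # 1 + 1 = 2
    sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))  # 1 + 0 + 0 = 1
    sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())    # 1
    prod_comb = (sum_comb_c * sum_comb_k) / float(comb(4, 2))        # 2 / 6
    mean_comb = (sum_comb_k + sum_comb_c) / 2.                       # 1.5
    return (sum_comb - prod_comb) / (mean_comb - prod_comb)          # ~0.571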
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
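# Worked example (added for clarity, not part of the original module): for
# labels_true=[0, 0, 1, 2] and labels_pred=[0, 0, 1, 1] the quantities above are
#   H(C) = -(0.5*log(0.5) + 2 * 0.25*log(0.25)) ~= 1.040 nats
#   H(K) = log(2) ~= 0.693 nats
#   MI   = log(2) ~= 0.693 nats
# so homogeneity = MI / H(C) ~= 0.667, completeness = MI / H(K) = 1.0 and
# v_measure = 2 * 0.667 * 1.0 / (0.667 + 1.0) ~= 0.8, matching the 0.8...
# doctest in v_measure_score below.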
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
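# Worked example (added for clarity, not part of the original module): for
# labels_true=[0, 0, 1, 1] and labels_pred=[1, 1, 0, 0] the contingency matrix
# is [[0, 2], [2, 0]]; the two non-zero cells each have P(i, j) = 0.5 while
# P(i) = P'(j) = 0.5, so MI = 2 * 0.5 * log(0.5 / 0.25) = log(2) ~= 0.693.
# The value is returned in nats (natural logarithm), not bits.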
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper limited by 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings) have
       an expected AMI of around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
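# Illustrative note (added for clarity, not part of the original module): when
# the two labelings are identical, e.g. [0, 0, 1, 1] vs [0, 0, 1, 1],
# MI = H(U) = H(V) = log(2), so the numerator and denominator of
# (MI - E[MI]) / (max(H(U), H(V)) - E[MI]) coincide and AMI = 1.0; random
# labelings instead drive MI toward E[MI] and hence AMI toward 0.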
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
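# Worked example (added for clarity, not part of the original module): for
# labels_true=[0, 0, 1, 1] and labels_pred=[0, 0, 1, 2], MI = log(2) ~= 0.693,
# H(labels_true) = log(2) ~= 0.693 and H(labels_pred) ~= 1.040, so
# NMI = 0.693 / sqrt(0.693 * 1.040) ~= 0.82; the unnecessary extra split
# lowers the score below 1.0 even though no class is mixed across clusters.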
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
cle1109/scot | examples/misc/features.py | 4 | 3360 | """
This example shows how to decompose EEG signals into source activations with
CSPVARICA, and subsequently extract single-trial connectivity as features for
LDA classification.
"""
from __future__ import print_function
import numpy as np
try: # new in sklearn 0.19
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
except ImportError:
from sklearn.lda import LDA
from sklearn.cross_validation import KFold
from sklearn.metrics import confusion_matrix
import scot.xvschema
# The data set contains a continuous 45 channel EEG recording of a motor
# imagery experiment. The data was preprocessed to reduce eye movement
# artifacts and resampled to a sampling rate of 100 Hz. With a visual cue, the
# subject was instructed to perform either hand or foot motor imagery. The
# trigger time points of the cues are stored in 'triggers', and 'classes'
# contains the class labels. Duration of the motor imagery period was
# approximately six seconds.
from scot.datasets import fetch
midata = fetch("mi")[0]
raweeg = midata["eeg"]
triggers = midata["triggers"]
classes = midata["labels"]
fs = midata["fs"]
locs = midata["locations"]
# Set random seed for repeatable results
np.random.seed(42)
# Switch backend to scikit-learn
scot.backend.activate('sklearn')
# Set up analysis object
#
# We simply choose a VAR model order of 30, and reduction to 4 components.
ws = scot.Workspace({'model_order': 30}, reducedim=4, fs=fs)
freq = np.linspace(0, fs, ws.nfft_)
# Prepare data
#
# Here we cut out segments from 3s to 4s after each trigger. This is right in
# the middle of the motor imagery period.
data = scot.datatools.cut_segments(raweeg, triggers, 3 * fs, 4 * fs)
# Initialize cross-validation
nfolds = 10
kf = KFold(len(triggers), n_folds=nfolds)
# LDA requires numeric class labels
cl = np.unique(classes)
classids = np.array([dict(zip(cl, range(len(cl))))[c] for c in classes])
# Perform cross-validation
lda = LDA()
cm = np.zeros((2, 2))
fold = 0
for train, test in kf:
fold += 1
# Perform CSPVARICA
ws.set_data(data[train, :, :], classes[train])
ws.do_cspvarica()
# Find optimal regularization parameter for single-trial fitting
# ws.var_.xvschema = scot.xvschema.singletrial
# ws.optimize_var()
ws.var_.delta = 1
# Single-trial fitting and feature extraction
features = np.zeros((len(triggers), 32))
for t in range(len(triggers)):
print('Fold {:2d}/{:2d}, trial: {:d} '.format(fold, nfolds, t),
end='\r')
ws.set_data(data[t, :, :])
ws.fit_var()
con = ws.get_connectivity('ffPDC')
alpha = np.mean(con[:, :, np.logical_and(7 < freq, freq < 13)], axis=2)
beta = np.mean(con[:, :, np.logical_and(15 < freq, freq < 25)], axis=2)
features[t, :] = np.array([alpha, beta]).flatten()
lda.fit(features[train, :], classids[train])
acc_train = lda.score(features[train, :], classids[train])
acc_test = lda.score(features[test, :], classids[test])
print('Fold {:2d}/{:2d}, '
'acc train: {:.3f}, '
'acc test: {:.3f}'.format(fold, nfolds, acc_train, acc_test))
pred = lda.predict(features[test, :])
cm += confusion_matrix(classids[test], pred)
print('\nConfusion Matrix:\n', cm)
print('\nTotal Accuracy: {:.3f}'.format(np.sum(np.diag(cm))/np.sum(cm)))
| mit |
arjunkhode/ASP | lectures/04-STFT/plots-code/windows-2.py | 24 | 1026 | import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DF
import utilFunctions as UF
import math
(fs, x) = UF.wavread('../../../sounds/violin-B3.wav')
N = 1024
pin = 5000
w = np.ones(801)
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
x1 = x[pin-hM1:pin+hM2]
plt.figure(1, figsize=(9.5, 5))
plt.subplot(3,1,1)
plt.plot(np.arange(-hM1, hM2), x1, lw=1.5)
plt.axis([-hM1, hM2, min(x1), max(x1)])
plt.title('x (violin-B3.wav)')
mX, pX = DF.dftAnal(x1, w, N)
mX = mX - max(mX)
plt.subplot(3,1,2)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,N/4,-70,0])
plt.title ('mX (rectangular window)')
w = np.blackman(801)
mX, pX = DF.dftAnal(x1, w, N)
mX = mX - max(mX)
plt.subplot(3,1,3)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,N/4,-70,0])
plt.title ('mX (blackman window)')
plt.tight_layout()
plt.savefig('windows-2.png')
plt.show()
| agpl-3.0 |
openfisca/openfisca-matplotlib | openfisca_matplotlib/dataframes.py | 1 | 1644 | # -*- coding: utf-8 -*-
import pandas
from openfisca_core import decompositions
from openfisca_matplotlib.utils import OutNode
def data_frame_from_decomposition_json(simulation, decomposition_json = None, reference_simulation = None,
remove_null = False, label = True, name = False):
# currency = simulation.tax_benefit_system.CURRENCY # TODO : put an option to add currency, for now useless
assert label or name, "At least label or name should be True"
if decomposition_json is None:
decomposition_json = decompositions.get_decomposition_json(simulation.tax_benefit_system)
data = OutNode.init_from_decomposition_json(simulation, decomposition_json)
index = [row.desc for row in data if row.desc not in ('root')]
data_frame = None
for row in data:
if row.desc not in ('root'):
if data_frame is None:
value_columns = ['value_' + str(i) for i in range(len(row.vals))] if len(row.vals) > 1 else ['value']
data_frame = pandas.DataFrame(index = index, columns = ['name'] + value_columns)
data_frame['name'][row.desc] = row.code
data_frame.loc[row.desc, value_columns] = row.vals
data_frame.index.name = "label"
if remove_null:
variables_to_remove = []
for variable in data_frame.index:
print(data_frame.loc[variable, value_columns])
if (data_frame.loc[variable, value_columns] == 0).all():
variables_to_remove.append(variable)
data_frame.drop(variables_to_remove, inplace = True)
data_frame.reset_index(inplace = True)
return data_frame
| agpl-3.0 |
xuewei4d/scikit-learn | sklearn/utils/tests/test_estimator_html_repr.py | 15 | 9724 | from contextlib import closing
from io import StringIO
import pytest
from sklearn import config_context
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.impute import SimpleImputer
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import VotingClassifier
from sklearn.feature_selection import SelectPercentile
from sklearn.cluster import Birch
from sklearn.cluster import AgglomerativeClustering
from sklearn.preprocessing import OneHotEncoder
from sklearn.svm import LinearSVC
from sklearn.svm import LinearSVR
from sklearn.tree import DecisionTreeClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import StackingRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RationalQuadratic
from sklearn.utils._estimator_html_repr import _write_label_html
from sklearn.utils._estimator_html_repr import _get_visual_block
from sklearn.utils._estimator_html_repr import estimator_html_repr
@pytest.mark.parametrize("checked", [True, False])
def test_write_label_html(checked):
# Test checking logic and labeling
name = "LogisticRegression"
tool_tip = "hello-world"
with closing(StringIO()) as out:
_write_label_html(out, name, tool_tip, checked=checked)
html_label = out.getvalue()
assert 'LogisticRegression</label>' in html_label
assert html_label.startswith('<div class="sk-label-container">')
assert '<pre>hello-world</pre>' in html_label
if checked:
assert 'checked>' in html_label
@pytest.mark.parametrize('est', ['passthrough', 'drop', None])
def test_get_visual_block_single_str_none(est):
    # Test estimators that are represented by strings
est_html_info = _get_visual_block(est)
assert est_html_info.kind == 'single'
assert est_html_info.estimators == est
assert est_html_info.names == str(est)
assert est_html_info.name_details == str(est)
def test_get_visual_block_single_estimator():
est = LogisticRegression(C=10.0)
est_html_info = _get_visual_block(est)
assert est_html_info.kind == 'single'
assert est_html_info.estimators == est
assert est_html_info.names == est.__class__.__name__
assert est_html_info.name_details == str(est)
def test_get_visual_block_pipeline():
pipe = Pipeline([
('imputer', SimpleImputer()),
('do_nothing', 'passthrough'),
('do_nothing_more', None),
('classifier', LogisticRegression())
])
est_html_info = _get_visual_block(pipe)
assert est_html_info.kind == 'serial'
assert est_html_info.estimators == tuple(step[1] for step in pipe.steps)
assert est_html_info.names == ['imputer: SimpleImputer',
'do_nothing: passthrough',
'do_nothing_more: passthrough',
'classifier: LogisticRegression']
assert est_html_info.name_details == [str(est) for _, est in pipe.steps]
def test_get_visual_block_feature_union():
f_union = FeatureUnion([
('pca', PCA()), ('svd', TruncatedSVD())
])
est_html_info = _get_visual_block(f_union)
assert est_html_info.kind == 'parallel'
assert est_html_info.names == ('pca', 'svd')
assert est_html_info.estimators == tuple(
trans[1] for trans in f_union.transformer_list)
assert est_html_info.name_details == (None, None)
def test_get_visual_block_voting():
clf = VotingClassifier([
('log_reg', LogisticRegression()),
('mlp', MLPClassifier())
])
est_html_info = _get_visual_block(clf)
assert est_html_info.kind == 'parallel'
assert est_html_info.estimators == tuple(trans[1]
for trans in clf.estimators)
assert est_html_info.names == ('log_reg', 'mlp')
assert est_html_info.name_details == (None, None)
def test_get_visual_block_column_transformer():
ct = ColumnTransformer([
('pca', PCA(), ['num1', 'num2']),
('svd', TruncatedSVD, [0, 3])
])
est_html_info = _get_visual_block(ct)
assert est_html_info.kind == 'parallel'
assert est_html_info.estimators == tuple(
trans[1] for trans in ct.transformers)
assert est_html_info.names == ('pca', 'svd')
assert est_html_info.name_details == (['num1', 'num2'], [0, 3])
def test_estimator_html_repr_pipeline():
num_trans = Pipeline(steps=[
('pass', 'passthrough'),
('imputer', SimpleImputer(strategy='median'))
])
cat_trans = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant',
missing_values='empty')),
('one-hot', OneHotEncoder(drop='first'))
])
preprocess = ColumnTransformer([
('num', num_trans, ['a', 'b', 'c', 'd', 'e']),
('cat', cat_trans, [0, 1, 2, 3])
])
feat_u = FeatureUnion([
('pca', PCA(n_components=1)),
('tsvd', Pipeline([('first', TruncatedSVD(n_components=3)),
('select', SelectPercentile())]))
])
clf = VotingClassifier([
('lr', LogisticRegression(solver='lbfgs', random_state=1)),
('mlp', MLPClassifier(alpha=0.001))
])
pipe = Pipeline([
('preprocessor', preprocess), ('feat_u', feat_u), ('classifier', clf)
])
html_output = estimator_html_repr(pipe)
# top level estimators show estimator with changes
assert str(pipe) in html_output
for _, est in pipe.steps:
assert (f"<div class=\"sk-toggleable__content\">"
f"<pre>{str(est)}") in html_output
# low level estimators do not show changes
with config_context(print_changed_only=True):
assert str(num_trans['pass']) in html_output
assert 'passthrough</label>' in html_output
assert str(num_trans['imputer']) in html_output
for _, _, cols in preprocess.transformers:
assert f"<pre>{cols}</pre>" in html_output
# feature union
for name, _ in feat_u.transformer_list:
assert f"<label>{name}</label>" in html_output
pca = feat_u.transformer_list[0][1]
assert f"<pre>{str(pca)}</pre>" in html_output
tsvd = feat_u.transformer_list[1][1]
first = tsvd['first']
select = tsvd['select']
assert f"<pre>{str(first)}</pre>" in html_output
assert f"<pre>{str(select)}</pre>" in html_output
    # voting classifier
for name, est in clf.estimators:
assert f"<label>{name}</label>" in html_output
assert f"<pre>{str(est)}</pre>" in html_output
@pytest.mark.parametrize("final_estimator", [None, LinearSVC()])
def test_stacking_classsifer(final_estimator):
estimators = [('mlp', MLPClassifier(alpha=0.001)),
('tree', DecisionTreeClassifier())]
clf = StackingClassifier(
estimators=estimators, final_estimator=final_estimator)
html_output = estimator_html_repr(clf)
assert str(clf) in html_output
# If final_estimator's default changes from LogisticRegression
# this should be updated
if final_estimator is None:
assert "LogisticRegression(" in html_output
else:
assert final_estimator.__class__.__name__ in html_output
@pytest.mark.parametrize("final_estimator", [None, LinearSVR()])
def test_stacking_regressor(final_estimator):
reg = StackingRegressor(
estimators=[('svr', LinearSVR())], final_estimator=final_estimator)
html_output = estimator_html_repr(reg)
assert str(reg.estimators[0][0]) in html_output
assert "LinearSVR</label>" in html_output
if final_estimator is None:
assert "RidgeCV</label>" in html_output
else:
assert final_estimator.__class__.__name__ in html_output
def test_birch_duck_typing_meta():
# Test duck typing meta estimators with Birch
birch = Birch(n_clusters=AgglomerativeClustering(n_clusters=3))
html_output = estimator_html_repr(birch)
# inner estimators do not show changes
with config_context(print_changed_only=True):
assert f"<pre>{str(birch.n_clusters)}" in html_output
assert "AgglomerativeClustering</label>" in html_output
# outer estimator contains all changes
assert f"<pre>{str(birch)}" in html_output
def test_ovo_classifier_duck_typing_meta():
# Test duck typing metaestimators with OVO
ovo = OneVsOneClassifier(LinearSVC(penalty='l1'))
html_output = estimator_html_repr(ovo)
# inner estimators do not show changes
with config_context(print_changed_only=True):
assert f"<pre>{str(ovo.estimator)}" in html_output
assert "LinearSVC</label>" in html_output
    # outer estimator
assert f"<pre>{str(ovo)}" in html_output
def test_duck_typing_nested_estimator():
# Test duck typing metaestimators with GP
kernel = RationalQuadratic(length_scale=1.0, alpha=0.1)
gp = GaussianProcessRegressor(kernel=kernel)
html_output = estimator_html_repr(gp)
assert f"<pre>{str(kernel)}" in html_output
assert f"<pre>{str(gp)}" in html_output
@pytest.mark.parametrize('print_changed_only', [True, False])
def test_one_estimator_print_change_only(print_changed_only):
pca = PCA(n_components=10)
with config_context(print_changed_only=print_changed_only):
pca_repr = str(pca)
html_output = estimator_html_repr(pca)
assert pca_repr in html_output
| bsd-3-clause |
sbg/Mitty | mitty/empirical/gc.py | 1 | 4328 | """Computes GC content vs coverage from a BAM.
The reference is split up into sections, say 10 kb long. For each chunk we compute the GC content and
the average coverage. We return this as an array spanning the reference with GC and coverage values.
This array can be further processed to get metrics useful for modeling the GC bias.
The task is parallelized by chromosome.
"""
from multiprocessing import Pool
import time
import pickle
import logging
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pysam
logger = logging.getLogger(__name__)
def gc_and_coverage_for_region(bam_fp, fasta_fp, region):
"""Compute GC content (as a fraction) and average coverage for this region
:param bam_fp:
:param fasta_fp:
:param region: e.g. "1:10000-20000"
:return: gc, cov
"""
seq = fasta_fp.fetch(region=region)
if seq.count('N') / float(len(seq)) > 0.1:
return None, None # Probably centromere or telomere
gc = float(seq.count('G') + seq.count('C')) / len(seq)
cov_l = [b.n for b in bam_fp.pileup(region=region)]
cov = float(sum(cov_l)) / max(len(cov_l), 1)
return gc, cov
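# Illustrative sketch (not part of the original module): opening a BAM/FASTA pair
# and querying a single window with the helper above.  The file paths and region
# below are hypothetical placeholders.
def _example_region_gc_cov(bam_fname='sample.bam', fasta_fname='ref.fa'):
  bam_fp = pysam.AlignmentFile(bam_fname, mode='rb')
  fasta_fp = pysam.FastaFile(fasta_fname)
  return gc_and_coverage_for_region(bam_fp, fasta_fp, region='1:10000-20000')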
def gc_and_coverage_for_chromosome(bam_fname, fasta_fname, chrom_idx, block_len=10000):
"""
:param bam_fname: Passing file names rather than file objects for parallelization
:param fasta_fname:
:param chrom_idx: 0, 1, 2 ... referring to chroms in the bam header
:param block_len: how many bp to chunk by
:return: an array of gc and cov values
"""
bam_fp = pysam.AlignmentFile(bam_fname, mode='rb')
region_start, region_end = 1, bam_fp.header['SQ'][chrom_idx]['LN']
logger.debug('Processing {}:{}-{}'.format(bam_fp.header['SQ'][chrom_idx]['SN'], region_start, region_end))
fasta_fp = pysam.FastaFile(fasta_fname)
return np.array([
gc_and_coverage_for_region(bam_fp, fasta_fp, region='{}:{}-{}'.format(bam_fp.header['SQ'][chrom_idx]['SN'], r, r + block_len))
for r in range(region_start, region_end, block_len)
], dtype=[('gc', float), ('coverage', float)])
def process_bam_parallel(bam_fname, fasta_fname, pkl, block_len=10000, threads=4):
p = Pool(threads)
t0 = time.time()
bam_fp = pysam.AlignmentFile(bam_fname, mode='rb')
max_chroms = min(24, len(bam_fp.header['SQ']))
gc_cov = {'block_len': block_len, 'seq_info': bam_fp.header['SQ']}
for chrom_data in p.imap_unordered(
process_bam_section_w,
({"bam_fname": bam_fname, "fasta_fname": fasta_fname, "chrom_idx": i, "block_len": block_len}
for i in range(0, max_chroms))):
gc_cov.update({bam_fp.header['SQ'][chrom_data[0]]['SN']: chrom_data[1]})
t1 = time.time()
logger.debug('Processed {} ({} s)'.format(bam_fp.header['SQ'][chrom_data[0]], t1 - t0))
pickle.dump(gc_cov, open(pkl, 'wb'))
return gc_cov
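# Illustrative sketch (not part of the original module): a typical top-level call
# that computes per-chromosome GC/coverage arrays in parallel and pickles the
# result.  All paths are hypothetical placeholders.
def _example_process_bam():
  return process_bam_parallel('sample.bam', 'ref.fa', pkl='gc_cov.pkl',
                              block_len=10000, threads=4)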
def process_bam_section_w(args):
"""A thin wrapper to allow proper tracebacks when things go wrong in a thread
:param args:
:return:
"""
import traceback
try:
# return gc_and_coverage_for_chromosome(bam_fname, fasta_fname, chrom_idx, block_len=10000)
return (args['chrom_idx'], gc_and_coverage_for_chromosome(**args))
except Exception as e:
traceback.print_exc()
print('')
raise e
def plot_gc_cov(gc_cov, max_cov=60, title='GC/cov'):
"""Plot a multi panel plot fo GC/cov data
:param gc_cov:
:return:
"""
from matplotlib.colors import LogNorm
# TODO: hardcoded for 24 chromosomes, make this data aware?
seq = gc_cov['seq_info']
fig = plt.figure(figsize=(12, 8))
rows, cols = 4, 6
gc_lim = [0.0, 1.0]
cov_lim = [0.0, max_cov]
for row in range(rows):
for col in range(cols):
if row * cols + col > len(seq) - 1: break
sn = seq[row * cols + col]['SN']
ax = plt.subplot(rows, cols, row * cols + col + 1)
x, y = gc_cov[sn]['gc'], gc_cov[sn]['coverage']
x, y = x[~(np.isnan(x) | np.isnan(y))], y[~(np.isnan(x) | np.isnan(y))]
# plt.plot(x, y, 'k.', ms=0.1)
ax.hist2d(x, y, bins=71, range=[gc_lim, cov_lim], cmap=plt.cm.gray_r, norm=LogNorm())
plt.title(sn)
plt.setp(ax, xlim=gc_lim, ylim=cov_lim)
if row == rows - 1 and col == 0:
ax.set_xlabel('GC content')
ax.set_ylabel('Coverage')
else:
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.suptitle(title) | apache-2.0 |
tclose/python-neo | examples/simple_plot_with_matplotlib.py | 7 | 1057 | # -*- coding: utf-8 -*-
"""
This is an example for plotting a neo object with matplotlib.
"""
import urllib
import numpy as np
import quantities as pq
from matplotlib import pyplot
import neo
url = 'https://portal.g-node.org/neo/'
distantfile = url + 'neuroexplorer/File_neuroexplorer_2.nex'
localfile = 'File_neuroexplorer_2.nex'
urllib.urlretrieve(distantfile, localfile)
reader = neo.io.NeuroExplorerIO(filename='File_neuroexplorer_2.nex')
bl = reader.read(cascade=True, lazy=False)[0]
for seg in bl.segments:
fig = pyplot.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
ax1.set_title(seg.file_origin)
mint = 0 * pq.s
maxt = np.inf * pq.s
for i, asig in enumerate(seg.analogsignals):
times = asig.times.rescale('s').magnitude
asig = asig.rescale('mV').magnitude
ax1.plot(times, asig)
trains = [st.rescale('s').magnitude for st in seg.spiketrains]
colors = pyplot.cm.jet(np.linspace(0, 1, len(seg.spiketrains)))
ax2.eventplot(trains, colors=colors)
pyplot.show()
| bsd-3-clause |
xesscorp/myhdlpeek | myhdlpeek/peekerbase.py | 1 | 17932 | # -*- coding: utf-8 -*-
# Copyright (c) 2017-2020, XESS Corp. The MIT License (MIT).
# TODO: Use https://github.com/bendichter/brokenaxes to break long traces into segments.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import re
from builtins import dict, int, str, super
from collections import namedtuple
import IPython.display as DISP
import matplotlib.pyplot as plt
import nbwavedrom
from future import standard_library
from tabulate import tabulate
from .trace import *
standard_library.install_aliases()
class PeekerBase(object):
peekers = dict() # Global list of all Peekers.
USE_JUPYTER = False
USE_WAVEDROM = False
unit_time = None # Time interval for a single tick-mark span.
def __new__(cls, *args, **kwargs):
# Keep PeekerBase from being instantiated.
if cls is PeekerBase:
raise TypeError("PeekerBase class may not be instantiated")
return object.__new__(cls)
def __init__(self, signal, name, **kwargs):
# Create storage for a signal trace.
self.trace = Trace()
# Configure the Peeker and its Trace instance.
self.config(**kwargs)
# Assign a unique name to this peeker.
self.name_dup = False # Start off assuming the name has no duplicates.
index = 0 # Starting index for disambiguating duplicates.
nm = "{name}[{index}]".format(**locals()) # Create name with bracketed index.
# Search through the peeker names for a match.
while nm in self.peekers:
# A match was found, so mark the matching names as duplicates.
self.peekers[nm].name_dup = True
self.name_dup = True
# Go to the next index and see if that name is taken.
index += 1
nm = "{name}[{index}]".format(**locals())
self.trace.name = nm # Assign the unique name.
# Keep a reference to the signal so we can get info about it later, if needed.
self.signal = signal
# Add this peeker to the global list.
self.peekers[self.trace.name] = self
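    # Illustrative note (not part of the original code): creating two peekers with
    # the same base name through a hypothetical concrete subclass MyPeeker, e.g.
    #
    #     MyPeeker(sig_a, 'sig'); MyPeeker(sig_b, 'sig')
    #
    # leaves traces named 'sig[0]' and 'sig[1]' in cls.peekers; a name used only
    # once is stripped back to plain 'sig' later by _clean_names().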
@classmethod
def config_defaults(cls, **kwargs):
"""Setup options and shortcut functions."""
# Configure Trace defaults.
Trace.config_defaults(**kwargs)
global clear_traces, show_traces, show_waveforms, show_text_table, show_html_table, export_dataframe
cls.USE_WAVEDROM = kwargs.pop("use_wavedrom", cls.USE_WAVEDROM)
if cls.USE_WAVEDROM:
cls.show_waveforms = cls.to_wavedrom
cls.show_traces = traces_to_wavedrom
else:
cls.show_waveforms = cls.to_matplotlib
cls.show_traces = traces_to_matplotlib
# Create an intermediary function to call cls.show_waveforms and assign it to show_waveforms.
# Then if cls.show_waveforms is changed, any calls to show_waveforms will call the changed
# function. Directly assigning cls.show_waveforms to show_waveforms would mean any external
# code that calls show_waveforms() would always call the initially-assigned function even if
# cls.show_waveforms got a different assignment later.
def shw_wvfrms(*args, **kwargs):
return cls.show_waveforms(*args, **kwargs)
show_waveforms = shw_wvfrms
def shw_trcs(*args, **kwargs):
return cls.show_traces(*args, **kwargs)
show_traces = shw_trcs
# These class methods don't change as the options are altered, so just assign them
# to shortcuts without creating intermediary functions like above.
clear_traces = cls.clear_traces
export_dataframe = cls.to_dataframe
show_text_table = cls.to_text_table
show_html_table = cls.to_html_table
cls.USE_JUPYTER = kwargs.pop("use_jupyter", cls.USE_JUPYTER)
# Remaining keyword args.
for k, v in kwargs.items():
setattr(cls, k, copy(v))
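    # Illustrative note (not part of the original code): the shw_wvfrms/shw_trcs
    # wrappers above exist so the module-level shortcuts keep following later
    # reassignments of cls.show_waveforms / cls.show_traces.  Roughly:
    #
    #     show_waveforms = cls.show_waveforms              # freezes current choice
    #     show_waveforms = lambda *a, **k: cls.show_waveforms(*a, **k)  # re-reads it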
def config(self, **kwargs):
"""
Set configuration for a particular Peeker.
"""
# Configure trace instance.
self.trace.config(**kwargs)
# Remaining keyword args.
for k, v in kwargs.items():
if isinstance(v, dict):
setattr(self, k, copy(getattr(self, k, {})))
getattr(self, k).update(v)
else:
setattr(self, k, copy(v))
@classmethod
def clear(cls):
"""Clear the global list of Peekers."""
cls.peekers = dict()
cls.unit_time = None
@classmethod
def clear_traces(cls):
"""Clear waveform samples from the global list of Peekers."""
for p in cls.peekers.values():
p.trace.clear()
cls.unit_time = None
@classmethod
def start_time(cls):
"""Return the time of the first signal transition captured by the peekers."""
return min((p.trace.start_time() for p in cls.peekers))
@classmethod
def stop_time(cls):
"""Return the time of the last signal transition captured by the peekers."""
return max((p.trace.stop_time() for p in cls.peekers))
@classmethod
def _clean_names(cls):
"""
Remove indices from non-repeated peeker names that don't need them.
When created, all peekers get an index appended to their name to
disambiguate any repeated names. If the name isn't actually repeated,
then the index is removed.
"""
index_re = "\[\d+\]$"
for name, peeker in list(cls.peekers.items()):
if not peeker.name_dup:
# Base name is not repeated, so remove any index.
new_name = re.sub(index_re, "", name)
if new_name != name:
# Index got removed so name changed. Therefore,
# remove the original entry and replace with
# the renamed Peeker.
cls.peekers.pop(name)
peeker.trace.name = new_name
cls.peekers[new_name] = peeker
@classmethod
def to_dataframe(cls, *names, **kwargs):
"""
Convert traces stored in peekers into a Pandas DataFrame of times and trace values.
Args:
*names: A list of strings containing the names for the Peekers that
will be processed. A string may contain multiple,
space-separated names.
        Keyword Args:
start_time: The earliest (left-most) time bound for the traces.
stop_time: The latest (right-most) time bound for the traces.
step: Set the time increment for filling in between sample times.
If 0, then don't fill in between sample times.
Returns:
A DataFrame with the columns for the named traces and time as the index.
"""
cls._clean_names()
if names:
# Go through the provided names and split any containing spaces
# into individual names.
names = [nm for name in names for nm in name.split()]
else:
# If no names provided, use all the peekers.
names = _sort_names(cls.peekers.keys())
# Collect all the traces for the Peekers matching the names.
traces = [getattr(cls.peekers.get(name), "trace", None) for name in names]
return traces_to_dataframe(*traces, **kwargs)
@classmethod
def to_table_data(cls, *names, **kwargs):
"""
Convert traces stored in peekers into a list of times and trace values.
Args:
*names: A list of strings containing the names for the Peekers that
will be processed. A string may contain multiple,
space-separated names.
        Keyword Args:
start_time: The earliest (left-most) time bound for the traces.
stop_time: The latest (right-most) time bound for the traces.
step: Set the time increment for filling in between sample times.
If 0, then don't fill in between sample times.
Returns:
List of lists containing the time and the value of each trace at that time.
"""
cls._clean_names()
if names:
# Go through the provided names and split any containing spaces
# into individual names.
names = [nm for name in names for nm in name.split()]
else:
# If no names provided, use all the peekers.
names = _sort_names(cls.peekers.keys())
# Collect all the traces for the Peekers matching the names.
traces = [getattr(cls.peekers.get(name), "trace", None) for name in names]
return traces_to_table_data(*traces, **kwargs)
@classmethod
def to_table(cls, *names, **kwargs):
format = kwargs.pop("format", "simple")
table_data, headers = cls.to_table_data(*names, **kwargs)
return tabulate(tabular_data=table_data, headers=headers, tablefmt=format)
@classmethod
def to_text_table(cls, *names, **kwargs):
if "format" not in kwargs:
kwargs["format"] = "simple"
print(cls.to_table(*names, **kwargs))
@classmethod
def to_html_table(cls, *names, **kwargs):
kwargs["format"] = "html"
tbl_html = cls.to_table(*names, **kwargs)
# Generate the HTML from the JSON.
DISP.display_html(DISP.HTML(tbl_html))
@classmethod
def get(cls, name):
"""Return the Peeker having the given name."""
cls._clean_names()
return cls.peekers.get(name)
@classmethod
def get_traces(cls):
"""Return a list of all the traces in the available Peekers."""
traces = [getattr(p, "trace", None) for p in cls.peekers.values()]
return [trc for trc in traces if trc is not None]
@classmethod
def to_matplotlib(cls, *names, **kwargs):
"""
Convert waveforms stored in peekers into a matplotlib plot.
Args:
*names: A list of strings containing the names for the Peekers that
will be displayed. A string may contain multiple,
space-separated names.
        Keyword Args:
start_time: The earliest (left-most) time bound for the waveform display.
stop_time: The latest (right-most) time bound for the waveform display.
title: String containing the title placed across the top of the display.
title_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
caption: String containing the title placed across the bottom of the display.
caption_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
tick: If true, times are shown at the tick marks of the display.
tock: If true, times are shown between the tick marks of the display.
grid_fmt (dict): https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D
time_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
width: The width of the waveform display in inches.
height: The height of the waveform display in inches.
Returns:
Figure and axes created by matplotlib.pyplot.subplots.
"""
cls._clean_names()
if cls.unit_time is None:
cls.unit_time = calc_unit_time(*cls.get_traces())
Trace.unit_time = cls.unit_time
if names:
# Go through the provided names and split any containing spaces
# into individual names.
names = [nm for name in names for nm in name.split()]
else:
# If no names provided, use all the peekers.
names = _sort_names(cls.peekers.keys())
# Collect all the Peekers matching the names.
peekers = [cls.get(name) for name in names]
traces = [getattr(p, "trace", None) for p in peekers]
return traces_to_matplotlib(*traces, **kwargs)
@classmethod
def to_wavejson(cls, *names, **kwargs):
"""
Convert waveforms stored in peekers into a WaveJSON data structure.
Args:
*names: A list of strings containing the names for the Peekers that
will be displayed. A string may contain multiple,
space-separated names.
        Keyword Args:
start_time: The earliest (left-most) time bound for the waveform display.
stop_time: The latest (right-most) time bound for the waveform display.
title: String containing the title placed across the top of the display.
caption: String containing the title placed across the bottom of the display.
tick: If true, times are shown at the tick marks of the display.
tock: If true, times are shown between the tick marks of the display.
Returns:
A dictionary with the JSON data for the waveforms.
"""
cls._clean_names()
if cls.unit_time is None:
cls.unit_time = calc_unit_time(*cls.get_traces())
Trace.unit_time = cls.unit_time
if names:
# Go through the provided names and split any containing spaces
# into individual names.
names = [nm for name in names for nm in name.split()]
else:
# If no names provided, use all the peekers.
names = _sort_names(cls.peekers.keys())
# Collect all the Peekers matching the names.
peekers = [cls.peekers.get(name) for name in names]
traces = [getattr(p, "trace", None) for p in peekers]
return traces_to_wavejson(*traces, **kwargs)
@classmethod
def to_wavedrom(cls, *names, **kwargs):
"""
Display waveforms stored in peekers in Jupyter notebook.
Args:
*names: A list of strings containing the names for the Peekers that
will be displayed. A string may contain multiple,
space-separated names.
        Keyword Args:
start_time: The earliest (left-most) time bound for the waveform display.
stop_time: The latest (right-most) time bound for the waveform display.
title: String containing the title placed across the top of the display.
caption: String containing the title placed across the bottom of the display.
tick: If true, times are shown at the tick marks of the display.
tock: If true, times are shown between the tick marks of the display.
width: The width of the waveform display in pixels.
skin: Selects the set of graphic elements used to create waveforms.
Returns:
Nothing.
"""
# Handle keyword args explicitly for Python 2 compatibility.
width = kwargs.get("width")
skin = kwargs.get("skin", "default")
if cls.USE_JUPYTER:
# Used with older Jupyter notebooks.
wavejson_to_wavedrom(
cls.to_wavejson(*names, **kwargs), width=width, skin=skin
)
else:
# Supports the new Jupyter Lab.
return nbwavedrom.draw(cls.to_wavejson(*names, **kwargs))
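    # Illustrative sketch (not part of the original code): once a simulation has
    # filled the peekers, the rendering entry points above are usually reached via
    # the module-level shortcuts set up in config_defaults(), e.g. (signal names
    # are hypothetical):
    #
    #     show_waveforms('clk bus', start_time=0, stop_time=100)
    #     show_html_table('clk bus')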
def delay(self, delta):
"""Return the trace data shifted in time by delta units."""
return self.trace.delay(delta)
def binarize(self):
"""Return trace of sample values set to 1 (if true) or 0 (if false)."""
return self.trace.binarize()
def __eq__(self, pkr):
return self.trace == pkr
def __ne__(self, pkr):
return self.trace != pkr
def __le__(self, pkr):
return self.trace <= pkr
def __ge__(self, pkr):
return self.trace >= pkr
def __lt__(self, pkr):
return self.trace < pkr
def __gt__(self, pkr):
return self.trace > pkr
def __add__(self, pkr):
return self.trace + pkr
def __sub__(self, pkr):
return self.trace - pkr
def __mul__(self, pkr):
return self.trace * pkr
def __floordiv__(self, pkr):
return self.trace // pkr
def __truediv__(self, pkr):
return self.trace / pkr
def __mod__(self, pkr):
return self.trace % pkr
def __lshift__(self, pkr):
return self.trace << pkr
def __rshift__(self, pkr):
return self.trace >> pkr
def __and__(self, pkr):
return self.trace & pkr
def __or__(self, pkr):
return self.trace | pkr
def __xor__(self, pkr):
return self.trace ^ pkr
def __pow__(self, pkr):
return self.trace ** pkr
def __pos__(self):
return +self.trace
def __neg__(self):
return -self.trace
def __not__(self):
return not self.trace
def __inv__(self):
return ~self.trace
def __abs__(self):
return abs(self.trace)
def trig_times(self):
"""Return list of times trace value is true (non-zero)."""
return self.trace.trig_times()
def _sort_names(names):
"""
Sort peeker names by index and alphabetically.
For example, the peeker names would be sorted as a[0], b[0], a[1], b[1], ...
"""
def index_key(lbl):
"""Index sorting."""
m = re.match(".*\[(\d+)\]$", lbl) # Get the bracketed index.
if m:
return int(m.group(1)) # Return the index as an integer.
return -1 # No index found so it comes before everything else.
def name_key(lbl):
"""Name sorting."""
m = re.match("^([^\[]+)", lbl) # Get name preceding bracketed index.
if m:
return m.group(1) # Return name.
return "" # No name found.
srt_names = sorted(names, key=name_key)
srt_names = sorted(srt_names, key=index_key)
return srt_names
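# Illustrative note (not part of the original code): _sort_names() sorts by name
# first and bracketed index second, e.g.
#
#     _sort_names(['b[1]', 'a[0]', 'b[0]', 'a[1]'])
#     # -> ['a[0]', 'b[0]', 'a[1]', 'b[1]']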
| mit |
lizardsystem/lizard-kml | lizard_kml/jarkus/nourishmentplot.py | 1 | 25472 | # -*- coding: utf-8 -*-
import logging
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates
import matplotlib.gridspec
logger = logging.getLogger(__name__)
# simplify for colors
typemap = {
'': 'strand',
'strandsuppletie': 'strand',
'dijkverzwaring': 'duin',
'strandsuppletie banket': 'strand',
'duinverzwaring': 'duin',
'strandsuppletie+vooroever': 'overig',
'Duinverzwaring': 'duin',
'duin': 'duin',
'duinverzwaring en strandsuppleti': 'duin',
'vooroever': 'vooroever',
'zeewaartse duinverzwaring': 'duin',
'banket': 'strand',
'geulwand': 'geulwand',
'anders': 'overig',
'landwaartse duinverzwaring': 'duin',
'depot': 'overig',
'vooroeversuppletie': 'vooroever',
'onderwatersuppletie': 'vooroever',
'geulwandsuppletie': 'geulwand'
}
beachcolors = {
'duin': 'peru',
'strand': 'khaki',
'vooroever': 'aquamarine',
'geulwand': 'lightseagreen',
'overig': 'grey'
}
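# Illustrative note (not part of the original code): a raw nourishment type from
# the data is first collapsed to a category via typemap and then coloured via
# beachcolors, e.g.
#
#     beachcolors[typemap['onderwatersuppletie']]   # -> 'aquamarine' (vooroever)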
def is_not_empty(array):
"""
    Test whether a numpy array is not empty (i.e. not all NaN).
    True if not empty, False if empty.
"""
return (not np.isnan(array).all())
def is_empty(array):
"""
    Test whether a numpy array is empty (i.e. all NaN).
True if empty, False if not.
"""
return np.isnan(array).all()
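# Illustrative note (not part of the original code): "empty" here means all-NaN
# rather than zero-length, e.g.
#
#     is_empty(np.array([np.nan, np.nan]))       # -> True
#     is_not_empty(np.array([1.0, np.nan]))      # -> True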
def combinedplot(dfs, figsize=None):
"""Create a combined plot of the coastal data"""
if figsize is None:
figsize = (8, 9)
shorelinedf = dfs['shorelinedf']
transectdf = dfs['transectdf']
nourishmentdf = dfs['nourishmentdf']
mkldf = dfs['mkldf']
bkldf = dfs['bkldf']
bwdf = dfs['bwdf']
dfdf = dfs['dfdf']
dunefaildf = dfs['dunefaildf']
transect = transectdf['transect'].irow(0)
areaname = transectdf['areaname'].irow(0)
# Plot the results.
fig = plt.figure(figsize=figsize)
# We define a grid of 5 areas
gs = matplotlib.gridspec.GridSpec(5, 1, height_ratios=[5, 2, 2, 2, 2],
left=0.08, right=0.76, top=0.96,
bottom=0.04)
gs.update(hspace=0.1)
    # Some common style properties; also store the style information file for
    # ggplot style in the directory where the script is.
props = dict(linewidth=1, alpha=0.7, markeredgewidth=0, markersize=8,
linestyle='-', marker='.')
date2num = matplotlib.dates.date2num
    # Figure 1: momentary coastline / coastline to be tested / basal coastline,
    # i.e. the second figure below. Preferably keep the labels in Dutch, and I
    # think the axis text is currently wrong (distance to RSP (meters))
# The first axis contains the coastal indicators related to volume
# Create the axis, based on the gridspec
ax1 = fig.add_subplot(gs[0])
# Set the main title
ax1.set_title('Indicatoren van de toestand van de kust transect %d (%s)'
% (transect, str(areaname).strip()))
# Plot the three lines
if (is_empty(mkldf['momentary_coastline']) and
is_empty(bkldf['basal_coastline']) and
is_empty(bkldf['testing_coastline']) ):
# Hack: set first value to 0.0, to make sure the share x-axis is
# generated properly for the other axes.
len_basal_coastline = len(bkldf['basal_coastline'])
if len_basal_coastline > 0:
# set last element to 0.0
bkldf['basal_coastline'][len_basal_coastline-1] = 0.0
ax1.plot(date2num(mkldf['time_MKL']), mkldf['momentary_coastline'],
label='momentane kustlijn', **props)
ax1.plot(date2num(bkldf['time']), bkldf['basal_coastline'],
label='basiskustlijn', **props)
ax1.plot(date2num(bkldf['time']), bkldf['testing_coastline'],
label='te toetsenkustlijn', **props)
# Plot the legend. This uses the label
ax1.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
# Hide the ticks for this axis (can't use set_visible on xaxis because it
# is shared)
try:
for label in ax1.xaxis.get_ticklabels():
label.set_visible(False)
except ValueError:
# No date set on axes, because of no data. No worries...
pass
# set y-label no matter what
ax1.set_ylabel('Afstand [m]')
    # Figure 2: dune foot / high water / low water, from (roughly) 1848 for
    # transects at every kilometre. For other transects from 1965 (Jarkus)
ax2 = fig.add_subplot(gs[1], sharex=ax1)
if (is_not_empty(dfdf['dune_foot_upperMKL_cross']) or
is_not_empty(dfdf['dune_foot_threeNAP_cross']) or
is_not_empty(shorelinedf['mean_high_water']) or
is_not_empty(shorelinedf['mean_low_water']) ):
ax2.plot(date2num(shorelinedf['time']), shorelinedf['mean_low_water'],
label='Laagwater positie', **props)
ax2.plot(date2num(shorelinedf['time']), shorelinedf['mean_high_water'],
label='Hoogwater positie', **props)
ax2.plot(date2num(dfdf['time']), dfdf['dune_foot_upperMKL_cross'],
label='Duinvoet (BKL-schijf)', **props)
ax2.plot(date2num(dfdf['time']), dfdf['dune_foot_threeNAP_cross'],
label='Duinvoet (NAP+3m)', **props)
ax2.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
# Only show up to 5 major ticks on y-axis.
ax2.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(5))
# Again remove the xaxis labels
try:
for label in ax2.xaxis.get_ticklabels():
label.set_visible(False)
except ValueError:
# No dates no problem
pass
# show y-label no matter what
ax2.set_ylabel('Afstand [m]')
    # Figure 3: beach width at high water / beach width at low water (also
    # from roughly 1848 for transects at every kilometre, for other transects
    # from 1965)
# Create another axis for the width and position parameters
# Share the x axis with axes ax1
ax3 = fig.add_subplot(gs[2], sharex=ax1)
# Plot the 3 lines
if (is_not_empty(bwdf['beach_width_at_MHW']) or
is_not_empty(bwdf['beach_width_at_MLW'])):
# !!! fill_between does not work with a datetime x (first element);
# leaving as is for now
# ax3.fill_between(np.asarray(bwdf['time']),
# np.asarray(bwdf['beach_width_at_MLW']),
# np.asarray(bwdf['beach_width_at_MHW']),
# alpha=0.3,
# color='black')
ax3.plot(date2num(bwdf['time']), bwdf['beach_width_at_MLW'],
label='strandbreedte MLW', **props)
ax3.plot(date2num(bwdf['time']), bwdf['beach_width_at_MHW'],
label='strandbreedte MHW', **props)
# Only show 5 major ticks on y-axis
ax3.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(5))
ax3.yaxis.grid(False)
# Again remove the xaxis labels
try:
for label in ax3.xaxis.get_ticklabels():
label.set_visible(False)
except ValueError:
# No dates no problem
pass
# Place the legend
ax3.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
# Dune foot is position but relative to RSP, so we can call it a width
# show y-label no matter what
ax3.set_ylabel('Breedte [m]')
    # Figure 4: executed nourishments; axis label preferably
    # Volume (m3/m)
# Create the third axes, again sharing the x-axis
ax4 = fig.add_subplot(gs[3], sharex=ax1)
    # Loop over each row, because we need to look up colors (bit of a hack)
if len(nourishmentdf) > 0:
# We need to store labels and a "proxy artist".
proxies = []
labels = []
for i, row in nourishmentdf.iterrows():
# Look up the color based on the type of nourishment
try:
color = beachcolors[typemap[row['type'].strip()]]
except KeyError, e:
logger.error("undefined beachcolor type: %s" % e)
color = beachcolors['overig']
# Strip spaces
label = row['type'].strip()
if label == 'onderwatersuppletie': # rename this label
label = 'vooroeversuppletie'
# Common properties
ax4_props = dict(alpha=0.7, linewidth=2)
# Plot a bar per nourishment
ax4.fill_betweenx([0, row['volm']], date2num(row['beg_date']),
date2num(row['end_date']), edgecolor=color,
color=color, **ax4_props)
if label not in labels:
# Fill_between's are not added with labels.
# So we'll create a proxy artist (a non plotted rectangle, with
# the same color)
# http://matplotlib.org/users/legend_guide.html
proxy = matplotlib.patches.Rectangle((0, 0), 1, 1,
facecolor=color,
**ax4_props)
proxy.set_label(label)
proxies.append(proxy)
labels.append(label)
# Only show 5 major ticks on y-axis
ax4.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(5))
ax4.yaxis.grid(False)
# Place the legend
ax4.legend(proxies, labels, bbox_to_anchor=(1.01, 1), loc=2,
borderaxespad=0.)
# Again remove the xaxis labels
try:
for label in ax4.xaxis.get_ticklabels():
label.set_visible(False)
except ValueError:
# No dates no problem
pass
# show y-label anyway
ax4.set_ylabel('Volume [m3/m]')
    # Figure 5: probability of failure of the first dune row (last figure at the bottom)
# Sharing the x-axis
ax5 = fig.add_subplot(gs[4], sharex=ax1)
if is_not_empty(dunefaildf['probability_failure']):
ax5.plot(date2num(dunefaildf['time']), dunefaildf['probability_failure'],
label='faalkans 1e duinrij', **props)
# This one we want to see
ax5.xaxis.set_visible(True)
ax5.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(5))
ax5.yaxis.grid(False)
ax5.set_yscale('log')
# Now we plot the proxies with corresponding legends.
ax5.legend(bbox_to_anchor=(1.01, 0), loc=3, borderaxespad=0.)
ax5.set_xlabel('Tijd [jaren]')
xlim = ax5.get_xlim()
N = int(np.floor(np.diff(xlim) / 365 / 5))
if N > 10:
N = 10
xaxis_locator = matplotlib.ticker.MaxNLocator(N)
ax5.xaxis.set_major_locator(xaxis_locator)
ax5.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y'))
# show y-label no matter what
ax5.set_ylabel('Kans [1/jr]')
return fig
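# Illustrative sketch (not part of the original module): combinedplot() expects
# the dict of dataframes produced by nc_models.makedfs() for a single transect,
# exactly as in the __main__ block at the bottom of this file, e.g.
#
#     dfs = makedfs(7005000)
#     fig = combinedplot(dfs)
#     fig.savefig('nourishment-7005000-combined.png')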
def all_else_fails_plot(dfs, figsize=None):
"""All else fails plot. Lots of checks are put in place already. Sometimes
though, combined plot generation may result in a matplotlib.dates
ValueError: ordinal must be >= 1. To show a figure anyway, this figure is
shown.
"""
if figsize is None:
figsize = (7, 9)
transectdf = dfs['transectdf']
transect = transectdf['transect'].irow(0)
areaname = transectdf['areaname'].irow(0)
fig = plt.figure(figsize=figsize)
gs = matplotlib.gridspec.GridSpec(1, 1)
gs.update(hspace=0.1)
ax = fig.add_subplot(gs[0])
ax.set_title('Onvoldoende data voor transect %d (%s) grafieken'
% (transect, str(areaname).strip()))
return fig
def test_plot_ax1(dfs):
shorelinedf = dfs['shorelinedf']
transectdf = dfs['transectdf']
nourishmentdf = dfs['nourishmentdf']
mkldf = dfs['mkldf']
bkldf = dfs['bkldf']
bwdf = dfs['bwdf']
dfdf = dfs['dfdf']
dunefaildf = dfs['dunefaildf']
transect = transectdf['transect'].irow(0)
areaname = transectdf['areaname'].irow(0)
# Plot the results.
fig = plt.figure(figsize=(7, 9))
    # Some common style properties; also store the style information file for
    # ggplot style in the directory where the script is.
props = dict(linewidth=1, alpha=0.7, markeredgewidth=0, markersize=8,
linestyle='-', marker='.')
    # Figure 1: momentary coastline / coastline to be tested / basal coastline,
    # i.e. the second figure below. Preferably keep the labels in Dutch, and I
    # think the axis text is currently wrong (distance to RSP (meters))
# The first axis contains the coastal indicators related to volume
# Create the axis, based on the gridspec
ax1 = fig.add_subplot(111)
# Set the main title
ax1.set_title('Indicatoren van de toestand van de kust\ntransect %d (%s)'
% (transect, str(areaname).strip()))
# Plot the three lines if there's at least one non-empty array
if is_not_empty(mkldf['momentary_coastline']) or \
is_not_empty(bkldf['basal_coastline']) or \
is_not_empty(bkldf['testing_coastline']):
date2num = matplotlib.dates.date2num
ax1.plot(date2num(mkldf['time_MKL']), mkldf['momentary_coastline'],
label='momentane kustlijn', **props)
ax1.plot(date2num(bkldf['time']), bkldf['basal_coastline'],
label='basiskustlijn', **props)
ax1.plot(date2num(bkldf['time']), bkldf['testing_coastline'],
label='te toetsenkustlijn', **props)
# Plot the legend. This uses the label
ax1.legend(loc='upper left')
# Hide the ticks for this axis (can't use set_visible on xaxis because it
# is shared)
try:
for label in ax1.xaxis.get_ticklabels():
label.set_visible(False)
except ValueError:
# No date set on axes, because of no data. No worries...
pass
# Show the y axis label
ax1.set_ylabel('Afstand [m]')
return fig
def test_plot_ax2(dfs):
shorelinedf = dfs['shorelinedf']
transectdf = dfs['transectdf']
nourishmentdf = dfs['nourishmentdf']
mkldf = dfs['mkldf']
bkldf = dfs['bkldf']
bwdf = dfs['bwdf']
dfdf = dfs['dfdf']
dunefaildf = dfs['dunefaildf']
transect = transectdf['transect'].irow(0)
areaname = transectdf['areaname'].irow(0)
# Plot the results.
fig = plt.figure(figsize=(7, 9))
    # Some common style properties; also store the style information file for
    # ggplot style in the directory where the script is.
props = dict(linewidth=1, alpha=0.7, markeredgewidth=0, markersize=8,
linestyle='-', marker='.')
date2num = matplotlib.dates.date2num
    # Figure 2: dune foot / high water / low water, from (roughly) 1848 for
    # transects at every kilometre. For other transects from 1965 (Jarkus)
ax2 = fig.add_subplot(111)
# Plot the four lines if at least is non-empty (= not all NaNs)
if (is_not_empty(dfdf['dune_foot_upperMKL_cross']) or
is_not_empty(dfdf['dune_foot_threeNAP_cross']) or
is_not_empty(shorelinedf['mean_high_water']) or
is_not_empty(shorelinedf['mean_low_water']) ):
ax2.plot(date2num(dfdf['time']), dfdf['dune_foot_upperMKL_cross'],
label='Duinvoet (BKL-schijf)', **props)
ax2.plot(date2num(dfdf['time']), dfdf['dune_foot_threeNAP_cross'],
label='Duinvoet (NAP+3 meter)', **props)
ax2.plot(date2num(shorelinedf['time']), shorelinedf['mean_high_water'],
label='Hoogwater positie', **props)
ax2.plot(date2num(shorelinedf['time']), shorelinedf['mean_low_water'],
label='Laagwater positie', **props)
leg = ax2.legend(loc='best')
leg.get_frame().set_alpha(0.7)
# Look up the location of the tick labels, because we're removing all but
# the first and last.
locs = [ax2.yaxis.get_ticklocs()[0], ax2.yaxis.get_ticklocs()[-1]]
# We don't want too much cluttering
ax2.yaxis.set_ticks(locs)
# Again remove the xaxis labels
try:
for label in ax2.xaxis.get_ticklabels():
label.set_visible(False)
except ValueError:
# No dates no problem
pass
ax2.set_ylabel('Afstand [m]')
return fig
def test_plot_ax3(dfs):
shorelinedf = dfs['shorelinedf']
transectdf = dfs['transectdf']
nourishmentdf = dfs['nourishmentdf']
mkldf = dfs['mkldf']
bkldf = dfs['bkldf']
bwdf = dfs['bwdf']
dfdf = dfs['dfdf']
dunefaildf = dfs['dunefaildf']
transect = transectdf['transect'].irow(0)
areaname = transectdf['areaname'].irow(0)
# Plot the results.
fig = plt.figure(figsize=(7, 9))
    # Some common style properties; also store the style information file for
    # ggplot style in the directory where the script is.
props = dict(linewidth=1, alpha=0.7, markeredgewidth=0, markersize=8,
linestyle='-', marker='.')
date2num = matplotlib.dates.date2num
    # Figure 3: beach width at high water / beach width at low water (also
    # from roughly 1848 for transects at every kilometre, for other transects
    # from 1965)
# Create another axis for the width and position parameters
# Share the x axis with axes ax1
ax3 = fig.add_subplot(111)
# Plot the 3 lines
# !!! fill_between does not work with a datetime x (first element);
# leaving as is for now
# ax3.fill_between(np.asarray(bwdf['time']),
# np.asarray(bwdf['beach_width_at_MLW']),
# np.asarray(bwdf['beach_width_at_MHW']),
# alpha=0.3,
# color='black')
if (is_not_empty(bwdf['beach_width_at_MHW']) or
is_not_empty(bwdf['beach_width_at_MLW'])):
ax3.plot(bwdf['time'], bwdf['beach_width_at_MHW'],
label='strandbreedte MHW', **props)
ax3.plot(bwdf['time'], bwdf['beach_width_at_MLW'],
label='strandbreedte MLW', **props)
# ax3.plot(date2num(dfdf['time']), dfdf['dune_foot_upperMKL_cross'],
# label='Duinvoet (BKL-schijf)', **props)
# Dune foot is position but relative to RSP, so we can call it a width
# Look up the location of the tick labels, because we're removing all but
# the first and last.
locs = [ax3.yaxis.get_ticklocs()[0], ax3.yaxis.get_ticklocs()[-1]]
# We don't want too much cluttering
ax3.yaxis.set_ticks(locs)
ax3.yaxis.grid(False)
# Again remove the xaxis labels
try:
for label in ax3.xaxis.get_ticklabels():
label.set_visible(False)
except ValueError:
# No dates no problem
pass
# Place the legend
ax3.legend(loc='upper left')
ax3.set_ylabel('Breedte [m]')
return fig
def test_plot_ax4(dfs):
shorelinedf = dfs['shorelinedf']
transectdf = dfs['transectdf']
nourishmentdf = dfs['nourishmentdf']
mkldf = dfs['mkldf']
bkldf = dfs['bkldf']
bwdf = dfs['bwdf']
dfdf = dfs['dfdf']
dunefaildf = dfs['dunefaildf']
transect = transectdf['transect'].irow(0)
areaname = transectdf['areaname'].irow(0)
# Plot the results.
fig = plt.figure(figsize=(7, 9))
    # Some common style properties; also store the style information file for
    # ggplot style in the directory where the script is.
props = dict(linewidth=1, alpha=0.7, markeredgewidth=0, markersize=8,
linestyle='-', marker='.')
    # Figure 4: executed nourishments; axis label preferably
    # Volume (m3/m)
# Create the third axes, again sharing the x-axis
ax4 = fig.add_subplot(111)
date2num = matplotlib.dates.date2num
# We need to store labels and a "proxy artist".
proxies = []
labels = []
    # Loop over each row, because we need to look up colors (bit of a hack)
for i, row in nourishmentdf.iterrows():
# Look up the color based on the type of nourishment
color = beachcolors[typemap[row['type'].strip()]]
# Strip spaces
label = row['type'].strip()
# Common properties
ax4_props = dict(alpha=0.7, linewidth=2)
# Plot a bar per nourishment
ax4.fill_betweenx([0, row['volm']], date2num(row['beg_date']),
date2num(row['end_date']), edgecolor=color,
color=color, **ax4_props)
if label not in labels:
# Fill_between's are not added with labels.
# So we'll create a proxy artist (a non plotted rectangle, with
# the same color)
# http://matplotlib.org/users/legend_guide.html
proxy = matplotlib.patches.Rectangle((0, 0), 1, 1,
facecolor=color, **ax4_props)
proxy.set_label(label)
proxies.append(proxy)
labels.append(label)
# Only use first and last tick label
locs = [ax4.yaxis.get_ticklocs()[0], ax4.yaxis.get_ticklocs()[-1]]
ax4.yaxis.set_ticks(locs)
ax4.yaxis.grid(False)
ax4.set_ylabel('Volume [m3/m]')
# Again remove the xaxis labels
try:
for label in ax4.xaxis.get_ticklabels():
label.set_visible(False)
except ValueError:
# No dates no problem
pass
# Place the legend
ax4.legend(proxies, labels, loc='upper left')
return fig
def test_plot_ax5(dfs):
shorelinedf = dfs['shorelinedf']
transectdf = dfs['transectdf']
nourishmentdf = dfs['nourishmentdf']
mkldf = dfs['mkldf']
bkldf = dfs['bkldf']
bwdf = dfs['bwdf']
dfdf = dfs['dfdf']
dunefaildf = dfs['dunefaildf']
transect = transectdf['transect'].irow(0)
areaname = transectdf['areaname'].irow(0)
# Plot the results.
fig = plt.figure(figsize=(7, 9))
    # Some common style properties; also store the style information file for
    # ggplot style in the directory where the script is.
props = dict(linewidth=1, alpha=0.7, markeredgewidth=0, markersize=8,
linestyle='-', marker='.')
date2num = matplotlib.dates.date2num
    # Figure 5: probability of failure of the first dune row (last figure at the bottom)
# Sharing the x-axis
ax5 = fig.add_subplot(111)
if is_not_empty(dunefaildf['probability_failure']):
ax5.plot(date2num(dunefaildf['time']),
dunefaildf['probability_failure'],
label='faalkans eerste duinrij', **props)
# This one we want to see
ax5.xaxis.set_visible(True)
# Only use first and last tick label
locs = [ax5.yaxis.get_ticklocs()[0], ax5.yaxis.get_ticklocs()[-1]]
ax5.yaxis.set_ticks(locs)
ax5.yaxis.grid(False)
ax5.set_yscale('log')
ax5.set_xlabel('Tijd [jaren]')
# Show dates at decenia
locator = matplotlib.dates.YearLocator(base=25)
ax5.xaxis.set_major_locator(locator)
ax5.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y'))
# Now we plot the proxies with corresponding legends.
ax5.legend(loc='best')
ax5.set_ylabel('Kans [1/jr]')
return fig
if __name__ == '__main__':
print (
'Be sure to run using [buildout_dir]/bin/python if you want'
' to test using the same libraries as the site'
)
try:
from lizard_kml.jarkus.matplotlib_settings import \
set_matplotlib_defaults
except ImportError:
# import locally anyway
from matplotlib_settings import set_matplotlib_defaults
set_matplotlib_defaults()
try:
from lizard_kml.jarkus.nc_models import makedfs
except ImportError:
# import locally anyway
from nc_models import makedfs
transects = [
7005000, # working
7005475, # no data for ax3 and ax4 figure (returns all-else-fails)
9010047, # has very few data (should also work)
8008100,
]
for transect in transects:
dfs = makedfs(transect)
try:
fig1 = test_plot_ax1(dfs)
fig1.savefig('nourishment-%s-ax1.png' % transect)
except ValueError, e:
print "ERROR - axis 1 for %s fails: %s" % (transect, e)
try:
fig2 = test_plot_ax2(dfs)
fig2.savefig('nourishment-%s-ax2.png' % transect)
except ValueError, e:
print "ERROR - axis 2 for %s fails: %s" % (transect, e)
try:
fig3 = test_plot_ax3(dfs)
fig3.savefig('nourishment-%s-ax3.png' % transect)
except ValueError, e:
print "ERROR - axis 3 for %s fails: %s" % (transect, e)
try:
fig4 = test_plot_ax4(dfs)
fig4.savefig('nourishment-%s-ax4.png' % transect)
except ValueError, e:
print "ERROR - axis 4 for %s fails: %s" % (transect, e)
try:
fig5 = test_plot_ax5(dfs)
fig5.savefig('nourishment-%s-ax5.png' % transect)
except ValueError, e:
print "ERROR - axis 5 for %s fails: %s" % (transect, e)
try:
fig_comb = combinedplot(dfs)
fig_comb.savefig('nourishment-%s-combined.png' % transect)
except ValueError, e:
print("ERROR - combined for %s fails: %s. Saving 'all-else-fails'"
" (aef) plot." % (transect, e))
all_else_fails_fig = all_else_fails_plot(dfs)
all_else_fails_fig.savefig('nourishment-%s-aef.png' % transect)
| gpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/units/units_scatter.py | 1 | 1573 | """
=============
Unit handling
=============
The example below shows support for unit conversions over masked
arrays.
.. only:: builder_html
This example requires :download:`basic_units.py <basic_units.py>`
"""
import numpy as np
import matplotlib.pyplot as plt
from basic_units import secs, hertz, minutes
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=300):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# create masked array
data = (1, 2, 3, 4, 5, 6, 7, 8)
mask = (1, 0, 1, 0, 0, 0, 1, 0)
xsecs = secs * np.ma.MaskedArray(data, mask, float)
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)
ax1.scatter(xsecs, xsecs)
ax1.yaxis.set_units(secs)
ax1.axis([0, 10, 0, 10])
ax2.scatter(xsecs, xsecs, yunits=hertz)
ax2.axis([0, 10, 0, 1])
ax3.scatter(xsecs, xsecs, yunits=hertz)
ax3.yaxis.set_units(minutes)
ax3.axis([0, 10, 0, 1])
fig.tight_layout()
pltshow(plt)
| mit |
xiaoxiamii/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
DavidPost-1/PyHDFView | pyhdfview.py | 1 | 15407 | # -*- coding: utf-8 -*-
"""
This file is part of PyHDFView.
PyHDFView is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyHDFView is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyHDFView. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from PyQt4 import QtCore, QtGui
import functools as ft
import h5py
import _window_classes as wc
plt.style.use('bmh')
class mainWindow(QtGui.QMainWindow):
def __init__(self):
super(mainWindow, self).__init__()
self.filename = ''
self.text = ''
self.values = np.array([])
self.current_dataset = ''
self.file_items = []
self.recent_files_path = 'recent_files.txt'
self.initialise_user_interface()
def initialise_user_interface(self):
'''
Initialises the main window. '''
grid = QtGui.QGridLayout()
grid.setSpacing(10)
# File structure list and dataset table
self.file_items_list = wc.titledTree('File Tree')
self.file_items_list.list.itemClicked.connect(self.item_clicked)
self.file_items_list.list.itemExpanded.connect(self.file_items_list.swap_group_icon)
self.file_items_list.list.itemCollapsed.connect(self.file_items_list.swap_group_icon)
self.file_items_list.list.setMinimumWidth(250)
# Make dataset table
self.dataset_table = wc.titledTable('Values')
self.dataset_table.table.setMinimumWidth(350)
# Make attribute table
self.attribute_table = QtGui.QTableWidget()
self.attribute_table.setShowGrid(True)
# Initialise all buttons
self.general_buttons = self.initialise_general_buttons()
self.dataset_buttons = self.initialise_dataset_buttons()
# Set maximum widths when the window is resized.
self.resizeEvent = self.onresize
# Add 'extra' window components
self.make_menu_bar()
self.make_recent_files_menu()
self.filename_label = QtGui.QLabel('')
# Add the created layouts and widgets to the window
grid.addLayout(self.general_buttons, 1, 0, QtCore.Qt.AlignLeft)
grid.addLayout(self.dataset_buttons, 1, 1, QtCore.Qt.AlignLeft)
grid.addWidget(self.filename_label, 2, 0)
grid.addLayout(self.file_items_list.layout, 3, 0)
grid.addLayout(self.dataset_table.layout, 3, 1)
grid.addWidget(self.attribute_table, 4, 0, 1, 2)
self.setCentralWidget(QtGui.QWidget(self))
self.centralWidget().setLayout(grid)
# Other tweaks to the window such as icons etc
self.setWindowTitle('PyHDFView')
#QtGui.QApplication.setStyle(QtGui.QStyleFactory.create('Cleanlooks'))
def onresize(self, event):
        self.dataset_table.table.setMaximumWidth(0.7*self.width())
        self.file_items_list.list.setMaximumWidth(0.3*self.width())
self.attribute_table.setMaximumHeight(0.3*self.height())
def initialise_general_buttons(self):
'''
Initialises the buttons in the button bar at the top of the main window. '''
open_file_btn = QtGui.QPushButton('Open')
open_file_btn.clicked.connect(self.choose_file)
button_section = QtGui.QHBoxLayout()
button_section.addWidget(open_file_btn)
#button_section.addStretch(0)
return button_section
def initialise_dataset_buttons(self):
self.plot_btn = QtGui.QPushButton('Plot')
self.plot_btn.clicked.connect(self.plot_graph)
self.plot_btn.hide()
button_section = QtGui.QHBoxLayout()
button_section.addWidget(self.plot_btn)
return button_section
def make_menu_bar(self):
'''
Initialises the menu bar at the top. '''
menubar = self.menuBar()
# Create a File menu and add an open button
self.file_menu = menubar.addMenu('&File')
open_action = QtGui.QAction('&Open', self)
open_action.setShortcut('Ctrl+o')
open_action.triggered.connect(self.choose_file)
self.file_menu.addAction(open_action)
# Add the recent files menu, but don't populate it yet
self.recent_files_menu = self.file_menu.addMenu('&Recent Files')
# Add an exit button to the file menu
exit_action = QtGui.QAction('&Exit', self)
exit_action.setShortcut('Ctrl+Q')
exit_action.setStatusTip('Exit application')
exit_action.triggered.connect(QtGui.qApp.quit)
self.file_menu.addAction(exit_action)
# Create a Help menu and add an about button
help_menu = menubar.addMenu('&Help')
about_action = QtGui.QAction('About PyHDFView', self)
about_action.setStatusTip('About this program')
about_action.triggered.connect(self.show_about_menu)
help_menu.addAction(about_action)
def show_about_menu(self):
'''
Shows the about menu by initialising an about_window object. This class is described in _window_classes.py '''
self.about_window = wc.aboutWindow()
self.about_window.show()
def make_recent_files_menu(self):
'''
Reads recent files from the recent_files.txt file and adds items to the
Recent Files menu.
'''
# Open the recent_files file and load in the filename
lines = self.read_recent_files_file()
self.recent_files_list = []
for i in range(len(lines)):
# make a new menu item
self.recent_files_list.append(QtGui.QAction(lines[i], self))
self.recent_files_list[i].triggered.connect(ft.partial(self.open_recent_file, lines[i]))
# add it to the menu
self.recent_files_menu.addAction(self.recent_files_list[i])
def recent_files_reset(self):
'''
When a user opens a new file we have to reset the recent files list
in the menu, and also update the recent files file.
'''
self.recent_files_menu.clear()
# Open the recent_files file and load in the filename
lines = self.read_recent_files_file()
num_appearances = lines.count(self.filename)
if num_appearances > 0:
for i in range(num_appearances):
in_position = lines.index(self.filename)
lines.pop(in_position)
# Insert the current filename the first item in the list
lines.insert(0, self.filename)
self.write_recent_files_file(lines)
self.make_recent_files_menu()
def open_recent_file(self, filename):
'''
Function to run when a recent file is clicked
'''
self.initiate_file_open(filename)
def read_recent_files_file(self):
lines = []
try:
with open(self.recent_files_path, 'r') as rf:
for i in rf:
if len(i) > 5:
item = i.replace('\n', '')
lines.append(item)
except FileNotFoundError:
temp_file = open(self.recent_files_path, 'w')
temp_file.close()
return lines
def write_recent_files_file(self, lines):
'''
Function to write the recent files list to recent_files.txt
just writes lines to a file up to a max number
'''
max_recent_files = 5
with open(self.recent_files_path, 'w') as rf:
for i in lines[0:max_recent_files]:
rf.write(i+'\n')
def choose_file(self):
'''
Opens a QFileDialog window to allow the user to choose the hdf5 file they would like to view. '''
filename = QtGui.QFileDialog.getOpenFileName(self, 'Open file',
'/home', filter='*.hdf5 *.h5')
self.initiate_file_open(filename)
def initiate_file_open(self, filename):
self.filename = filename
self.recent_files_reset()
self.clear_file_items()
self.dataset_table.clear()
self.attribute_table.clear()
try:
self.open_file(filename)
self.populate_file_file_items_list()
self.filename_label.setText(filename.split('/')[-1])
self.setWindowTitle('PyHDFView - ' + filename)
except:
            self.filename = '' # opening failed, so reset the stored filename
self.filename_label.setText('')
self.setWindowTitle('PyHDFView')
self.clear_file_items()
self.dataset_table.clear()
self.attribute_table.clear()
print("Error opening file")
def open_file(self, filename):
'''
Opens the chosen HDF5 file. '''
self.hdf5_file = h5py.File(filename, 'r')
def find_items(self, hdf_group):
'''
        Recursively collects the names of all nested groups and datasets and returns them as a nested list.'''
file_items = []
for i in hdf_group.keys():
file_items.append(hdf_group[i].name)
if isinstance(hdf_group[i], h5py.Group):
a = self.find_items(hdf_group[i])
if len(a) >= 1:
file_items.append(a)
return file_items
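    # Editor's note: an illustrative sketch (not part of the original code)
    # that mirrors the recursion in find_items with plain nested dicts in
    # place of h5py groups, to show the shape of the returned nested list,
    # e.g. {'data': {'raw': None}, 'meta': None} -> ['/data', ['/data/raw'], '/meta'].
    @staticmethod
    def _find_items_demo(tree, prefix=''):
        def walk(group, path):
            items = []
            for name, node in group.items():
                full_name = path + '/' + name
                items.append(full_name)
                if isinstance(node, dict):  # a dict stands in for an h5py.Group
                    sub = walk(node, full_name)
                    if len(sub) >= 1:
                        items.append(sub)
            return items
        return walk(tree, prefix)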
def clear_file_items(self):
self.file_items = []
self.file_items_list.clear()
def add_item_to_file_list(self, items, item_index, n):
item_list = items[item_index]
for i in range(len(item_list)):
if isinstance(item_list[i], str):
self.file_items_list.add_item(n, item_list[i], self.hdf5_file)
else:
self.add_item_to_file_list(item_list, i, n+i)
def populate_file_file_items_list(self):
'''
Function to populate the file structure list on the main window.
'''
# Find all of the items in this file
file_items = self.find_items(self.hdf5_file)
self.file_items = file_items
# Add these items to the file_items_list.
# For clarity only the item name is shown, not the full path.
# Arrows are used to suggest that an item is contained.
for i in range(len(self.file_items)):
if isinstance(self.file_items[i], str):
self.file_items_list.add_item(None, self.file_items[i], self.hdf5_file)
else:
self.add_item_to_file_list(self.file_items, i, i-1)
def plot_graph(self):
'''
        Plots the data that is currently shown in the dataset table. Currently opens a matplotlib figure and shows it using the user's current backend.'''
#self.a = wc.plotOptionWindow()
#self.a.show()
selected_items = self.dataset_table.table.selectedItems()
if len(selected_items) > 0:
min_row = selected_items[0].row()
max_row = selected_items[-1].row() + 1
min_col = selected_items[0].column()
max_col = selected_items[-1].column() + 1
else:
shape = np.shape(self.values)
if len(shape) == 1:
max_col = 1
else:
max_col = shape[1]
min_row = 0
max_row = shape[0]
min_col = 0
plt.ion()
plt.close('all')
        if len(self.values) > 0:  # only plot when the dataset is not empty
            if len(np.shape(self.values)) > 1:  # 2D data: plot each selected row across the selected columns
plt.figure()
for i in range(min_row, max_row):
plt.plot(self.values[i, min_col:max_col], '-o', label=str(i))
plt.legend(loc=0)
plt.show()
else: # for 1d data we plot a row
plt.figure()
plt.plot(self.values[min_row:max_row], '-o')
plt.show()
def display_dataset(self):
selected_row = self.file_items_list.list.currentItem()
text = self.file_items_list.full_item_path(selected_row)
# We first want to find out whether this is a new item
# or if they have double clicked the same item as before.
# If it is a new item, and that item corresponds to a dataset
# then repopulate the table and the self.values variable.
# Otherwise clear the table and self.values, and hide the plot button.
if (not text == self.current_dataset) and isinstance(self.hdf5_file[text], h5py.Dataset):
self.current_dataset = text
self.values = self.hdf5_file[text].value
if len(self.values) > 0: # If the dataset is not empty
self.plot_btn.show()
self.dataset_table.clear()
numrows = len(self.values)
numcols = self.dataset_table.num_cols(self.values)
self.dataset_table.table.setRowCount(numrows)
self.dataset_table.table.setColumnCount(numcols)
for i in range(numrows):
if numcols > 1:
for j in range(numcols):
self.dataset_table.set_item(i, j, str(self.values[i,j]))
else:
self.dataset_table.set_item(i, 0, str(self.values[i]))
elif isinstance(self.hdf5_file[text], h5py.Group):
self.current_dataset = text
self.dataset_table.clear()
self.values = np.array([])
self.plot_btn.hide()
def display_attributes(self):
# reset the value
self.attribute_table.clear()
# Find the path of the selected item and extract attrs
selected_row = self.file_items_list.list.currentItem()
path = self.file_items_list.full_item_path(selected_row)
attributes = list(self.hdf5_file[path].attrs.items())
num_attributes = len(attributes)
# Prepare the table by setting the appropriate row number
self.attribute_table.setRowCount(num_attributes)
self.attribute_table.setColumnCount(0)
if num_attributes > 0:
self.attribute_table.setColumnCount(2)
# Populate the table
for i in range(num_attributes):
self.attribute_table.setItem(i, 0, QtGui.QTableWidgetItem(attributes[i][0]))
value = attributes[i][1]
                # h5py returns string attributes as encoded numpy arrays;
                # decode them if necessary.
if isinstance(value, np.ndarray):
self.attribute_table.setItem(i, 1, QtGui.QTableWidgetItem(str(value[0].decode())))
else:
self.attribute_table.setItem(i, 1, QtGui.QTableWidgetItem(str(value)))
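    # Editor's note: an illustrative sketch (not part of the original code)
    # of the decoding rule applied above; h5py frequently returns string
    # attributes as numpy arrays of bytes.
    @staticmethod
    def _decode_attr_demo(value):
        if isinstance(value, np.ndarray):
            first = value[0]
            return first.decode() if isinstance(first, bytes) else str(first)
        return str(value)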
def item_double_clicked(self):
'''
Responds to a double click on an item in the file_items_list.'''
# self.display_dataset()
def item_clicked(self):
self.display_attributes()
self.display_dataset()
def main():
app = QtGui.QApplication(sys.argv)
pyhdfview_window = mainWindow()
pyhdfview_window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| gpl-3.0 |
bradmontgomery/ml | book/ch07/boston_cv_penalized.py | 24 | 1381 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script fits several forms of penalized regression
from __future__ import print_function
import numpy as np
from sklearn.cross_validation import KFold
from sklearn.linear_model import LinearRegression, ElasticNet, Lasso, Ridge
from sklearn.metrics import r2_score
from sklearn.datasets import load_boston
boston = load_boston()
x = boston.data
y = boston.target
for name, met in [
('linear regression', LinearRegression()),
('lasso()', Lasso()),
('elastic-net(.5)', ElasticNet(alpha=0.5)),
('lasso(.5)', Lasso(alpha=0.5)),
('ridge(.5)', Ridge(alpha=0.5)),
]:
# Fit on the whole data:
met.fit(x, y)
# Predict on the whole data:
p = met.predict(x)
r2_train = r2_score(y, p)
    # Now, we use 5-fold cross-validation to estimate generalization error
kf = KFold(len(x), n_folds=5)
p = np.zeros_like(y)
for train, test in kf:
met.fit(x[train], y[train])
p[test] = met.predict(x[test])
r2_cv = r2_score(y, p)
print('Method: {}'.format(name))
print('R2 on training: {}'.format(r2_train))
print('R2 on 5-fold CV: {}'.format(r2_cv))
print()
print()
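# Editor's note: an illustrative sketch only.  It assumes a newer
# scikit-learn is available, where sklearn.cross_validation was replaced by
# sklearn.model_selection; the same 5-fold estimate can then be written as:
def _cross_val_sketch(estimator, x, y):
    from sklearn.model_selection import cross_val_predict
    p = cross_val_predict(estimator, x, y, cv=5)
    return r2_score(y, p)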
| mit |
mmottahedi/neuralnilm_prototype | scripts/e297.py | 2 | 10746 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.5,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
updates_func=momentum,
learning_rate=1e-03,
learning_rate_changes_by_iteration={
2000: 5e-04,
5000: 1e-04,
7000: 5e-05
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
plotter=MDNPlotter,
layers_config=[
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
}
]
)
def exp_a(name):
# 5 appliances
# avg valid cost = 1.1260980368
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].extend([
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
])
net = Net(**net_dict_copy)
return net
def exp_b(name):
# 3 appliances
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy['appliances'] = [
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
]
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'].extend([
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
])
net = Net(**net_dict_copy)
return net
def exp_c(name):
# one pool layer
# avg valid cost = 1.2261329889
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def exp_d(name):
# BLSTM
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def exp_e(name):
# BLSTM 2x2x pool
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('abcde')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=4000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
Aasmi/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
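def _rbf_sampler_usage_sketch():
    """Editor's illustrative sketch (not part of scikit-learn): approximate an
    RBF kernel with random Fourier features and fit a linear classifier on the
    transformed data.  The tiny dataset below is made up for illustration.
    """
    from sklearn.linear_model import SGDClassifier
    X = [[0, 0], [1, 1], [1, 0], [0, 1]]
    y = [0, 0, 1, 1]
    rbf_feature = RBFSampler(gamma=1., n_components=100, random_state=1)
    X_features = rbf_feature.fit_transform(X)
    clf = SGDClassifier(random_state=0)
    clf.fit(X_features, y)
    return clf.score(X_features, y)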
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Approximates the feature map by sampling the Fourier transform of the
    kernel characteristic at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
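def _additive_chi2_shape_sketch():
    """Editor's illustrative sketch (not part of scikit-learn): with the
    default sample_steps=2 every input feature is expanded into
    2*sample_steps - 1 = 3 output features, so 4 input features become 12.
    """
    X = np.array([[1., 2., 3., 4.],
                  [0., 1., 0., 2.]])
    transformer = AdditiveChi2Sampler(sample_steps=2)
    return transformer.fit_transform(X).shape  # (2, 12)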
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
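def _nystroem_usage_sketch():
    """Editor's illustrative sketch (not part of scikit-learn): approximate an
    RBF kernel map on random data and inspect the transformed shape.  The data
    and parameter values are made up for illustration.
    """
    rng = np.random.RandomState(0)
    X = rng.rand(60, 5)
    feature_map = Nystroem(kernel='rbf', gamma=0.2, n_components=30,
                           random_state=0)
    return feature_map.fit_transform(X).shape  # (60, 30)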
| bsd-3-clause |
jigargandhi/UdemyMachineLearning | Machine Learning A-Z Template Folder/Part 9 - Dimensionality Reduction/Section 45 - Kernel PCA/j_kernel_pca.py | 1 | 2906 | # -*- coding: utf-8 -*-
# Note: unlike LDA, kernel PCA is unsupervised and does not use the dependent variable
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#importing libraries
dataset= pd.read_csv("Social_Network_Ads.csv")
X= dataset.iloc[:,[2,3]].values
y = dataset.iloc[:,4].values
#split
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state = 0)
#Note: Feature Scaling must be applied when doing feature extraction using PCA/LDA/ Kernel PCA
#feature scaling
from sklearn.preprocessing import StandardScaler
sc_X= StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
from sklearn.decomposition import KernelPCA
kpca = KernelPCA(n_components = 2, kernel= 'rbf')
X_train = kpca.fit_transform(X_train)
X_test = kpca.transform(X_test)
#logistic regression
from sklearn.linear_model import LogisticRegression
# since our classifier is a linear classifier the prediction boundary will be a straight line
# this is the most simplest classifier
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train,y_train)
#prediction
y_pred= classifier.predict(X_test)
#making confusion matrix
from sklearn.metrics import confusion_matrix
cm= confusion_matrix(y_test, y_pred)
#visualizing
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green','blue'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('KPCA1')
plt.ylabel('KPCA2')
plt.legend()
plt.show()
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('KPCA1')
plt.ylabel('KPCA2')
plt.legend()
plt.show()
| mit |
cengelif/Relevancer | relevancerdb.py | 1 | 4719 | # import argparse
import relevancer as rlv
import pandas as pd
from bson.objectid import ObjectId
# parser = argparse.ArgumentParser(description='Cluster tweets of a certain collection')
# parser.add_argument('-c', '--collection', type=str, required=True, help='collection name of the tweets')
# args = parser.parse_args()
my_token_pattern = r"[#@]?\w+\b|[\U00010000-\U0010ffff]"
collection = 'all_data' # 'flood'
rlvdb, rlvcl = rlv.connect_mongodb(configfile='myalldata.ini', coll_name=collection) # Db and the collection that contains the tweet set to be annotated.
begin = ObjectId('55cd9edc78300a0b48354fbd') # 55950fb4d04475ee9867f3a4
end = ObjectId('55d4448aa4c41a84e4a83341') # 55950fc9d04475ee986841c3
# tweetlist = rlv.read_json_tweets_database(rlvcl, mongo_query={}, tweet_count=3000, reqlang='en')
# tweetlist = rlv.read_json_tweets_database(rlvcl, mongo_query={'_id': {'$gte': begin, '$lte': end}}, tweet_count=10000, reqlang='en')
# This list is just for test.
# annotated_tw_ids = ['563829354258788352', ' 564030861226430464', ' 564013764614168576', '564021392891318274', '563657483395530753', '563654330041909248', ' 563657924233289728', '563651950386757632', '563660271810383872'] # You should get the actual annotated tweet ids from the annotated tweets collection.
annotated_tw_ids = ['631754887861112832', ' 631754821859700736', ' 631754771183988737', '631754761595973632', '631754703357906944', '631754719350931456', ' 631754609120387072', '631754601918763008', '632104500573003776']
tweetlist = rlv.read_json_tweet_fields_database(rlvcl, mongo_query=({'_id': {'$gte': begin, '$lte': end}, 'lang': 'en'}), tweet_count=48524, annotated_ids=annotated_tw_ids)
rlv.logging.info("Number of tweets:" + str(len(tweetlist)))
# print(len(tweetlist))
tweetsDF = rlv.create_dataframe(tweetlist)
tok = rlv.tok_results(tweetsDF, elimrt=True)
start_tweet_size = len(tweetsDF)
rlv.logging.info("\nNumber of the tweets after retweet elimination:" + str(start_tweet_size))
tw_id_list = rlv.get_ids_from_tw_collection(rlvcl)
print("Length of the tweet ids and the first then ids", len(tw_id_list), tw_id_list[:10])
tst_https = tweetsDF[tweetsDF.text.str.contains("https")] # ["text"]
tst_http = tweetsDF[tweetsDF.text.str.contains("http:")] # ["text"]
tstDF = tst_http
tstDF = rlv.normalize_text(tstDF)
print(tstDF["text"]) # .iloc[10])
rlv.logging.info("This text overwritten by tokenizer" + str(tstDF["text"]))
print("normalization:", tstDF["active_text"]) # .iloc[10])
rlv.logging.info("This text overwritten by normalization" + str(tstDF["active_text"]))
find_distance = rlv.get_and_eliminate_near_duplicate_tweets(tweetsDF)
cluster_list = rlv.create_clusters(tweetsDF, my_token_pattern, nameprefix='1-') # those comply to selection criteria
# cluster_list2 = rlv.create_clusters(tweetsDF, selection=False) # get all clusters. You can consider it at the end.
print(len(cluster_list))
a_cluster = cluster_list[0]
print("cluster_no", a_cluster['cno'])
print("cluster_str", a_cluster['cstr'])
print("cluster_tweet_ids", a_cluster['twids'])
print("cluster_freq", a_cluster['rif'])
print("cluster_prefix", a_cluster['cnoprefix'])
print("cluster_tuple_list", a_cluster['ctweettuplelist'])
print("cluster_entropy", a_cluster['user_entropy'])
collection_name = collection + '_clusters'
rlvdb[collection_name].insert(cluster_list) # Each iteration results with a candidate cluster list. Each iteration will have its own list. Therefore they are not mixed.
print("Clusters were written to the collection:", collection_name)
# After excluding tweets that are annotated, you should do the same iteration as many times as the user would like.
# You can provide a percentage of annotated tweets to inform about how far is the user in annotation.
tweets_as_text_label_df = pd.DataFrame({'label': ['relif', 'social'], 'text': ["RT @OliverMathenge: Meanwhile, Kenya has donated Sh91 million to Malawi flood victims, according to the Ministry of Foreign Affairs.", "Yow ehyowgiddii! Hahaha thanks sa flood! #instalike http://t.co/mLaTESfunR"]})
print("tweets_as_text_label_df:", tweets_as_text_label_df)
# get vectorizer and classifier
vect_and_classifier = rlv.get_vectorizer_and_mnb_classifier(tweets_as_text_label_df, my_token_pattern, pickle_file="vectorizer_and_classifier_dict")
vectorizer, mnb_classifier = vect_and_classifier["vectorizer"], vect_and_classifier["classifier"]
# get label for a new tweet:
ntw = vectorizer.transform(["Why do you guys keep flooding TL with smear campaign for a candidate you dont like.You think you can actually influnece people's decision?"])
predictions = mnb_classifier.predict(ntw)
print("Predictions:", predictions)
rlv.logging.info('\nscript finished')
| mit |
JeanKossaifi/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
jaeilepp/mne-python | mne/viz/tests/test_misc.py | 1 | 6300 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from mne import (read_events, read_cov, read_source_spaces, read_evokeds,
read_dipole, SourceEstimate)
from mne.datasets import testing
from mne.filter import create_filter
from mne.io import read_raw_fif
from mne.minimum_norm import read_inverse_operator
from mne.viz import (plot_bem, plot_events, plot_source_spectrogram,
plot_snr_estimate, plot_filter)
from mne.utils import (requires_nibabel, run_tests_if_main, slow_test,
requires_version)
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
inv_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
evoked_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
dip_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_set1.dip')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
def _get_raw():
"""Get raw data."""
return read_raw_fif(raw_fname, preload=True)
def _get_events():
"""Get events."""
return read_events(event_fname)
@requires_version('scipy', '0.16')
def test_plot_filter():
"""Test filter plotting."""
import matplotlib.pyplot as plt
l_freq, h_freq, sfreq = 2., 40., 1000.
data = np.zeros(5000)
freq = [0, 2, 40, 50, 500]
gain = [0, 1, 1, 0, 0]
h = create_filter(data, sfreq, l_freq, h_freq, fir_design='firwin2')
plot_filter(h, sfreq)
plt.close('all')
plot_filter(h, sfreq, freq, gain)
plt.close('all')
iir = create_filter(data, sfreq, l_freq, h_freq, method='iir')
plot_filter(iir, sfreq)
plt.close('all')
plot_filter(iir, sfreq, freq, gain)
plt.close('all')
iir_ba = create_filter(data, sfreq, l_freq, h_freq, method='iir',
iir_params=dict(output='ba'))
plot_filter(iir_ba, sfreq, freq, gain)
plt.close('all')
plot_filter(h, sfreq, freq, gain, fscale='linear')
plt.close('all')
def test_plot_cov():
"""Test plotting of covariances."""
raw = _get_raw()
cov = read_cov(cov_fname)
with warnings.catch_warnings(record=True): # bad proj
fig1, fig2 = cov.plot(raw.info, proj=True, exclude=raw.ch_names[6:])
@testing.requires_testing_data
@requires_nibabel()
def test_plot_bem():
"""Test plotting of BEM contours."""
assert_raises(IOError, plot_bem, subject='bad-subject',
subjects_dir=subjects_dir)
assert_raises(ValueError, plot_bem, subject='sample',
subjects_dir=subjects_dir, orientation='bad-ori')
plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='sagittal', slices=[25, 50])
plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='coronal', slices=[25, 50],
brain_surfaces='white')
plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='coronal', slices=[25, 50], src=src_fname)
def test_plot_events():
"""Test plotting events."""
event_labels = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4}
color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c'}
raw = _get_raw()
events = _get_events()
plot_events(events, raw.info['sfreq'], raw.first_samp)
plot_events(events, raw.info['sfreq'], raw.first_samp, equal_spacing=False)
# Test plotting events without sfreq
plot_events(events, first_samp=raw.first_samp)
warnings.simplefilter('always', UserWarning)
with warnings.catch_warnings(record=True):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels)
plot_events(events, raw.info['sfreq'], raw.first_samp,
color=color)
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 1}, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 111}, color=color)
@testing.requires_testing_data
def test_plot_source_spectrogram():
"""Test plotting of source spectrogram."""
sample_src = read_source_spaces(op.join(subjects_dir, 'sample',
'bem', 'sample-oct-6-src.fif'))
# dense version
vertices = [s['vertno'] for s in sample_src]
n_times = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.ones((n_verts, n_times))
stc = SourceEstimate(stc_data, vertices, 1, 1)
plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
assert_raises(ValueError, plot_source_spectrogram, [], [])
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmin=0)
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmax=7)
@slow_test
@testing.requires_testing_data
def test_plot_snr():
"""Test plotting SNR estimate."""
inv = read_inverse_operator(inv_fname)
evoked = read_evokeds(evoked_fname, baseline=(None, 0))[0]
plot_snr_estimate(evoked, inv)
@testing.requires_testing_data
def test_plot_dipole_amplitudes():
"""Test plotting dipole amplitudes."""
dipoles = read_dipole(dip_fname)
dipoles.plot_amplitudes(show=False)
run_tests_if_main()
| bsd-3-clause |
mlskit/astromlskit | FRONTEND/pyroc.py | 2 | 12161 | #!/usr/bin/env python
# encoding: utf-8
"""
PyRoc.py
Created by Marcel Caraciolo on 2009-11-16.
Copyright (c) 2009 Federal University of Pernambuco. All rights reserved.
IMPORTANT:
Based on the original code by Eithon Cadag (http://www.eithoncadag.com/files/pyroc.txt)
Python Module for calculating the area under the receive operating characteristic curve, given a dataset.
0.1 - First Release
0.2 - Updated the code by adding new metrics for analysis with the confusion matrix.
"""
import random
import math
try:
import pylab
except:
print "error:\tcan't import pylab module, you must install the module:\n"
print "\tmatplotlib to plot charts!'\n"
def random_mixture_model(pos_mu=.6,pos_sigma=.1,neg_mu=.4,neg_sigma=.1,size=200):
pos = [(1,random.gauss(pos_mu,pos_sigma),) for x in xrange(size/2)]
neg = [(0,random.gauss(neg_mu,neg_sigma),) for x in xrange(size/2)]
return pos+neg
def plot_multiple_rocs_separate(rocList,title='', labels = None, equal_aspect = True):
""" Plot multiples ROC curves as separate at the same painting area. """
pylab.clf()
pylab.title(title)
for ix, r in enumerate(rocList):
ax = pylab.subplot(4,4,ix+1)
pylab.ylim((0,1))
pylab.xlim((0,1))
ax.set_yticklabels([])
ax.set_xticklabels([])
if equal_aspect:
cax = pylab.gca()
cax.set_aspect('equal')
if not labels:
labels = ['' for x in rocList]
pylab.text(0.2,0.1,labels[ix],fontsize=8)
pylab.plot([x[0] for x in r.derived_points],[y[1] for y in r.derived_points], 'r-',linewidth=2)
pylab.show()
def _remove_duplicate_styles(rocList):
""" Checks for duplicate linestyles and replaces duplicates with a random one."""
pref_styles = ['cx-','mx-','yx-','gx-','bx-','rx-']
points = 'ov^>+xd'
colors = 'bgrcmy'
lines = ['-','-.',':']
rand_ls = []
for r in rocList:
if r.linestyle not in rand_ls:
rand_ls.append(r.linestyle)
else:
while True:
if len(pref_styles) > 0:
pstyle = pref_styles.pop()
if pstyle not in rand_ls:
r.linestyle = pstyle
rand_ls.append(pstyle)
break
else:
ls = ''.join(random.sample(colors,1) + random.sample(points,1)+ random.sample(lines,1))
if ls not in rand_ls:
r.linestyle = ls
rand_ls.append(ls)
break
def plot_multiple_roc(rocList,title='',labels=None, include_baseline=False, equal_aspect=True):
""" Plots multiple ROC curves on the same chart.
Parameters:
rocList: the list of ROCData objects
title: The tile of the chart
labels: The labels of each ROC curve
include_baseline: if it's True include the random baseline
equal_aspect: keep equal aspect for all roc curves
"""
pylab.clf()
pylab.ylim((0,1))
pylab.xlim((0,1))
pylab.xticks(pylab.arange(0,1.1,.1))
pylab.yticks(pylab.arange(0,1.1,.1))
pylab.grid(True)
if equal_aspect:
cax = pylab.gca()
cax.set_aspect('equal')
pylab.xlabel("1 - Specificity")
pylab.ylabel("Sensitivity")
pylab.title(title)
if not labels:
labels = [ '' for x in rocList]
_remove_duplicate_styles(rocList)
for ix, r in enumerate(rocList):
pylab.plot([x[0] for x in r.derived_points], [y[1] for y in r.derived_points], r.linestyle, linewidth=1, label=labels[ix])
if include_baseline:
pylab.plot([0.0,1.0], [0.0, 1.0], 'k-', label= 'random')
if labels:
pylab.legend(loc='lower right')
pylab.show()
def load_decision_function(path):
""" Function to load the decision function (DataSet)
Parameters:
path: The dataset file path
Return:
model_data: The data modeled
"""
fileHandler = open(path,'r')
reader = fileHandler.readlines()
reader = [line.strip().split() for line in reader]
model_data = []
for line in reader:
if len(line) == 0: continue
fClass,fValue = line
model_data.append((int(fClass), float(fValue)))
fileHandler.close()
return model_data
class ROCData(object):
""" Class that generates an ROC Curve for the data.
    Data is in the following format: a list l of tuples t
where:
t[0] = 1 for positive class and t[0] = 0 for negative class
t[1] = score
t[2] = label
"""
def __init__(self,data,linestyle='rx-'):
""" Constructor takes the data and the line style for plotting the ROC Curve.
Parameters:
                data: The data, a list l of tuples t (l = [t_0,t_1,...t_n]) where:
t[0] = 1 for positive class and 0 for negative class
t[1] = a score
t[2] = any label (optional)
                linestyle: The matplotlib style string for plots.
Note: The ROCData is still usable w/o matplotlib. The AUC is still available,
but plots cannot be generated.
"""
self.data = sorted(data,lambda x,y: cmp(y[1],x[1]))
self.linestyle = linestyle
self.auc() #Seed initial points with default full ROC
def auc(self,fpnum=0):
""" Uses the trapezoidal ruel to calculate the area under the curve. If fpnum is supplied, it will
calculate a partial AUC, up to the number of false positives in fpnum (the partial AUC is scaled
to between 0 and 1).
            It assumes that the positive class is expected to have the higher of the scores (s(+) > s(-))
Parameters:
                fpnum: The cumulative FP count (fps)
            Return:
                the area under the ROC curve (a partial AUC, scaled to between 0 and 1, when fpnum > 0)
"""
fps_count = 0
relevant_pauc = []
current_index = 0
max_n = len([x for x in self.data if x[0] == 0])
if fpnum == 0:
relevant_pauc = [x for x in self.data]
elif fpnum > max_n:
fpnum = max_n
#Find the upper limit of the data that does not exceed n FPs
else:
while fps_count < fpnum:
relevant_pauc.append(self.data[current_index])
if self.data[current_index][0] == 0:
fps_count += 1
current_index +=1
total_n = len([x for x in relevant_pauc if x[0] == 0])
total_p = len(relevant_pauc) - total_n
#Convert to points in a ROC
previous_df = -1000000.0
current_index = 0
points = []
tp_count, fp_count = 0.0 , 0.0
tpr, fpr = 0, 0
while current_index < len(relevant_pauc):
df = relevant_pauc[current_index][1]
if previous_df != df:
points.append((fpr,tpr,fp_count))
if relevant_pauc[current_index][0] == 0:
fp_count +=1
elif relevant_pauc[current_index][0] == 1:
tp_count +=1
fpr = fp_count/total_n
tpr = tp_count/total_p
previous_df = df
current_index +=1
points.append((fpr,tpr,fp_count)) #Add last point
points.sort(key=lambda i: (i[0],i[1]))
self.derived_points = points
return self._trapezoidal_rule(points)
def _trapezoidal_rule(self,curve_pts):
""" Method to calculate the area under the ROC curve"""
cum_area = 0.0
for ix,x in enumerate(curve_pts[0:-1]):
cur_pt = x
next_pt = curve_pts[ix+1]
cum_area += ((cur_pt[1]+next_pt[1])/2.0) * (next_pt[0]-cur_pt[0])
return cum_area
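    @staticmethod
    def _trapezoid_demo():
        """ Editor's illustrative sketch (not part of the original module):
        a worked example of the trapezoidal rule used above.  For ROC points
        (fpr, tpr) = (0, 0), (0.5, 0.8), (1, 1) the area is
        0.5*(0 + 0.8)/2 + 0.5*(0.8 + 1)/2 = 0.2 + 0.45 = 0.65."""
        pts = [(0.0, 0.0), (0.5, 0.8), (1.0, 1.0)]
        area = 0.0
        for (x0, y0), (x1, y1) in zip(pts[:-1], pts[1:]):
            area += ((y0 + y1) / 2.0) * (x1 - x0)
        return area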
def calculateStandardError(self,fpnum=0):
""" Returns the standard error associated with the curve.
Parameters:
                fpnum: The cumulative FP count (fps)
Return:
the standard error.
"""
area = self.auc(fpnum)
#real positive cases
Na = len([ x for x in self.data if x[0] == 1])
#real negative cases
Nn = len([ x for x in self.data if x[0] == 0])
Q1 = area / (2.0 - area)
Q2 = 2 * area * area / (1.0 + area)
return math.sqrt( ( area * (1.0 - area) + (Na - 1.0) * (Q1 - area*area) +
(Nn - 1.0) * (Q2 - area * area)) / (Na * Nn))
def plot(self,title='',include_baseline=False,equal_aspect=True):
""" Method that generates a plot of the ROC curve
Parameters:
title: Title of the chart
include_baseline: Add the baseline plot line if it's True
equal_aspect: Aspects to be equal for all plot
"""
pylab.clf()
pylab.plot([x[0] for x in self.derived_points], [y[1] for y in self.derived_points], self.linestyle)
if include_baseline:
pylab.plot([0.0,1.0], [0.0,1.0],'k-.')
pylab.ylim((0,1))
pylab.xlim((0,1))
pylab.xticks(pylab.arange(0,1.1,.1))
pylab.yticks(pylab.arange(0,1.1,.1))
pylab.grid(True)
if equal_aspect:
cax = pylab.gca()
cax.set_aspect('equal')
pylab.xlabel('1 - Specificity')
pylab.ylabel('Sensitivity')
pylab.title(title)
pylab.show()
def confusion_matrix(self,threshold,do_print=False):
""" Returns the confusion matrix (in dictionary form) for a fiven threshold
where all elements > threshold are considered 1 , all else 0.
Parameters:
threshold: threshold to check the decision function
do_print: if it's True show the confusion matrix in the screen
Return:
the dictionary with the TP, FP, FN, TN
"""
pos_points = [x for x in self.data if x[1] >= threshold]
neg_points = [x for x in self.data if x[1] < threshold]
tp,fp,fn,tn = self._calculate_counts(pos_points,neg_points)
if do_print:
print "\t Actual class"
print "\t+(1)\t-(0)"
print "+(1)\t%i\t%i\tPredicted" % (tp,fp)
print "-(0)\t%i\t%i\tclass" % (fn,tn)
return {'TP': tp, 'FP': fp, 'FN': fn, 'TN': tn}
def evaluateMetrics(self,matrix,metric=None,do_print=False):
""" Returns the metrics evaluated from the confusion matrix.
Parameters:
matrix: the confusion matrix
metric: the specific metric to report; the default value is None (all metrics).
do_print: if True, print the metrics to the screen
Return:
the dictionary with the Accuracy, Sensitivity, Specificity,Efficiency,
PositivePredictiveValue, NegativePredictiveValue, PhiCoefficient
"""
accuracy = (matrix['TP'] + matrix['TN'])/ float(sum(matrix.values()))
sensitivity = (matrix['TP'])/ float(matrix['TP'] + matrix['FN'])
specificity = (matrix['TN'])/float(matrix['TN'] + matrix['FP'])
efficiency = (sensitivity + specificity) / 2.0
positivePredictiveValue = matrix['TP'] / float(matrix['TP'] + matrix['FP'])
NegativePredictiveValue = matrix['TN'] / float(matrix['TN'] + matrix['FN'])
PhiCoefficient = (matrix['TP'] * matrix['TN'] - matrix['FP'] * matrix['FN'])/(
math.sqrt( (matrix['TP'] + matrix['FP']) *
(matrix['TP'] + matrix['FN']) *
(matrix['TN'] + matrix['FP']) *
(matrix['TN'] + matrix['FN']))) or 1.0
if do_print:
print 'Sensitivity: ' , sensitivity
print 'Specificity: ' , specificity
print 'Efficiency: ' , efficiency
print 'Accuracy: ' , accuracy
print 'PositivePredictiveValue: ' , positivePredictiveValue
print 'NegativePredictiveValue' , NegativePredictiveValue
print 'PhiCoefficient' , PhiCoefficient
return {'SENS': sensitivity, 'SPEC': specificity, 'ACC': accuracy, 'EFF': efficiency,
'PPV':positivePredictiveValue, 'NPV':NegativePredictiveValue , 'PHI': PhiCoefficient}
def _calculate_counts(self,pos_data,neg_data):
""" Calculates the number of false positives, true positives, false negatives and true negatives """
tp_count = len([x for x in pos_data if x[0] == 1])
fp_count = len([x for x in pos_data if x[0] == 0])
fn_count = len([x for x in neg_data if x[0] == 1])
tn_count = len([x for x in neg_data if x[0] == 0])
return tp_count,fp_count,fn_count, tn_count
if __name__ == '__main__':
print "PyRoC - ROC Curve Generator"
print "By Marcel Pinheiro Caraciolo (@marcelcaraciolo)"
print "http://aimotion.bogspot.com\n"
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-f', '--file', dest='origFile', help="Path to a file with the class and decision function. The first column of each row is the class, and the second the decision score.")
parser.add_option("-n", "--max fp", dest = "fp_n", default=0, help= "Maximum false positives to calculate up to (for partial AUC).")
parser.add_option("-p","--plot", action="store_true",dest='plotFlag', default=False, help="Plot the ROC curve (matplotlib required)")
parser.add_option("-t",'--title', dest= 'ptitle' , default='' , help = 'Title of plot.')
(options,args) = parser.parse_args()
if (not options.origFile):
parser.print_help()
exit()
df_data = load_decision_function(options.origFile)
roc = ROCData(df_data)
roc_n = int(options.fp_n)
print "ROC AUC: %s" % (str(roc.auc(roc_n)),)
print 'Standard Error: %s' % (str(roc.calculateStandardError(roc_n)),)
print ''
for pt in roc.derived_points:
print pt[0],pt[1]
if options.plotFlag:
roc.plot(options.ptitle,True,True)
| gpl-3.0 |
PandaStabber/Goldberg_et_al_2016 | optimal_tree.py | 1 | 10148 | """
Before running this file, run 'make_database.py' first. Here it is important to
remember that we're not evaluating the generic performance of the decision tree
to predict the data outcome. We're using the decision tree as a method to
quantitatively investigate and break down the data... to see if we can disentangle
the relationship between physicochemical parameters and the retention behavior of
nanomaterials.
Note that, because there are elements of stochasticity in the decision tree
growing process, it can be difficult to obtain the optimal decision tree (n.b.
for most cases, there is never an optimal decision tree). Here, we cannot
guarantee optimality, but we can iteratively investigate the results of the
decision tree and pick the best one.
Here we employ many decision tree runs and report the index of the best one.
This index value should be used to declare the location of the output
hierarchical JSON file (flareXX.json), which is output to the
figures/decisionTreeVisualization/flare_reports folder. Once the index has been
located, modify the appropriate variable in the index.html file contained within
the decisionTreeVisualization folder and run it.
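A typical invocation (an illustrative sketch; the flags mirror parse_args below):
$ python optimal_tree.py --iterations 50 --holdout_size 0.15 --crossfolds 5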
"""
import argparse
import json
import os
import pandas as pd
import numpy as np
import pydot
from sklearn import metrics
from sklearn import grid_search
from sklearn import tree
from sklearn.externals.six import StringIO
from sklearn.metrics import classification_report
from sklearn.cross_validation import StratifiedShuffleSplit
from helper_functions import (make_dirs, rules, add_boolean_argument)
# Default database
TRAINING_PATH = os.path.join('output', 'data', 'training_data.csv')
TARGET_PATH = os.path.join('output', 'data', 'target_data.csv')
# Seed to use when running in deterministic mode.
_SEED = 666
# TODO(peterthenelson) Break up into smaller functions.
def main(
output_dir='output', training_path=TRAINING_PATH, target_path=TARGET_PATH,
iterations=50, deterministic=False, stratified_holdout=True,
holdout_size=0.15, crossfolds=5):
"""Find optimal decision tree, write output files.
Parameters
----------
output_dir : str
Path to output directory.
training_path : str
Path to training data csv.
target_path : str
Path to target data csv.
iterations : int
Number of runs of fitting the model.
deterministic : bool
Turn off randomness (for testing).
stratified_holdout : bool
Whether to use a stratified holdout set. Use with caution:
False = train and test on the same data (ok for description, but
not prediction).
holdout_size : float
The percentage of the database not employed for training.
crossfolds : int
Number of folds for crossvalidation.
"""
# TODO(peterthenelson) This is a dumb thing to do. Name and location should
# be bundled into a config object, rather than trying to derive it from the
# path (or one of them).
database_basename = os.path.basename(training_path)
# Everything goes under this subdirectory.
output_dir = os.path.join(output_dir, 'classifier')
make_dirs(output_dir)
# Loop through all model interactions by looping through database names
run = 0
f1_binary_average_score_track = []
f1_report = pd.DataFrame()
target_data = np.squeeze(pd.read_csv(target_path))
training_data = pd.read_csv(training_path)
for run in xrange(iterations):
print run # Print for convenience
y_train = np.array(target_data)
x_train = training_data.as_matrix()
# Assign the target data as y_train and the training data as x_train. Note
# that, unless a stratified holdout is used below, we train AND test on the
# same data. This is not common, but we're employing the decision tree for
# a descriptive evaluation, not for its generic prediction performance.
if stratified_holdout:
random_state = _SEED if deterministic else None
sss = StratifiedShuffleSplit(
y_train, n_iter=1, test_size=holdout_size,
random_state=random_state)
for train_index, test_index in sss:
x_train, x_holdout = x_train[train_index], x_train[test_index]
y_train, y_holdout = y_train[train_index], y_train[test_index]
x_train_or_holdout = x_holdout
y_train_or_holdout = y_holdout
# if you want to separate the training data into a holdout set to examine performance.
x_train_or_holdout = x_train
y_train_or_holdout = y_train
# initialize the classifier
clf = tree.DecisionTreeClassifier()
# optimize classifier by brute-force parameter investigation
dpgrid = {'max_depth': [3,4,5],
'min_samples_leaf': [11,12,13],
'max_features': [None, 'sqrt', 'log2'],
'random_state': [_SEED] if deterministic else [None]
}
# investigate the best possible set of parameters using a cross
# validation loop and the given grid. The cross-validation does not do
# random shuffles, but the estimator does use randomness (and
# takes random_state via dpgrid).
grid_searcher = grid_search.GridSearchCV(estimator=clf, cv=crossfolds,
param_grid=dpgrid, n_jobs=-1)
# call the grid search fit using the data
grid_searcher.fit(x_train, y_train)
# store and print the best parameters
best_params = grid_searcher.best_params_
# reinitialize and call the classifier with the best parameter
clf = tree.DecisionTreeClassifier(**best_params)
clf.fit(x_train, y_train)
# Evaluate external performance (how well does
# the trained model classify the holdout?)
y_pred = clf.predict(x_train_or_holdout)
# calculate the score for the combined class (weighted), and then
# each class individually
f1_binary_average_score = metrics.f1_score(
y_train_or_holdout, y_pred, pos_label=None, average='weighted')
f1_binary_average_score_exp = metrics.f1_score(
y_train_or_holdout, y_pred, pos_label=0)
f1_binary_average_score_nonexp = metrics.f1_score(
y_train_or_holdout, y_pred, pos_label=1)
# Compare the predictions to the truth directly and outut a file
# to inspect.
y_pred_frame = pd.DataFrame(y_pred, columns=['predicted'])
y_truth_frame = pd.DataFrame(y_train_or_holdout, columns=['truth'])
comparison = pd.concat([y_pred_frame, y_truth_frame], axis=1)
comparison.to_csv(os.path.join(output_dir, 'comparison.csv'))
# initialize scoring tracking dataframe to store the data
f1_track = pd.DataFrame()
f1_track['exponential'] = f1_binary_average_score_exp,
f1_track['nonexponential'] = f1_binary_average_score_nonexp
f1_track['average'] = f1_binary_average_score
f1_report = f1_report.append(f1_track) # pylint:disable=redefined-variable-type
f1_binary_average_score_track.append(f1_binary_average_score)
# The following section creates figures to visualize the decision tree
# as a PDF and to plot in D3 (java/html). Feature elimination is not
# included here, but was included previously. This grabs only the names
# in the remaining features.
grab_working_names = [str(i) for i in list(training_data)]
# set the path to save the json representation.
json_dir = os.path.join(output_dir, 'flare')
make_dirs(json_dir)
json_path = os.path.join(json_dir, 'flare%d.json' % (run + 1))
data_target_names = ['exponential', 'nonexponential']
tree_rules = rules(clf, grab_working_names, data_target_names)
with open(json_path, 'w') as outf:
outf.write(json.dumps(tree_rules))
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data,
feature_names=grab_working_names, impurity=True,
rounded=True, filled=True, label='all',
leaves_parallel=True,
class_names=['exponential', 'nonexponential'])
graph = pydot.graph_from_dot_data(dot_data.getvalue())
make_dirs(os.path.join(output_dir, 'models'))
graph.write_pdf(os.path.join(output_dir, 'models/%d.pdf' % (run + 1)))
class_report_dir = os.path.join(output_dir, 'class_reports')
make_dirs(class_report_dir)
class_report_path = os.path.join(class_report_dir,
'class_report%d.txt' % (run + 1))
with open(class_report_path, "w") as outf:
outf.write(classification_report(
y_train_or_holdout, y_pred, target_names=['exponential', 'nonexponential']))
outf.write('\n')
report_save_path = os.path.join(output_dir, 'scores%d.csv' % (run + 1))
f1_report.to_csv(report_save_path)
f1_report.reset_index(inplace=True)
print f1_report.describe()
print "best performing decision tree index: ", f1_report['average'].argmax()
def parse_args():
"""Parse commandline arguments into a dict."""
parser = argparse.ArgumentParser()
# TODO(peterthenelson) I'm not a fan of duplicating the defaults here, but I
# don't see a better way.
parser.add_argument('--output_dir', default='output')
parser.add_argument('--training_path', default=TRAINING_PATH)
parser.add_argument('--target_path', default=TARGET_PATH)
parser.add_argument('--iterations', default=50, type=int)
add_boolean_argument(parser, 'stratified_holdout', default=True)
parser.add_argument('--holdout_size', default=0.15, type=float)
parser.add_argument('--crossfolds', default=5, type=int)
parsed = parser.parse_args()
return {k: getattr(parsed, k) for k in dir(parsed) if not k.startswith('_')}
if __name__ == '__main__': # wrap inside to prevent parallelize errors on windows.
main(**parse_args())
| mit |
LiaoPan/scikit-learn | sklearn/decomposition/dict_learning.py | 83 | 44062 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each row of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars", '
'"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
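Examples
--------
A small, purely illustrative example (the exact coefficients depend on the
chosen algorithm, so the output is not shown here):
>>> import numpy as np
>>> from sklearn.decomposition import sparse_encode
>>> X = np.array([[-1., -1., 2.], [0., 1., 1.]])
>>> dictionary = np.array([[0., 1., 0.], [-1., -1., 2.], [1., 1., 1.]])
>>> sparse_encode(X, dictionary, alpha=0.1)  # doctest: +SKIP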
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
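Examples
--------
A small synthetic example; only the array shapes are checked, since the
learned factors depend on the solver:
>>> import numpy as np
>>> from sklearn.decomposition import dict_learning
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(10, 8)
>>> code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.,
... random_state=rng)
>>> code.shape, dictionary.shape
((10, 5), (5, 8))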
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
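Examples
--------
A small synthetic example; only the array shapes are checked, since the
learned factors depend on the random mini-batch order:
>>> import numpy as np
>>> from sklearn.decomposition import dict_learning_online
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(10, 8)
>>> code, dictionary = dict_learning_online(X, n_components=5, alpha=1.,
... random_state=rng)
>>> code.shape, dictionary.shape
((10, 5), (5, 8))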
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
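Examples
--------
An illustrative sketch with a trivial identity dictionary (the printed array
formatting varies across NumPy versions, so the output is not checked):
>>> import numpy as np
>>> from sklearn.decomposition import SparseCoder
>>> coder = SparseCoder(np.eye(3), transform_algorithm='threshold',
... transform_alpha=0.1)
>>> coder.transform(np.array([[1., 0., -1.]]))  # doctest: +SKIP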
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
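Examples
--------
A small synthetic example; only the shape of the transformed data is checked,
since the learned atoms depend on the solver:
>>> import numpy as np
>>> from sklearn.decomposition import DictionaryLearning
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(12, 8)
>>> dico = DictionaryLearning(n_components=4, alpha=0.1, random_state=rng)
>>> dico.fit_transform(X).shape
(12, 4)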
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
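Examples
--------
A small synthetic example; only the shape of the learned dictionary is
checked, since the atoms depend on the random mini-batch order:
>>> import numpy as np
>>> from sklearn.decomposition import MiniBatchDictionaryLearning
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(12, 8)
>>> dico = MiniBatchDictionaryLearning(n_components=4, alpha=0.1, n_iter=25,
... random_state=rng)
>>> dico.fit(X).components_.shape
(4, 8)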
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
The number of iteration on data batches that has been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
| bsd-3-clause |
anntzer/scikit-learn | examples/mixture/plot_gmm_covariances.py | 30 | 4751 | """
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMMs are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# Modified by Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
print(__doc__)
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
if gmm.covariance_type == 'full':
covariances = gmm.covariances_[n][:2, :2]
elif gmm.covariance_type == 'tied':
covariances = gmm.covariances_[:2, :2]
elif gmm.covariance_type == 'diag':
covariances = np.diag(gmm.covariances_[n][:2])
elif gmm.covariance_type == 'spherical':
covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
ax.set_aspect('equal', 'datalim')
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = {cov_type: GaussianMixture(n_components=n_classes,
covariance_type=cov_type, max_iter=20, random_state=0)
for cov_type in ['spherical', 'diag', 'tied', 'full']}
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators // 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_init = np.array([X_train[y_train == i].mean(axis=0)
for i in range(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators // 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(scatterpoints=1, loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
Rareson/LammpsRelated | pyqtgraph/widgets/MatplotlibWidget.py | 12 | 1213 | from ..Qt import QtGui, QtCore, USE_PYSIDE
import matplotlib
if USE_PYSIDE:
matplotlib.rcParams['backend.qt4']='PySide'
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
class MatplotlibWidget(QtGui.QWidget):
"""
Implements a Matplotlib figure inside a QWidget.
Use getFigure() and draw() to interact with matplotlib.
Example::
mw = MatplotlibWidget()
subplot = mw.getFigure().add_subplot(111)
subplot.plot(x,y)
mw.draw()
"""
def __init__(self, size=(5.0, 4.0), dpi=100):
QtGui.QWidget.__init__(self)
self.fig = Figure(size, dpi=dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addWidget(self.toolbar)
self.vbox.addWidget(self.canvas)
self.setLayout(self.vbox)
def getFigure(self):
return self.fig
def draw(self):
self.canvas.draw()
| gpl-3.0 |
rgommers/scipy | scipy/signal/filter_design.py | 7 | 179656 | """Filter design."""
import math
import operator
import warnings
import numpy
import numpy as np
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, full, array,
mintypecode)
from numpy.polynomial.polynomial import polyval as npp_polyval
from numpy.polynomial.polynomial import polyvalfromroots
from scipy import special, optimize, fft as sp_fft
from scipy.special import comb
from scipy._lib._util import float_factorial
from scipy.optimize import root_scalar
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
'sosfreqz', 'iirnotch', 'iirpeak', 'bilinear_zpk',
'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk',
'gammatone', 'iircomb']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
def _is_int_type(x):
"""
Check if input is of a scalar integer type (so ``5`` and ``array(5)`` will
pass, while ``5.0`` and ``array([5])`` will fail).
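For example (illustrative):
>>> import numpy as np
>>> _is_int_type(5), _is_int_type(np.array(5))
(True, True)
>>> _is_int_type(5.0), _is_int_type(np.array([5]))
(False, False)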
"""
if np.ndim(x) != 0:
# Older versions of NumPy did not raise for np.array([1]).__index__()
# This is safe to remove when support for those versions is dropped
return False
try:
operator.index(x)
except TypeError:
return False
else:
return True
def findfreqs(num, den, N, kind='ba'):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system, where the coefficients
are ordered from highest to lowest degree. Or, the roots of the
transfer function numerator and denominator (i.e., zeroes and poles).
N : int
The length of the array to be computed.
kind : str {'ba', 'zp'}, optional
Specifies whether the numerator and denominator are specified by their
polynomial coefficients ('ba'), or their roots ('zp').
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
if kind == 'ba':
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
elif kind == 'zp':
ep = atleast_1d(den) + 0j
tz = atleast_1d(num) + 0j
else:
raise ValueError("input must be one of {'ba', 'zp'}")
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=200, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
H(w) = ----------------------------------------------
a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g., rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, as this plots the real part of the complex transfer
    function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
# For backwards compatibility
w = findfreqs(b, a, 200)
elif _is_int_type(worN):
w = findfreqs(b, a, worN)
else:
w = atleast_1d(worN)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqs_zpk(z, p, k, worN=200):
"""
Compute frequency response of analog filter.
Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its
frequency response::
(jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1])
H(w) = k * ----------------------------------------
(jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1])
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g., rad/s) given in `worN`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqz : Compute the frequency response of a digital filter in TF form
freqz_zpk : Compute the frequency response of a digital filter in ZPK form
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.signal import freqs_zpk, iirfilter
>>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1',
... output='zpk')
>>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
k = np.asarray(k)
if k.size > 1:
raise ValueError('k must be a single scalar gain')
if worN is None:
# For backwards compatibility
w = findfreqs(z, p, 200, kind='zp')
elif _is_int_type(worN):
w = findfreqs(z, p, worN, kind='zp')
else:
w = worN
w = atleast_1d(w)
s = 1j * w
num = polyvalfromroots(s, z)
den = polyvalfromroots(s, p)
h = k * num/den
return w, h
def freqz(b, a=1, worN=512, whole=False, plot=None, fs=2*pi, include_nyquist=False):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
jw -jw -jwM
jw B(e ) b[0] + b[1]e + ... + b[M]e
H(e ) = ------ = -----------------------------------
jw -jw -jwN
A(e ) a[0] + a[1]e + ... + a[N]e
Parameters
----------
b : array_like
Numerator of a linear filter. If `b` has dimension greater than 1,
it is assumed that the coefficients are stored in the first dimension,
and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
array must be compatible for broadcasting.
a : array_like
        Denominator of a linear filter. If `a` has dimension greater than 1,
it is assumed that the coefficients are stored in the first dimension,
and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
array must be compatible for broadcasting.
worN : {None, int, array_like}, optional
If a single integer, then compute at that many frequencies (default is
N=512). This is a convenient alternative to::
np.linspace(0, fs if whole else fs/2, N, endpoint=include_nyquist)
Using a number that is fast for FFT computations can result in
faster computations (see Notes).
If an array_like, compute the response at the frequencies given.
These are in the same units as `fs`.
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
fs/2 (upper-half of unit-circle). If `whole` is True, compute
frequencies from 0 to fs. Ignored if worN is array_like.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
fs : float, optional
The sampling frequency of the digital system. Defaults to 2*pi
radians/sample (so w is from 0 to pi).
.. versionadded:: 1.2.0
include_nyquist : bool, optional
If `whole` is False and `worN` is an integer, setting `include_nyquist` to True
will include the last frequency (Nyquist frequency) and is otherwise ignored.
.. versionadded:: 1.5.0
Returns
-------
w : ndarray
The frequencies at which `h` was computed, in the same units as `fs`.
By default, `w` is normalized to the range [0, pi) (radians/sample).
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz_zpk
sosfreqz
Notes
-----
Using Matplotlib's :func:`matplotlib.pyplot.plot` function as the callable
for `plot` produces unexpected results, as this plots the real part of the
complex transfer function, not the magnitude.
Try ``lambda w, h: plot(w, np.abs(h))``.
A direct computation via (R)FFT is used to compute the frequency response
when the following conditions are met:
1. An integer value is given for `worN`.
2. `worN` is fast to compute via FFT (i.e.,
`next_fast_len(worN) <scipy.fft.next_fast_len>` equals `worN`).
3. The denominator coefficients are a single value (``a.shape[0] == 1``).
4. `worN` is at least as long as the numerator coefficients
(``worN >= b.shape[0]``).
5. If ``b.ndim > 1``, then ``b.shape[-1] == 1``.
For long FIR filters, the FFT approach can have lower error and be much
faster than the equivalent direct polynomial calculation.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig, ax1 = plt.subplots()
>>> ax1.set_title('Digital filter frequency response')
>>> ax1.plot(w, 20 * np.log10(abs(h)), 'b')
>>> ax1.set_ylabel('Amplitude [dB]', color='b')
>>> ax1.set_xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> ax2.plot(w, angles, 'g')
>>> ax2.set_ylabel('Angle (radians)', color='g')
>>> ax2.grid()
>>> ax2.axis('tight')
>>> plt.show()
Broadcasting Examples
Suppose we have two FIR filters whose coefficients are stored in the
rows of an array with shape (2, 25). For this demonstration, we'll
use random data:
>>> rng = np.random.default_rng()
>>> b = rng.random((2, 25))
To compute the frequency response for these two filters with one call
to `freqz`, we must pass in ``b.T``, because `freqz` expects the first
axis to hold the coefficients. We must then extend the shape with a
trivial dimension of length 1 to allow broadcasting with the array
of frequencies. That is, we pass in ``b.T[..., np.newaxis]``, which has
shape (25, 2, 1):
>>> w, h = signal.freqz(b.T[..., np.newaxis], worN=1024)
>>> w.shape
(1024,)
>>> h.shape
(2, 1024)
Now, suppose we have two transfer functions, with the same numerator
coefficients ``b = [0.5, 0.5]``. The coefficients for the two denominators
are stored in the first dimension of the 2-D array `a`::
a = [ 1 1 ]
[ -0.25, -0.5 ]
>>> b = np.array([0.5, 0.5])
>>> a = np.array([[1, 1], [-0.25, -0.5]])
Only `a` is more than 1-D. To make it compatible for
broadcasting with the frequencies, we extend it with a trivial dimension
in the call to `freqz`:
>>> w, h = signal.freqz(b, a[..., np.newaxis], worN=1024)
>>> w.shape
(1024,)
>>> h.shape
(2, 1024)
"""
b = atleast_1d(b)
a = atleast_1d(a)
if worN is None:
# For backwards compatibility
worN = 512
h = None
if _is_int_type(worN):
N = operator.index(worN)
del worN
if N < 0:
raise ValueError('worN must be nonnegative, got %s' % (N,))
lastpoint = 2 * pi if whole else pi
# if include_nyquist is true and whole is false, w should include end point
w = np.linspace(0, lastpoint, N, endpoint=include_nyquist and not whole)
if (a.size == 1 and N >= b.shape[0] and
sp_fft.next_fast_len(N) == N and
(b.ndim == 1 or (b.shape[-1] == 1))):
# if N is fast, 2 * N will be fast, too, so no need to check
n_fft = N if whole else N * 2
if np.isrealobj(b) and np.isrealobj(a):
fft_func = sp_fft.rfft
else:
fft_func = sp_fft.fft
h = fft_func(b, n=n_fft, axis=0)[:N]
h /= a
if fft_func is sp_fft.rfft and whole:
# exclude DC and maybe Nyquist (no need to use axis_reverse
# here because we can build reversal with the truncation)
stop = -1 if n_fft % 2 == 1 else -2
h_flip = slice(stop, 0, -1)
h = np.concatenate((h, h[h_flip].conj()))
if b.ndim > 1:
# Last axis of h has length 1, so drop it.
h = h[..., 0]
# Rotate the first axis of h to the end.
h = np.rollaxis(h, 0, h.ndim)
else:
w = atleast_1d(worN)
del worN
w = 2*pi*w/fs
if h is None: # still need to compute using freqs w
zm1 = exp(-1j * w)
h = (npp_polyval(zm1, b, tensor=False) /
npp_polyval(zm1, a, tensor=False))
w = w*fs/(2*pi)
if plot is not None:
plot(w, h)
return w, h
def freqz_zpk(z, p, k, worN=512, whole=False, fs=2*pi):
r"""
Compute the frequency response of a digital filter in ZPK form.
Given the Zeros, Poles and Gain of a digital filter, compute its frequency
response:
:math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])`
where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are
the `poles`.
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If a single integer, then compute at that many frequencies (default is
N=512).
If an array_like, compute the response at the frequencies given.
These are in the same units as `fs`.
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
fs/2 (upper-half of unit-circle). If `whole` is True, compute
frequencies from 0 to fs. Ignored if w is array_like.
fs : float, optional
The sampling frequency of the digital system. Defaults to 2*pi
radians/sample (so w is from 0 to pi).
.. versionadded:: 1.2.0
Returns
-------
w : ndarray
The frequencies at which `h` was computed, in the same units as `fs`.
By default, `w` is normalized to the range [0, pi) (radians/sample).
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqs_zpk : Compute the frequency response of an analog filter in ZPK form
freqz : Compute the frequency response of a digital filter in TF form
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
Design a 4th-order digital Butterworth filter with cut-off of 100 Hz in a
system with sample rate of 1000 Hz, and plot the frequency response:
>>> from scipy import signal
>>> z, p, k = signal.butter(4, 100, output='zpk', fs=1000)
>>> w, h = signal.freqz_zpk(z, p, k, fs=1000)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(1, 1, 1)
>>> ax1.set_title('Digital filter frequency response')
>>> ax1.plot(w, 20 * np.log10(abs(h)), 'b')
>>> ax1.set_ylabel('Amplitude [dB]', color='b')
>>> ax1.set_xlabel('Frequency [Hz]')
>>> ax1.grid()
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> ax2.plot(w, angles, 'g')
>>> ax2.set_ylabel('Angle [radians]', color='g')
>>> plt.axis('tight')
>>> plt.show()
"""
z, p = map(atleast_1d, (z, p))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
# For backwards compatibility
w = numpy.linspace(0, lastpoint, 512, endpoint=False)
elif _is_int_type(worN):
w = numpy.linspace(0, lastpoint, worN, endpoint=False)
else:
w = atleast_1d(worN)
w = 2*pi*w/fs
zm1 = exp(1j * w)
h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p)
w = w*fs/(2*pi)
return w, h
def group_delay(system, w=512, whole=False, fs=2*pi):
r"""Compute the group delay of a digital filter.
    The group delay measures by how many samples the amplitude envelopes of
    the various spectral components of a signal are delayed by a filter.
    It is formally defined as the derivative of the continuous (unwrapped)
    phase::
d jw
D(w) = - -- arg H(e)
dw
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array_like}, optional
If a single integer, then compute at that many frequencies (default is
N=512).
If an array_like, compute the delay at the frequencies given. These
are in the same units as `fs`.
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
fs/2 (upper-half of unit-circle). If `whole` is True, compute
frequencies from 0 to fs. Ignored if w is array_like.
fs : float, optional
The sampling frequency of the digital system. Defaults to 2*pi
radians/sample (so w is from 0 to pi).
.. versionadded:: 1.2.0
Returns
-------
w : ndarray
The frequencies at which group delay was computed, in the same units
as `fs`. By default, `w` is normalized to the range [0, pi)
(radians/sample).
gd : ndarray
The group delay.
Notes
-----
    The corresponding function in MATLAB is called `grpdelay`.
    If the transfer function :math:`H(z)` has zeros or poles on the unit
    circle, the group delay at the corresponding frequencies is undefined.
    When such a case arises, a warning is raised and the group delay
    is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
.. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
# For backwards compatibility
w = 512
if _is_int_type(w):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
else:
w = np.atleast_1d(w)
w = 2*pi*w/fs
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
w = w*fs/(2*pi)
return w, gd
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=512, whole=False, fs=2*pi):
r"""
Compute the frequency response of a digital filter in SOS format.
Given `sos`, an array with shape (n, 6) of second order sections of
a digital filter, compute the frequency response of the system function::
B0(z) B1(z) B{n-1}(z)
H(z) = ----- * ----- * ... * ---------
A0(z) A1(z) A{n-1}(z)
for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
denominator of the transfer function of the k-th second order section.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
worN : {None, int, array_like}, optional
If a single integer, then compute at that many frequencies (default is
N=512). Using a number that is fast for FFT computations can result
in faster computations (see Notes of `freqz`).
If an array_like, compute the response at the frequencies given (must
be 1-D). These are in the same units as `fs`.
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
fs/2 (upper-half of unit-circle). If `whole` is True, compute
frequencies from 0 to fs.
fs : float, optional
The sampling frequency of the digital system. Defaults to 2*pi
radians/sample (so w is from 0 to pi).
.. versionadded:: 1.2.0
Returns
-------
w : ndarray
The frequencies at which `h` was computed, in the same units as `fs`.
By default, `w` is normalized to the range [0, pi) (radians/sample).
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz, sosfilt
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
Design a 15th-order bandpass filter in SOS format.
>>> from scipy import signal
>>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='sos')
Compute the frequency response at 1500 points from DC to Nyquist.
>>> w, h = signal.sosfreqz(sos, worN=1500)
Plot the response.
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5))
>>> plt.plot(w/np.pi, db)
>>> plt.ylim(-75, 5)
>>> plt.grid(True)
>>> plt.yticks([0, -20, -40, -60])
>>> plt.ylabel('Gain [dB]')
>>> plt.title('Frequency Response')
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.grid(True)
>>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
... [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$'])
>>> plt.ylabel('Phase [rad]')
>>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
>>> plt.show()
If the same filter is implemented as a single transfer function,
numerical error corrupts the frequency response:
>>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='ba')
>>> w, h = signal.freqz(b, a, worN=1500)
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5))
>>> plt.plot(w/np.pi, db)
>>> plt.ylim(-75, 5)
>>> plt.grid(True)
>>> plt.yticks([0, -20, -40, -60])
>>> plt.ylabel('Gain [dB]')
>>> plt.title('Frequency Response')
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.grid(True)
>>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
... [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$'])
>>> plt.ylabel('Phase [rad]')
>>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
if n_sections == 0:
raise ValueError('Cannot compute frequencies with no sections')
h = 1.
for row in sos:
w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole, fs=fs)
h *= rowh
return w, h
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1-D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1-D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.nonzero(diffs > 0)[0]
run_stops = numpy.nonzero(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-D input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-D')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
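    Examples
    --------
    A minimal sketch: convert zeros at ``-1 +/- 1j`` and poles at ``-2`` and
    ``-3`` with unit gain back to polynomial form.

    >>> from scipy.signal import zpk2tf
    >>> b, a = zpk2tf([-1+1j, -1-1j], [-2, -3], 1)

    Here ``b`` is ``[1., 2., 2.]`` (i.e., ``s**2 + 2*s + 2``) and ``a`` is
    ``[1., 5., 6.]`` (i.e., ``s**2 + 5*s + 6``).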
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = np.empty((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
    It is generally discouraged to convert from TF to SOS format, since doing
    so usually will not reduce numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
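    Examples
    --------
    A minimal sketch: convert a fourth-order digital Butterworth filter from
    transfer-function form to two second-order sections (the exact
    coefficient values depend on the pole-zero pairing).

    >>> from scipy import signal
    >>> b, a = signal.butter(4, 0.25)
    >>> sos = signal.tf2sos(b, a)
    >>> sos.shape
    (2, 6)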
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
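    Examples
    --------
    A minimal sketch: combine the first-order sections
    ``H1(z) = (1 + z**-1) / (1 - 0.5*z**-1)`` and
    ``H2(z) = 1 / (1 + 0.5*z**-1)`` into a single transfer function.

    >>> from scipy.signal import sos2tf
    >>> sos = [[1, 1, 0, 1, -0.5, 0],
    ...        [1, 0, 0, 1, 0.5, 0]]
    >>> b, a = sos2tf(sos)

    The combined coefficients are ``b = [1., 1., 0., 0., 0.]`` and
    ``a = [1., 0., -0.25, 0., 0.]``, i.e. ``H(z) = H1(z) * H2(z)``.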
"""
sos = np.asarray(sos)
result_type = sos.dtype
if result_type.kind in 'bui':
result_type = np.float64
b = np.array([1], dtype=result_type)
a = np.array([1], dtype=result_type)
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
The number of zeros and poles returned will be ``n_sections * 2``
even if some of these are (effectively) zero.
.. versionadded:: 0.16.0
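    Examples
    --------
    A minimal sketch: a single section with zeros at ``+/-1`` and poles at
    ``+/-0.5j``.

    >>> from scipy.signal import sos2zpk
    >>> z, p, k = sos2zpk([[1, 0, -1, 1, 0, 0.25]])

    The zeros are ``1`` and ``-1``, the poles are ``+/-0.5j``, and the gain
    ``k`` is ``1.0``.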
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.zeros(n_sections*2, np.complex128)
p = np.zeros(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*section+len(zpk[0])] = zpk[0]
p[2*section:2*section+len(zpk[1])] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.nonzero(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
    as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
        1. If the pole is complex and the zero is the only remaining real
           zero, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
    shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.nonzero(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections, np.array(k).dtype)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
arrays with zero's so that all numerators have the same length. Such
alignment is necessary for functions like 'tf2ss', which needs the
alignment when dealing with SIMO transfer functions.
Parameters
----------
    nums : array_like
        Numerator or list of numerators. Not necessarily with same length.
Returns
-------
    nums : array
The numerator. If `nums` input was a list of numerators then a 2-D
array with padded zeros for shorter numerators is returned. Otherwise
returns ``np.asarray(nums)``.
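    Examples
    --------
    A minimal illustration of the padding behaviour (this is a private
    helper, so the example is for explanation only).

    >>> padded = _align_nums([5, [1, 2, 3]])

    Here ``padded`` is the 2-D float array ``[[0., 0., 5.], [1., 2., 3.]]``:
    the scalar numerator ``5`` has been left-padded with zeros to match the
    longest numerator.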
"""
try:
# The statement can throw a ValueError if one
# of the numerators is a single digit and another
# is array-like e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
def normalize(b, a):
"""Normalize numerator/denominator of a continuous-time transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
Parameters
----------
    b : array_like
        Numerator of the transfer function. Can be a 2-D array to normalize
        multiple transfer functions.
    a : array_like
        Denominator of the transfer function. At most 1-D.
Returns
-------
num: array
The numerator of the normalized transfer function. At least a 1-D
array. A 2-D array if the input `num` is a 2-D array.
den: 1-D array
The denominator of the normalized transfer function.
Notes
-----
Coefficients for both the numerator and denominator should be specified in
descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``).
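    Examples
    --------
    A minimal sketch: normalize ``(3*s**2 - 2*s + 5) / (2*s**2 + 3*s + 1)``
    so that the leading denominator coefficient is 1.

    >>> from scipy.signal import normalize
    >>> num, den = normalize([3, -2, 5], [2, 3, 1])

    The result is ``num = [1.5, -1., 2.5]`` and ``den = [1., 1.5, 0.5]``.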
"""
num, den = b, a
den = np.atleast_1d(den)
num = np.atleast_2d(_align_nums(num))
if den.ndim != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if num.ndim > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if np.all(den == 0):
raise ValueError("Denominator must have at least on nonzero element.")
# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')
# Normalize transfer function
num, den = num / den[0], den / den[0]
# Count numerator columns that are all zero
leading_zeros = 0
for col in num.T:
if np.allclose(col, 0, atol=1e-14):
leading_zeros += 1
else:
break
# Trim leading zeros of numerator
if leading_zeros > 0:
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
# Make sure at least one column remains
if leading_zeros == num.shape[1]:
leading_zeros -= 1
num = num[:, leading_zeros:]
# Squeeze first dimension if singular
if num.shape[0] == 1:
num = num[0, :]
return num, den
def lp2lp(b, a, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
b : array_like
Numerator polynomial coefficients of the transformed low-pass filter.
a : array_like
Denominator polynomial coefficients of the transformed low-pass filter.
See Also
--------
lp2hp, lp2bp, lp2bs, bilinear
lp2lp_zpk
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> lp = signal.lti([1.0], [1.0, 1.0])
>>> lp2 = signal.lti(*signal.lp2lp(lp.num, lp.den, 2))
>>> w, mag_lp, p_lp = lp.bode()
>>> w, mag_lp2, p_lp2 = lp2.bode(w)
>>> plt.plot(w, mag_lp, label='Lowpass')
>>> plt.plot(w, mag_lp2, label='Transformed Lowpass')
>>> plt.semilogx()
>>> plt.grid()
>>> plt.xlabel('Frequency [rad/s]')
>>> plt.ylabel('Magnitude [dB]')
>>> plt.legend()
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
wo : float
Desired cutoff, as angular frequency (e.g., rad/s).
Defaults to no change.
Returns
-------
b : array_like
Numerator polynomial coefficients of the transformed high-pass filter.
a : array_like
Denominator polynomial coefficients of the transformed high-pass filter.
See Also
--------
lp2lp, lp2bp, lp2bs, bilinear
lp2hp_zpk
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> lp = signal.lti([1.0], [1.0, 1.0])
>>> hp = signal.lti(*signal.lp2hp(lp.num, lp.den))
>>> w, mag_lp, p_lp = lp.bode()
>>> w, mag_hp, p_hp = hp.bode(w)
>>> plt.plot(w, mag_lp, label='Lowpass')
>>> plt.plot(w, mag_hp, label='Highpass')
>>> plt.semilogx()
>>> plt.grid()
>>> plt.xlabel('Frequency [rad/s]')
>>> plt.ylabel('Magnitude [dB]')
>>> plt.legend()
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
wo : float
Desired passband center, as angular frequency (e.g., rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g., rad/s).
Defaults to 1.
Returns
-------
b : array_like
Numerator polynomial coefficients of the transformed band-pass filter.
a : array_like
Denominator polynomial coefficients of the transformed band-pass filter.
See Also
--------
lp2lp, lp2hp, lp2bs, bilinear
lp2bp_zpk
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> lp = signal.lti([1.0], [1.0, 1.0])
>>> bp = signal.lti(*signal.lp2bp(lp.num, lp.den))
>>> w, mag_lp, p_lp = lp.bode()
>>> w, mag_bp, p_bp = bp.bode(w)
>>> plt.plot(w, mag_lp, label='Lowpass')
>>> plt.plot(w, mag_bp, label='Bandpass')
>>> plt.semilogx()
>>> plt.grid()
>>> plt.xlabel('Frequency [rad/s]')
>>> plt.ylabel('Magnitude [dB]')
>>> plt.legend()
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.empty(Np + 1, artype)
aprime = numpy.empty(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
wo : float
Desired stopband center, as angular frequency (e.g., rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g., rad/s).
Defaults to 1.
Returns
-------
b : array_like
Numerator polynomial coefficients of the transformed band-stop filter.
a : array_like
Denominator polynomial coefficients of the transformed band-stop filter.
See Also
--------
lp2lp, lp2hp, lp2bp, bilinear
lp2bs_zpk
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> lp = signal.lti([1.0], [1.0, 1.5])
>>> bs = signal.lti(*signal.lp2bs(lp.num, lp.den))
>>> w, mag_lp, p_lp = lp.bode()
>>> w, mag_bs, p_bs = bs.bode(w)
>>> plt.plot(w, mag_lp, label='Lowpass')
>>> plt.plot(w, mag_bs, label='Bandstop')
>>> plt.semilogx()
>>> plt.grid()
>>> plt.xlabel('Frequency [rad/s]')
>>> plt.ylabel('Magnitude [dB]')
>>> plt.legend()
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.empty(Np + 1, artype)
aprime = numpy.empty(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
r"""
Return a digital IIR filter from an analog one using a bilinear transform.
    Transform a set of poles and zeros from the analog s-plane to the digital
    z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)``
    for ``s``, maintaining the shape of the frequency response.
Parameters
----------
b : array_like
Numerator of the analog filter transfer function.
a : array_like
Denominator of the analog filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g., hertz). No prewarping is
done in this function.
Returns
-------
    b : ndarray
        Numerator of the transformed digital filter transfer function.
    a : ndarray
        Denominator of the transformed digital filter transfer function.
See Also
--------
lp2lp, lp2hp, lp2bp, lp2bs
bilinear_zpk
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> fs = 100
>>> bf = 2 * np.pi * np.array([7, 13])
>>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass',
... analog=True))
>>> filtz = signal.lti(*signal.bilinear(filts.num, filts.den, fs))
>>> wz, hz = signal.freqz(filtz.num, filtz.den)
>>> ws, hs = signal.freqs(filts.num, filts.den, worN=fs*wz)
>>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)),
... label=r'$|H_z(e^{j \omega})|$')
>>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)),
... label=r'$|H(j \omega)|$')
>>> plt.legend()
>>> plt.xlabel('Frequency [Hz]')
>>> plt.ylabel('Magnitude [dB]')
>>> plt.grid()
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.empty(Np + 1, artype)
aprime = numpy.empty(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
def _validate_gpass_gstop(gpass, gstop):
if gpass <= 0.0:
raise ValueError("gpass should be larger than 0.0")
elif gstop <= 0.0:
raise ValueError("gstop should be larger than 0.0")
elif gpass > gstop:
raise ValueError("gpass should be smaller than gstop")
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba',
fs=None):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
    wp, ws : float or array_like, shape (2,)
Passband and stopband edge frequencies. Possible values are scalars
(for lowpass and highpass filters) or ranges (for bandpass and bandstop
filters).
For digital filters, these are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
        Note that for bandpass and bandstop filters, the passband must lie
        strictly inside the stopband, or vice versa.
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Filter form of the output:
- second-order sections (recommended): 'sos'
- numerator/denominator (default) : 'ba'
- pole-zero : 'zpk'
In general the second-order sections ('sos') form is
recommended because inferring the coefficients for the
numerator/denominator form ('ba') suffers from numerical
instabilities. For reasons of backward compatibility the default
form is the numerator/denominator form ('ba'), where the 'b'
and the 'a' in 'ba' refer to the commonly used names of the
        numerator and denominator coefficients.
Note: Using the second-order sections form ('sos') is sometimes
associated with additional computational costs: for
data-intense use cases it is therefore recommended to also
investigate the numerator/denominator form ('ba').
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> import matplotlib.ticker
>>> wp = 0.2
>>> ws = 0.3
>>> gpass = 1
>>> gstop = 40
>>> system = signal.iirdesign(wp, ws, gpass, gstop)
>>> w, h = signal.freqz(*system)
>>> fig, ax1 = plt.subplots()
>>> ax1.set_title('Digital filter frequency response')
>>> ax1.plot(w, 20 * np.log10(abs(h)), 'b')
>>> ax1.set_ylabel('Amplitude [dB]', color='b')
>>> ax1.set_xlabel('Frequency [rad/sample]')
>>> ax1.grid()
>>> ax1.set_ylim([-120, 20])
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> ax2.plot(w, angles, 'g')
>>> ax2.set_ylabel('Angle (radians)', color='g')
>>> ax2.grid()
>>> ax2.axis('tight')
>>> ax2.set_ylim([-6, 1])
>>> nticks = 8
>>> ax1.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks))
>>> ax2.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks))
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError as e:
raise ValueError("Invalid IIR filter type: %s" % ftype) from e
except IndexError as e:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype) from e
_validate_gpass_gstop(gpass, gstop)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
if wp.shape[0] != ws.shape[0] or wp.shape not in [(1,), (2,)]:
raise ValueError("wp and ws must have one or two elements each, and"
"the same shape, got %s and %s"
% (wp.shape, ws.shape))
if wp.shape[0] == 2:
if wp[0] < 0 or ws[0] < 0:
raise ValueError("Values for wp, ws can't be negative")
elif 1 < wp[1] or 1 < ws[1]:
raise ValueError("Values for wp, ws can't be larger than 1")
elif not((ws[0] < wp[0] and wp[1] < ws[1]) or
(wp[0] < ws[0] and ws[1] < wp[1])):
raise ValueError("Passband must lie strictly inside stopband"
" or vice versa")
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog, fs=fs)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output, fs=fs)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba', fs=None):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g., rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Filter form of the output:
- second-order sections (recommended): 'sos'
- numerator/denominator (default) : 'ba'
- pole-zero : 'zpk'
In general the second-order sections ('sos') form is
recommended because inferring the coefficients for the
numerator/denominator form ('ba') suffers from numerical
instabilities. For reasons of backward compatibility the default
form is the numerator/denominator form ('ba'), where the 'b'
and the 'a' in 'ba' refer to the commonly used names of the
coefficients.
Note: Using the second-order sections form ('sos') is sometimes
associated with additional computational costs: for
data-intensive use cases it is therefore recommended to also
investigate the numerator/denominator form ('ba').
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to
200 Hz and plot the frequency response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60,
... btype='band', analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.semilogx(w / (2*np.pi), 20 * np.log10(np.maximum(abs(h), 1e-5)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [Hz]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
Create a digital filter with the same properties, in a system with
sampling rate of 2000 Hz, and plot the frequency response. (Second-order
sections implementation is required to ensure stability of a filter of
this order):
>>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=False, ftype='cheby2', fs=2000,
... output='sos')
>>> w, h = signal.sosfreqz(sos, 2000, fs=2000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.semilogx(w, 20 * np.log10(np.maximum(abs(h), 1e-5)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [Hz]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
if fs is not None:
if analog:
raise ValueError("fs cannot be specified for an analog filter")
Wn = 2*Wn/fs
try:
btype = band_dict[btype]
except KeyError as e:
raise ValueError("'%s' is an invalid bandtype for filter." % btype) from e
try:
typefunc = filter_dict[ftype][0]
except KeyError as e:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype) from e
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn <= 0) or numpy.any(Wn >= 1):
if fs is not None:
raise ValueError("Digital filter critical frequencies "
"must be 0 < Wn < fs/2 (fs={} -> fs/2={})".format(fs, fs/2))
raise ValueError("Digital filter critical frequencies "
"must be 0 < Wn < 1")
fs = 2.0
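# With the default fs of 2, Wn is expressed in half-cycles/sample. The
# tan() below pre-warps the critical frequencies so that the bilinear
# transform applied later places the critical points of the digital
# response exactly at Wn.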
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn for lowpass or highpass filter')
if btype == 'lowpass':
z, p, k = lp2lp_zpk(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = lp2hp_zpk(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError as e:
raise ValueError('Wn must specify start and stop frequencies for bandpass or bandstop '
'filter') from e
if btype == 'bandpass':
z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = bilinear_zpk(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
def bilinear_zpk(z, p, k, fs):
r"""
Return a digital IIR filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog filter transfer function.
p : array_like
Poles of the analog filter transfer function.
k : float
System gain of the analog filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g., hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
See Also
--------
lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, lp2bs_zpk
bilinear
Notes
-----
.. versionadded:: 1.1.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> fs = 100
>>> bf = 2 * np.pi * np.array([7, 13])
>>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass', analog=True,
... output='zpk'))
>>> filtz = signal.lti(*signal.bilinear_zpk(filts.zeros, filts.poles,
... filts.gain, fs))
>>> wz, hz = signal.freqz_zpk(filtz.zeros, filtz.poles, filtz.gain)
>>> ws, hs = signal.freqs_zpk(filts.zeros, filts.poles, filts.gain,
... worN=fs*wz)
>>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)),
... label=r'$|H_z(e^{j \omega})|$')
>>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)),
... label=r'$|H(j \omega)|$')
>>> plt.legend()
>>> plt.xlabel('Frequency [Hz]')
>>> plt.ylabel('Magnitude [dB]')
>>> plt.grid()
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2.0*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
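# (Each analog root s maps to z = (2*fs + s) / (2*fs - s), which sends
# the open left half-plane strictly inside the unit circle.)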
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
def lp2lp_zpk(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog filter transfer function.
p : array_like
Poles of the analog filter transfer function.
k : float
System gain of the analog filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g., rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
See Also
--------
lp2hp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear
lp2lp
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
.. versionadded:: 1.1.0
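Examples
--------
A minimal sketch of the scaling: a unity-gain prototype with a single
pole at ``s = -1`` moved to a cutoff of 10 rad/s ends up with its pole
at ``s = -10`` and its gain multiplied by ``wo**degree``:
>>> from scipy.signal import lp2lp_zpk
>>> z, p, k = lp2lp_zpk([], [-1.0], 1.0, wo=10)
>>> p.tolist(), float(k)
([-10.0], 10.0)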
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def lp2hp_zpk(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog filter transfer function.
p : array_like
Poles of the analog filter transfer function.
k : float
System gain of the analog filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g., rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
See Also
--------
lp2lp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear
lp2hp
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
.. versionadded:: 1.1.0
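Examples
--------
A minimal sketch: for ``wo = 10``, a unity-gain prototype pole at
``s = -1`` maps to ``s = -10``, and the zero the prototype had at
infinity moves to the origin:
>>> from scipy.signal import lp2hp_zpk
>>> z, p, k = lp2hp_zpk([], [-1.0], 1.0, wo=10)
>>> z.tolist(), p.tolist(), float(k)
([0.0], [-10.0], 1.0)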
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def lp2bp_zpk(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog filter transfer function.
p : array_like
Poles of the analog filter transfer function.
k : float
System gain of the analog filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g., rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g., rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
See Also
--------
lp2lp_zpk, lp2hp_zpk, lp2bs_zpk, bilinear
lp2bp
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
.. versionadded:: 1.1.0
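Examples
--------
A rough illustration using a 2-pole Butterworth prototype: the
transformation doubles the number of poles, places ``degree`` zeros at
the origin, and scales the gain by ``bw**degree``:
>>> from scipy.signal import buttap, lp2bp_zpk
>>> z, p, k = buttap(2)
>>> z_bp, p_bp, k_bp = lp2bp_zpk(z, p, k, wo=10, bw=2)
>>> len(z_bp), len(p_bp), float(k_bp)
(2, 4, 4.0)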
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
def lp2bs_zpk(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog filter transfer function.
p : array_like
Poles of the analog filter transfer function.
k : float
System gain of the analog filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g., rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g., rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
See Also
--------
lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, bilinear
lp2bs
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
.. versionadded:: 1.1.0
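Examples
--------
A rough illustration using a 2-pole Butterworth prototype: the zeros
the prototype had at infinity reappear in conjugate pairs at the
center of the stopband, ``s = +/- 1j*wo``:
>>> from scipy.signal import buttap, lp2bs_zpk
>>> z, p, k = buttap(2)
>>> z_bs, p_bs, k_bs = lp2bs_zpk(z, p, k, wo=10, bw=2)
>>> len(z_bs), len(p_bs)
(4, 4)
>>> complex(z_bs[0]), complex(z_bs[-1])
(10j, -10j)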
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, full(degree, +1j*wo))
z_bs = append(z_bs, full(degree, -1j*wo))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba', fs=None):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
The critical frequency or frequencies. For lowpass and highpass
filters, Wn is a scalar; for bandpass and bandstop filters,
Wn is a length-2 sequence.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba' for backwards
compatibility, but 'sos' should be used for general-purpose filtering.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
If the transfer function form ``[b, a]`` is requested, numerical
problems can occur since the conversion between roots and
the polynomial coefficients is a numerically sensitive operation,
even for N >= 4. It is recommended to work with the SOS
representation.
Examples
--------
Design an analog filter and plot its frequency response, showing the
critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
>>> t = np.linspace(0, 1, 1000, False) # 1 second
>>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
>>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
>>> ax1.plot(t, sig)
>>> ax1.set_title('10 Hz and 20 Hz sinusoids')
>>> ax1.axis([0, 1, -2, 2])
Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and
apply it to the signal. (It's recommended to use second-order sections
format when filtering, to avoid numerical error with transfer function
(``ba``) format):
>>> sos = signal.butter(10, 15, 'hp', fs=1000, output='sos')
>>> filtered = signal.sosfilt(sos, sig)
>>> ax2.plot(t, filtered)
>>> ax2.set_title('After 15 Hz high-pass filter')
>>> ax2.axis([0, 1, -2, 2])
>>> ax2.set_xlabel('Time [seconds]')
>>> plt.tight_layout()
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter', fs=fs)
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba', fs=None):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g., rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba' for backwards
compatibility, but 'sos' should be used for general-purpose filtering.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Design an analog filter and plot its frequency response, showing the
critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
>>> t = np.linspace(0, 1, 1000, False) # 1 second
>>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
>>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
>>> ax1.plot(t, sig)
>>> ax1.set_title('10 Hz and 20 Hz sinusoids')
>>> ax1.axis([0, 1, -2, 2])
Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and
apply it to the signal. (It's recommended to use second-order sections
format when filtering, to avoid numerical error with transfer function
(``ba``) format):
>>> sos = signal.cheby1(10, 1, 15, 'hp', fs=1000, output='sos')
>>> filtered = signal.sosfilt(sos, sig)
>>> ax2.plot(t, filtered)
>>> ax2.set_title('After 15 Hz high-pass filter')
>>> ax2.axis([0, 1, -2, 2])
>>> ax2.set_xlabel('Time [seconds]')
>>> plt.tight_layout()
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1', fs=fs)
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g., rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba' for backwards
compatibility, but 'sos' should be used for general-purpose filtering.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Design an analog filter and plot its frequency response, showing the
critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
>>> t = np.linspace(0, 1, 1000, False) # 1 second
>>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
>>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
>>> ax1.plot(t, sig)
>>> ax1.set_title('10 Hz and 20 Hz sinusoids')
>>> ax1.axis([0, 1, -2, 2])
Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and
apply it to the signal. (It's recommended to use second-order sections
format when filtering, to avoid numerical error with transfer function
(``ba``) format):
>>> sos = signal.cheby2(12, 20, 17, 'hp', fs=1000, output='sos')
>>> filtered = signal.sosfilt(sos, sig)
>>> ax2.plot(t, filtered)
>>> ax2.set_title('After 17 Hz high-pass filter')
>>> ax2.axis([0, 1, -2, 2])
>>> ax2.set_xlabel('Time [seconds]')
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2', fs=fs)
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba', fs=None):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth-order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g., rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba' for backwards
compatibility, but 'sos' should be used for general-purpose filtering.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
ellipord, ellipap
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Design an analog filter and plot its frequency response, showing the
critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
>>> t = np.linspace(0, 1, 1000, False) # 1 second
>>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
>>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
>>> ax1.plot(t, sig)
>>> ax1.set_title('10 Hz and 20 Hz sinusoids')
>>> ax1.axis([0, 1, -2, 2])
Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and
apply it to the signal. (It's recommended to use second-order sections
format when filtering, to avoid numerical error with transfer function
(``ba``) format):
>>> sos = signal.ellip(8, 1, 100, 17, 'hp', fs=1000, output='sos')
>>> filtered = signal.sosfilt(sos, sig)
>>> ax2.plot(t, filtered)
>>> ax2.set_title('After 17 Hz high-pass filter')
>>> ax2.axis([0, 1, -2, 2])
>>> ax2.set_xlabel('Time [seconds]')
>>> plt.tight_layout()
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic', fs=fs)
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase',
fs=None):
"""
Bessel/Thomson digital and analog filter design.
Design an Nth-order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies (defined
by the `norm` parameter).
For analog filters, `Wn` is an angular frequency (e.g., rad/s).
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned. (See Notes.)
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
norm : {'phase', 'delay', 'mag'}, optional
Critical frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
both low-pass and high-pass filters, so this is the
"phase-matched" case.
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1/`Wn` (e.g., seconds). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency `Wn`.
.. versionadded:: 0.18.0
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response. [1]_
The Bessel is inherently an analog filter. This function generates digital
Bessel filters using the bilinear transform, which does not preserve the
phase response of the analog filter. As such, it is only approximately
correct at frequencies below about fs/4. To get maximally-flat group
delay at higher frequencies, the analog Bessel filter must be transformed
using phase-preserving techniques.
See `besselap` for implementation details and references.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the phase-normalized frequency response, showing the relationship
to the Butterworth filter's cutoff frequency (green):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter magnitude response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
and the phase midpoint:
>>> plt.figure()
>>> plt.semilogx(w, np.unwrap(np.angle(h)))
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-np.pi, color='red') # phase midpoint
>>> plt.title('Bessel filter phase response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Phase [radians]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
>>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.axhline(-3, color='red') # -3 dB magnitude
>>> plt.axvline(10, color='green') # cutoff frequency
>>> plt.title('Magnitude-normalized Bessel filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the delay-normalized filter, showing the maximally-flat group delay
at 0.1 seconds:
>>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
>>> w, h = signal.freqs(b, a)
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.axhline(0.1, color='red') # 0.1 seconds group delay
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
References
----------
.. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel_'+norm, fs=fs)
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
gpass : float
Amount of ripple in the passband in dB.
gstop : float
Amount of attenuation in stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
_validate_gpass_gstop(gpass, gstop)
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=False, fs=None):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results. If `fs` is specified,
this is in the same units, and `fs` must also be passed to `butter`.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with a passband within 3 dB from 20 to
50 rad/s, while attenuating by at least 40 dB below 14 rad/s and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
_validate_gpass_gstop(gpass, gstop)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
if fs is not None:
if analog:
raise ValueError("fs cannot be specified for an analog filter")
wp = 2*wp/fs
ws = 2*ws/fs
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
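# Closed-form Butterworth order for the equivalent lowpass prototype,
#   N = ceil( log10((10**(0.1*gstop) - 1) / (10**(0.1*gpass) - 1))
#             / (2 * log10(nat)) ),
# where nat is the stopband-to-passband edge ratio (the selectivity).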
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
# Find the Butterworth natural frequency WN (or the "3dB frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.empty(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
if fs is not None:
wn = wn*fs/2
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False, fs=None):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results. If `fs` is specified,
this is in the same units, and `fs` must also be passed to `cheby1`.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
to 0.2*(fs/2), while attenuating by at least 40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
>>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # stop
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
_validate_gpass_gstop(gpass, gstop)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
if fs is not None:
if analog:
raise ValueError("fs cannot be specified for an analog filter")
wp = 2*wp/fs
ws = 2*ws/fs
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
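# Closed-form Chebyshev order for the equivalent lowpass prototype,
#   N = ceil( arccosh(sqrt((10**(0.1*gstop) - 1) / (10**(0.1*gpass) - 1)))
#             / arccosh(nat) ),
# where nat is the stopband-to-passband edge ratio (the selectivity).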
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
if fs is not None:
wn = wn*fs/2
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False, fs=None):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results. If `fs` is specified,
this is in the same units, and `fs` must also be passed to `cheby2`.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital bandstop filter which attenuates by at least 60 dB from
0.2*(fs/2) to 0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
>>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # stop
>>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
_validate_gpass_gstop(gpass, gstop)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
if fs is not None:
if analog:
raise ValueError("fs cannot be specified for an analog filter")
wp = 2*wp/fs
ws = 2*ws/fs
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.empty(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.empty(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
if fs is not None:
wn = wn*fs/2
return ord, wn
_POW10_LOG10 = np.log(10)
def _pow10m1(x):
"""10 ** x - 1 for x near 0"""
return np.expm1(_POW10_LOG10 * x)
def ellipord(wp, ws, gpass, gstop, analog=False, fs=None):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The natural frequency (the "3dB frequency") for use with
`ellip` to give filter results. If `fs` is specified,
this is in the same units, and `fs` must also be passed to `ellip`.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
above 30 rad/s, while attenuating by at least 60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
_validate_gpass_gstop(gpass, gstop)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
if fs is not None:
if analog:
raise ValueError("fs cannot be specified for an analog filter")
wp = 2*wp/fs
ws = 2*ws/fs
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
arg1_sq = _pow10m1(0.1 * gpass) / _pow10m1(0.1 * gstop)
arg0 = 1.0 / nat
d0 = special.ellipk(arg0 ** 2), special.ellipkm1(arg0 ** 2)
d1 = special.ellipk(arg1_sq), special.ellipkm1(arg1_sq)
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
if fs is not None:
wn = wn*fs/2
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
The filter will have an angular (e.g., rad/s) cutoff frequency of 1.
See Also
--------
butter : Filter design function using this prototype
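Examples
--------
For example, a 4th-order prototype has four poles, no zeros and unit gain:
>>> from scipy import signal
>>> z, p, k = signal.buttap(4)
>>> len(z), len(p), k
(0, 4, 1)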
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
cheby1 : Filter design function using this prototype
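Examples
--------
A short sketch of the return values; the prototype has `N` poles and no zeros:
>>> from scipy import signal
>>> z, p, k = signal.cheb1ap(4, rp=1)
>>> len(z), len(p)
(0, 4)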
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
def cheb2ap(N, rs):
"""
Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
See Also
--------
cheby2 : Filter design function using this prototype
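Examples
--------
A short sketch of the return values; for even order the prototype has as
many zeros as poles:
>>> from scipy import signal
>>> z, p, k = signal.cheb2ap(4, rs=40)
>>> len(z), len(p)
(4, 4)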
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
EPSILON = 2e-16
# number of terms in solving degree equation
_ELLIPDEG_MMAX = 7
def _ellipdeg(n, m1):
"""Solve degree equation using nomes
Given n, m1, solve
n * K(m) / K'(m) = K1(m1) / K1'(m1)
for m
See [1], Eq. (49)
References
----------
.. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design",
https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
"""
K1 = special.ellipk(m1)
K1p = special.ellipkm1(m1)
q1 = np.exp(-np.pi * K1p / K1)
q = q1 ** (1/n)
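# Recover the modulus m from the nome q via a truncated theta-function
# series expansion; only _ELLIPDEG_MMAX + 1 terms are kept in each sum.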
mnum = np.arange(_ELLIPDEG_MMAX + 1)
mden = np.arange(1, _ELLIPDEG_MMAX + 2)
num = np.sum(q ** (mnum * (mnum+1)))
den = 1 + 2 * np.sum(q ** (mden**2))
return 16 * q * (num / den) ** 4
# Maximum number of iterations in Landen transformation recursion
# sequence. 10 is conservative; unit tests pass with 4, Orfanidis
# (see _arc_jac_cn [1]) suggests 5.
_ARC_JAC_SN_MAXITER = 10
def _arc_jac_sn(w, m):
"""Inverse Jacobian elliptic sn
Solve for z in w = sn(z, m)
Parameters
----------
w - complex scalar
argument
m - scalar
modulus; in interval [0, 1]
See [1], Eq. (56)
References
----------
.. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design",
https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
"""
def _complement(kx):
# (1-k**2) ** 0.5; the expression below
# works for small kx
return ((1 - kx) * (1 + kx)) ** 0.5
k = m ** 0.5
if k > 1:
return np.nan
elif k == 1:
return np.arctanh(w)
ks = [k]
niter = 0
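# Build the descending Landen sequence k_{n+1} = (1 - k_n') / (1 + k_n'),
# whose moduli shrink towards zero (quadratic convergence).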
while ks[-1] != 0:
k_ = ks[-1]
k_p = _complement(k_)
ks.append((1 - k_p) / (1 + k_p))
niter += 1
if niter > _ARC_JAC_SN_MAXITER:
raise ValueError('Landen transformation not converging')
K = np.prod(1 + np.array(ks[1:])) * np.pi/2
wns = [w]
for kn, knext in zip(ks[:-1], ks[1:]):
wn = wns[-1]
wnext = ( 2 * wn
/
( (1 + knext) * (1 + _complement(kn * wn)) ) )
wns.append(wnext)
u = 2 / np.pi * np.arcsin(wns[-1])
z = K * u
return z
def _arc_jac_sc1(w, m):
"""Real inverse Jacobian sc, with complementary modulus
Solve for z in w = sc(z, 1-m)
w - real scalar
m - modulus
From [1], sc(z, m) = -i * sn(i * z, 1 - m)
References
----------
.. [1] https://functions.wolfram.com/EllipticFunctions/JacobiSC/introductions/JacobiPQs/ShowAll.html,
"Representations through other Jacobi functions"
"""
zcomplex = _arc_jac_sn(1j * w, m)
if abs(zcomplex.real) > 1e-14:
raise ValueError('inverse Jacobi sc computation returned an unexpected real part')
return zcomplex.imag
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth-order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g., rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
ellip : Filter design function using this prototype
References
----------
.. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
Chapters 5 and 12.
.. [2] Orfanidis, "Lecture Notes on Elliptic Filter Design",
https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
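Examples
--------
A short sketch of the return values; a 4th-order prototype yields four
poles and four zeros:
>>> from scipy import signal
>>> z, p, k = signal.ellipap(4, rp=1, rs=40)
>>> len(z), len(p)
(4, 4)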
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / _pow10m1(0.1 * rp))
k = -p
z = []
return asarray(z), asarray(p), k
eps_sq = _pow10m1(0.1 * rp)
eps = np.sqrt(eps_sq)
ck1_sq = eps_sq / _pow10m1(0.1 * rs)
if ck1_sq == 0:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk(ck1_sq), special.ellipkm1(ck1_sq)
m = _ellipdeg(N, ck1_sq)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = _arc_jac_sc1(1. / eps, ck1_sq)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps_sq))
return z, p, k
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
This can more efficiently calculate ratios of factorials, since:
n!/m! == falling_factorial(n, n-m)
where n >= m
skipping the factors that cancel out
the usual factorial n! == ff(n, n)
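For example:
>>> _falling_factorial(6, 3)  # 6 * 5 * 4
120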
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
def _bessel_poly(n, reverse=False):
"""
Return the coefficients of Bessel polynomial of degree `n`
If `reverse` is true, a reverse Bessel polynomial is output.
Output is a list of coefficients:
[1] = 1
[1, 1] = 1*s + 1
[1, 3, 3] = 1*s^2 + 3*s + 3
[1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15
[1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
etc.
Output is a Python list of arbitrary precision long ints, so n is only
limited by your hardware's memory.
Sequence is http://oeis.org/A001498, and output can be confirmed to
match http://oeis.org/A001498/b001498.txt :
>>> i = 0
>>> for n in range(51):
... for x in _bessel_poly(n, reverse=True):
... print(i, x)
... i += 1
"""
if abs(int(n)) != n:
raise ValueError("Polynomial order must be a nonnegative integer")
else:
n = int(n) # np.int32 doesn't work, for instance
out = []
for k in range(n + 1):
num = _falling_factorial(2*n - k, n)
den = 2**(n - k) * math.factorial(k)
out.append(num // den)
if reverse:
return out[::-1]
else:
return out
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
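# Aberth update: x_new = x + alpha / (1 + alpha * beta), i.e. the Newton
# step damped by the accumulated repulsion terms beta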
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise Exception('Zeros failed to converge')
return x
def _bessel_zeros(N):
"""
Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
modified Bessel function of the second kind
"""
if N == 0:
return asarray([])
# Generate starting points
x0 = _campos_zeros(N)
# Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary
# Bessel polynomial y_N(x)
def f(x):
return special.kve(N+0.5, 1/x)
# First derivative of above
def fp(x):
return (special.kve(N-0.5, 1/x)/(2*x**2) -
special.kve(N+0.5, 1/x)/(x**2) +
special.kve(N+1.5, 1/x)/(2*x**2))
# Starting points converge to true zeros
x = _aberth(f, fp, x0)
# Improve precision using Newton's method on each
for i in range(len(x)):
x[i] = optimize.newton(f, x[i], fp, tol=1e-15)
# Average complex conjugates to make them exactly symmetrical
x = np.mean((x, x[::-1].conj()), 0)
# Zeros should sum to -1
if abs(np.sum(x) + 1) > 1e-15:
raise RuntimeError('Generated zeros are inaccurate')
return x
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/np.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase'):
"""
Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
Parameters
----------
N : int
The order of the filter.
norm : {'phase', 'delay', 'mag'}, optional
Frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at an angular (e.g., rad/s) cutoff frequency of 1. This
happens for both low-pass and high-pass filters, so this is the
"phase-matched" case. [6]_
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1 (e.g., 1 second). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency 1. This is called "frequency normalization" by
Bond. [1]_
.. versionadded:: 0.18.0
Returns
-------
z : ndarray
Zeros of the transfer function. This is always an empty array.
p : ndarray
Poles of the transfer function.
k : scalar
Gain of the transfer function. For phase-normalized, this is always 1.
See Also
--------
bessel : Filter design function using this prototype
Notes
-----
To find the pole locations, approximate starting points are generated [2]_
for the zeros of the ordinary Bessel polynomial [3]_, then the
Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
calculate more accurate zeros, and these locations are then inverted about
the unit circle.
References
----------
.. [1] C.R. Bond, "Bessel Filter Constants",
http://www.crbond.com/papers/bsf.pdf
.. [2] Campos and Calderon, "Approximate closed-form formulas for the
zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
.. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
.. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
April 1973
.. [5] Ehrlich, "A modified Newton method for polynomials", Communications
of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
:DOI:`10.1145/363067.363115`
.. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
Others", RaneNote 147, 1998, https://www.ranecommercial.com/legacy/note147.html
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
if N == 0:
p = []
k = 1
else:
# Find roots of reverse Bessel polynomial
p = 1/_bessel_zeros(N)
a_last = _falling_factorial(2*N, N) // 2**N
# Shift them to a different normalization if required
if norm in ('delay', 'mag'):
# Normalized for group delay of 1
k = a_last
if norm == 'mag':
# -3 dB magnitude point is at 1 rad/sec
norm_factor = _norm_factor(p, k)
p /= norm_factor
k = norm_factor**-N * a_last
elif norm == 'phase':
# Phase-matched (1/2 max phase shift at 1 rad/sec)
# Asymptotes are same as Butterworth filter
p *= 10**(-math.log10(a_last)/N)
k = 1
else:
raise ValueError('normalization not understood')
return asarray([]), asarray(p, dtype=complex), float(k)
def iirnotch(w0, Q, fs=2.0):
"""
Design second-order IIR notch digital filter.
A notch filter is a band-stop filter with a narrow bandwidth
(high quality factor). It rejects a narrow frequency band and
leaves the rest of the spectrum little changed.
Parameters
----------
w0 : float
Frequency to remove from a signal. If `fs` is specified, this is in
the same units as `fs`. By default, it is a normalized scalar that must
satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the
sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirpeak
Notes
-----
.. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the 60 Hz component from a
signal sampled at 200 Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 60.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> # Design notch filter
>>> b, a = signal.iirnotch(f0, Q, fs)
>>> # Frequency response
>>> freq, h = signal.freqz(b, a, fs=fs)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-25, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "notch", fs)
def iirpeak(w0, Q, fs=2.0):
"""
Design second-order IIR peak (resonant) digital filter.
A peak filter is a band-pass filter with a narrow bandwidth
(high quality factor). It rejects components outside a narrow
frequency band.
Parameters
----------
w0 : float
Frequency to be retained in a signal. If `fs` is specified, this is in
the same units as `fs`. By default, it is a normalized scalar that must
satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the
sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
peak filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirnotch
Notes
-----
.. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the frequencies other than the 300 Hz
component from a signal sampled at 1000 Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 300.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> # Design peak filter
>>> b, a = signal.iirpeak(f0, Q, fs)
>>> # Frequency response
>>> freq, h = signal.freqz(b, a, fs=fs)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-50, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "peak", fs)
def _design_notch_peak_filter(w0, Q, ftype, fs=2.0):
"""
Design notch or peak digital filter.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. If `fs` is specified,
this is in the same units as `fs`. By default, it is a normalized
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : str
The type of IIR filter to design:
- notch filter : ``notch``
- peak filter : ``peak``
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
"""
# Guarantee that the inputs are floats
w0 = float(w0)
Q = float(Q)
w0 = 2*w0/fs
# Checks if w0 is within the range
if w0 > 1.0 or w0 < 0.0:
raise ValueError("w0 should be such that 0 < w0 < 1")
# Get bandwidth
bw = w0/Q
# Normalize inputs
bw = bw*np.pi
w0 = w0*np.pi
# Compute -3dB attenuation
gb = 1/np.sqrt(2)
if ftype == "notch":
# Compute beta: formula 11.3.4 (p.575) from reference [1]
beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0)
elif ftype == "peak":
# Compute beta: formula 11.3.19 (p.579) from reference [1]
beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0)
else:
raise ValueError("Unknown ftype.")
# Compute gain: formula 11.3.6 (p.575) from reference [1]
gain = 1.0/(1.0+beta)
# Compute numerator b and denominator a
# formulas 11.3.7 (p.575) and 11.3.21 (p.579)
# from reference [1]
if ftype == "notch":
b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0])
else:
b = (1.0-gain)*np.array([1.0, 0.0, -1.0])
a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)])
return b, a
def iircomb(w0, Q, ftype='notch', fs=2.0):
"""
Design IIR notching or peaking digital comb filter.
A notching comb filter is a band-stop filter with a narrow bandwidth
(high quality factor). It rejects a narrow frequency band and
leaves the rest of the spectrum little changed.
A peaking comb filter is a band-pass filter with a narrow bandwidth
(high quality factor). It rejects components outside a narrow
frequency band.
Parameters
----------
w0 : float
Frequency to attenuate (notching) or boost (peaking). If `fs` is
specified, this is in the same units as `fs`. By default, it is
a normalized scalar that must satisfy ``0 < w0 < 1``, with
``w0 = 1`` corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : {'notch', 'peak'}
The type of comb filter generated by the function. If 'notch', then
it returns a filter with notches at frequencies ``0``, ``w0``,
``2 * w0``, etc. If 'peak', then it returns a filter with peaks at
frequencies ``0.5 * w0``, ``1.5 * w0``, ``2.5 * w0``, etc.
Default is 'notch'.
fs : float, optional
The sampling frequency of the signal. Default is 2.0.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
Raises
------
ValueError
If `w0` is less than or equal to 0 or greater than or equal to
``fs/2``, if `fs` is not divisible by `w0`, or if `ftype`
is not 'notch' or 'peak'.
See Also
--------
iirnotch
iirpeak
Notes
-----
For implementation details, see [1]_. The TF implementation of the
comb filter is numerically stable even at higher orders due to the
use of a single repeated pole, which won't suffer from precision loss.
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot notching comb filter at 20 Hz for a
signal sampled at 200 Hz, using quality factor Q = 30
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 20.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> # Design notching comb filter
>>> b, a = signal.iircomb(f0, Q, ftype='notch', fs=fs)
>>> # Frequency response
>>> freq, h = signal.freqz(b, a, fs=fs)
>>> response = abs(h)
>>> # To avoid divide by zero when graphing
>>> response[response == 0] = 1e-20
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(response)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-30, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
Design and plot peaking comb filter at 250 Hz for a
signal sampled at 1000 Hz, using quality factor Q = 30
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 250.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> # Design peaking filter
>>> b, a = signal.iircomb(f0, Q, ftype='peak', fs=fs)
>>> # Frequency response
>>> freq, h = signal.freqz(b, a, fs=fs)
>>> response = abs(h)
>>> # To avoid divide by zero when graphing
>>> response[response == 0] = 1e-20
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-80, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
# Convert w0, Q, and fs to float
w0 = float(w0)
Q = float(Q)
fs = float(fs)
# Check for invalid cutoff frequency or filter type
ftype = ftype.lower()
filter_types = ['notch', 'peak']
if not 0 < w0 < fs / 2:
raise ValueError("w0 must be between 0 and {}"
" (nyquist), but given {}.".format(fs / 2, w0))
if np.round(fs % w0) != 0:
raise ValueError('fs must be divisible by w0.')
if ftype not in filter_types:
raise ValueError('ftype must be either notch or peak.')
# Compute the order of the filter
N = int(fs // w0)
# Compute frequency in radians and filter bandwidth
# Eq. 11.3.1 (p. 574) from reference [1]
w0 = (2 * np.pi * w0) / fs
w_delta = w0 / Q
# Define base gain values depending on notch or peak filter
# Compute -3dB attenuation
# Eqs. 11.4.1 and 11.4.2 (p. 582) from reference [1]
if ftype == 'notch':
G0, G = [1, 0]
elif ftype == 'peak':
G0, G = [0, 1]
GB = 1 / np.sqrt(2)
# Compute beta
# Eq. 11.5.3 (p. 591) from reference [1]
beta = np.sqrt((GB**2 - G0**2) / (G**2 - GB**2)) * np.tan(N * w_delta / 4)
# Compute filter coefficients
# Eq 11.5.1 (p. 590) variables a, b, c from reference [1]
ax = (1 - beta) / (1 + beta)
bx = (G0 + G * beta) / (1 + beta)
cx = (G0 - G * beta) / (1 + beta)
# Compute numerator coefficients
# Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
# b - cz^-N or b + cz^-N
b = np.zeros(N + 1)
b[0] = bx
b[-1] = cx
if ftype == 'notch':
b[-1] = -cx
# Compute denominator coefficients
# Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
# 1 - az^-N or 1 + az^-N
a = np.zeros(N + 1)
a[0] = 1
a[-1] = ax
if ftype == 'notch':
a[-1] = -ax
return b, a
def _hz_to_erb(hz):
"""
Utility for converting from frequency (Hz) to the
Equivalent Rectangular Bandwidth (ERB) scale
ERB = frequency / EarQ + minBW
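For example, 1000 Hz maps to roughly 1000 / 9.26449 + 24.7 ~ 132.6 Hz.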
"""
EarQ = 9.26449
minBW = 24.7
return hz / EarQ + minBW
def gammatone(freq, ftype, order=None, numtaps=None, fs=None):
"""
Gammatone filter design.
This function computes the coefficients of an FIR or IIR gammatone
digital filter [1]_.
Parameters
----------
freq : float
Center frequency of the filter (expressed in the same units
as `fs`).
ftype : {'fir', 'iir'}
The type of filter the function generates. If 'fir', the function
will generate an Nth order FIR gammatone filter. If 'iir', the
function will generate an 8th order digital IIR filter, modeled as
a 4th order gammatone filter.
order : int, optional
The order of the filter. Only used when ``ftype='fir'``.
Default is 4 to model the human auditory system. Must be between
0 and 24.
numtaps : int, optional
Length of the filter. Only used when ``ftype='fir'``.
Default is ``fs*0.015`` if `fs` is greater than 1000,
15 if `fs` is less than or equal to 1000.
fs : float, optional
The sampling frequency of the signal. `freq` must be between
0 and ``fs/2``. Default is 2.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials of the filter.
Raises
------
ValueError
If `freq` is less than or equal to 0 or greater than or equal to
``fs/2``, if `ftype` is not 'fir' or 'iir', or if `order` is less than
or equal to 0 or greater than 24 when ``ftype='fir'``.
See Also
--------
firwin
iirfilter
References
----------
.. [1] Slaney, Malcolm, "An Efficient Implementation of the
Patterson-Holdsworth Auditory Filter Bank", Apple Computer
Technical Report 35, 1993, pp.3-8, 34-39.
Examples
--------
16-sample 4th order FIR Gammatone filter centered at 440 Hz
>>> from scipy import signal
>>> signal.gammatone(440, 'fir', numtaps=16, fs=16000)
(array([ 0.00000000e+00, 2.22196719e-07, 1.64942101e-06, 4.99298227e-06,
1.01993969e-05, 1.63125770e-05, 2.14648940e-05, 2.29947263e-05,
1.76776931e-05, 2.04980537e-06, -2.72062858e-05, -7.28455299e-05,
-1.36651076e-04, -2.19066855e-04, -3.18905076e-04, -4.33156712e-04]),
[1.0])
IIR Gammatone filter centered at 440 Hz
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.gammatone(440, 'iir', fs=16000)
>>> w, h = signal.freqz(b, a)
>>> plt.plot(w / ((2 * np.pi) / 16000), 20 * np.log10(abs(h)))
>>> plt.xscale('log')
>>> plt.title('Gammatone filter frequency response')
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(440, color='green') # cutoff frequency
>>> plt.show()
"""
# Converts freq to float
freq = float(freq)
# Set sampling rate if not passed
if fs is None:
fs = 2
fs = float(fs)
# Check for invalid cutoff frequency or filter type
ftype = ftype.lower()
filter_types = ['fir', 'iir']
if not 0 < freq < fs / 2:
raise ValueError("The frequency must be between 0 and {}"
" (nyquist), but given {}.".format(fs / 2, freq))
if ftype not in filter_types:
raise ValueError('ftype must be either fir or iir.')
# Calculate FIR gammatone filter
if ftype == 'fir':
# Set order and numtaps if not passed
if order is None:
order = 4
order = operator.index(order)
if numtaps is None:
numtaps = max(int(fs * 0.015), 15)
numtaps = operator.index(numtaps)
# Check for invalid order
if not 0 < order <= 24:
raise ValueError("Invalid order: order must be > 0 and <= 24.")
# Gammatone impulse response settings
t = np.arange(numtaps) / fs
bw = 1.019 * _hz_to_erb(freq)
# Calculate the FIR gammatone filter
b = (t ** (order - 1)) * np.exp(-2 * np.pi * bw * t)
b *= np.cos(2 * np.pi * freq * t)
# Scale the FIR filter so the frequency response is 1 at cutoff
scale_factor = 2 * (2 * np.pi * bw) ** (order)
scale_factor /= float_factorial(order - 1)
scale_factor /= fs
b *= scale_factor
a = [1.0]
# Calculate IIR gammatone filter
elif ftype == 'iir':
# Raise warning if order and/or numtaps is passed
if order is not None:
warnings.warn('order is not used for IIR gammatone filter.')
if numtaps is not None:
warnings.warn('numtaps is not used for IIR gammatone filter.')
# Gammatone impulse response settings
T = 1./fs
bw = 2 * np.pi * 1.019 * _hz_to_erb(freq)
fr = 2 * freq * np.pi * T
bwT = bw * T
# Calculate the gain to normalize the volume at the center frequency
g1 = -2 * np.exp(2j * fr) * T
g2 = 2 * np.exp(-(bwT) + 1j * fr) * T
g3 = np.sqrt(3 + 2 ** (3 / 2)) * np.sin(fr)
g4 = np.sqrt(3 - 2 ** (3 / 2)) * np.sin(fr)
g5 = np.exp(2j * fr)
g = g1 + g2 * (np.cos(fr) - g4)
g *= (g1 + g2 * (np.cos(fr) + g4))
g *= (g1 + g2 * (np.cos(fr) - g3))
g *= (g1 + g2 * (np.cos(fr) + g3))
g /= ((-2 / np.exp(2 * bwT) - 2 * g5 + 2 * (1 + g5) / np.exp(bwT)) ** 4)
g = np.abs(g)
# Create empty filter coefficient lists
b = np.empty(5)
a = np.empty(9)
# Calculate the numerator coefficients
b[0] = (T ** 4) / g
b[1] = -4 * T ** 4 * np.cos(fr) / np.exp(bw * T) / g
b[2] = 6 * T ** 4 * np.cos(2 * fr) / np.exp(2 * bw * T) / g
b[3] = -4 * T ** 4 * np.cos(3 * fr) / np.exp(3 * bw * T) / g
b[4] = T ** 4 * np.cos(4 * fr) / np.exp(4 * bw * T) / g
# Calculate the denominator coefficients
a[0] = 1
a[1] = -8 * np.cos(fr) / np.exp(bw * T)
a[2] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(2 * bw * T)
a[3] = -8 * (6 * np.cos(fr) + np.cos(3 * fr))
a[3] /= np.exp(3 * bw * T)
a[4] = 2 * (18 + 16 * np.cos(2 * fr) + np.cos(4 * fr))
a[4] /= np.exp(4 * bw * T)
a[5] = -8 * (6 * np.cos(fr) + np.cos(3 * fr))
a[5] /= np.exp(5 * bw * T)
a[6] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(6 * bw * T)
a[7] = -8 * np.cos(fr) / np.exp(7 * bw * T)
a[8] = np.exp(-8 * bw * T)
return b, a
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'bessel_phase': [besselap],
'bessel_delay': [besselap],
'bessel_mag': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
bessel_norms = {'bessel': 'phase',
'bessel_phase': 'phase',
'bessel_delay': 'delay',
'bessel_mag': 'mag'}
| bsd-3-clause |
pprett/scikit-learn | sklearn/tests/test_learning_curve.py | 45 | 11897 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test score gets better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
class MockEstimatorFailing(BaseEstimator):
"""Dummy classifier to test error_score in learning curve"""
def fit(self, X_subset, y_subset):
raise ValueError()
def score(self, X=None, y=None):
return None
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_error_score():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockEstimatorFailing()
_, _, test_scores = learning_curve(estimator, X, y, cv=3, error_score=0)
all_zeros = not np.any(test_scores)
assert(all_zeros)
def test_learning_curve_error_score_default_raise():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockEstimatorFailing()
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
SiLab-Bonn/testbeam_analysis | testbeam_analysis/result_analysis.py | 1 | 88746 | ''' All functions creating results (e.g. efficiency, residuals, track density) from fitted tracks are listed here.'''
from __future__ import division
import logging
import re
from collections import Iterable
import os.path
import tables as tb
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from scipy.stats import binned_statistic_2d
from scipy.optimize import curve_fit
from testbeam_analysis.tools import plot_utils
from testbeam_analysis.tools import geometry_utils
from testbeam_analysis.tools import analysis_utils
def calculate_residuals(input_tracks_file, input_alignment_file, n_pixels, pixel_size, output_residuals_file=None, dut_names=None, use_duts=None, max_chi2=None, nbins_per_pixel=None, npixels_per_bin=None, force_prealignment=False, use_fit_limits=True, cluster_size_selection=None, plot=True, gui=False, chunk_size=1000000):
'''Takes the tracks and calculates residuals for selected DUTs in col, row direction.
Parameters
----------
input_tracks_file : string
Filename of the input tracks file.
input_alignment_file : string
Filename of the input alignment file.
n_pixels : iterable of tuples
One tuple per DUT describing the number of pixels in column, row direction
e.g. for 2 DUTs: n_pixels = [(80, 336), (80, 336)]
pixel_size : iterable of tuples
One tuple per DUT describing the pixel dimension in um in column, row direction
e.g. for 2 DUTs: pixel_size = [(250, 50), (250, 50)]
output_residuals_file : string
Filename of the output residuals file. If None, the filename will be derived from the input hits file.
dut_names : iterable
Name of the DUTs. If None, DUT numbers will be used.
use_duts : iterable
The DUTs to calculate residuals for. If None, all DUTs in the input_tracks_file are used.
max_chi2 : uint, iterable
Use only tracks that are not heavily scattered to increase the track pointing resolution (cut on chi2).
The cut can be a single number that is then used for all DUTs, or a list with one chi2 cut per DUT.
If None, no cut is applied.
nbins_per_pixel : int
Number of bins per pixel along the residual axis. Number is a positive integer or None to automatically set the binning.
npixels_per_bin : int
Number of pixels per bin along the position axis. Number is a positive integer or None to automatically set the binning.
force_prealignment : bool
If True, use the prealignment even if an alignment is available.
cluster_size_selection : uint
Select which cluster sizes should be included for residual calculation. If None all cluster sizes are taken.
plot : bool
If True, create additional output plots.
gui : bool
If True, use GUI for plotting.
chunk_size : int
Chunk size of the data when reading from file.
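Examples
--------
A minimal call sketch; the file names and the two-DUT geometry below are
only placeholders:
calculate_residuals(input_tracks_file='Tracks.h5',
                    input_alignment_file='Alignment.h5',
                    n_pixels=[(80, 336), (80, 336)],
                    pixel_size=[(250, 50), (250, 50)])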
'''
logging.info('=== Calculating residuals ===')
use_prealignment = True if force_prealignment else False
with tb.open_file(input_alignment_file, mode="r") as in_file_h5: # Open file with alignment data
if use_prealignment:
logging.info('Use pre-alignment data')
prealignment = in_file_h5.root.PreAlignment[:]
n_duts = prealignment.shape[0]
else:
logging.info('Use alignment data')
alignment = in_file_h5.root.Alignment[:]
n_duts = alignment.shape[0]
if output_residuals_file is None:
output_residuals_file = os.path.splitext(input_tracks_file)[0] + '_residuals.h5'
if plot is True and not gui:
output_pdf = PdfPages(os.path.splitext(output_residuals_file)[0] + '.pdf', keep_empty=False)
else:
output_pdf = None
figs = [] if gui else None
if not isinstance(max_chi2, Iterable):
max_chi2 = [max_chi2] * n_duts
with tb.open_file(input_tracks_file, mode='r') as in_file_h5:
with tb.open_file(output_residuals_file, mode='w') as out_file_h5:
for node in in_file_h5.root:
actual_dut = int(re.findall(r'\d+', node.name)[-1])
if use_duts and actual_dut not in use_duts:
continue
logging.debug('Calculate residuals for DUT%d', actual_dut)
initialize = True # initialize the histograms
for tracks_chunk, _ in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size):
# select good hits and tracks
selection = np.logical_and(~np.isnan(tracks_chunk['x_dut_%d' % actual_dut]), ~np.isnan(tracks_chunk['track_chi2']))
tracks_chunk = tracks_chunk[selection] # Take only tracks where the actual DUT has a hit, otherwise the residual is wrong
if cluster_size_selection is not None:
tracks_chunk = tracks_chunk[tracks_chunk['n_hits_dut_%d' % actual_dut] == cluster_size_selection]
if max_chi2[actual_dut] is not None:
tracks_chunk = tracks_chunk[tracks_chunk['track_chi2'] <= max_chi2[actual_dut]]
# Coordinates in global coordinate system (x, y, z)
hit_x, hit_y, hit_z = tracks_chunk['x_dut_%d' % actual_dut], tracks_chunk['y_dut_%d' % actual_dut], tracks_chunk['z_dut_%d' % actual_dut]
intersection_x, intersection_y, intersection_z = tracks_chunk['offset_0'], tracks_chunk['offset_1'], tracks_chunk['offset_2']
# Transform to local coordinate system
if use_prealignment:
hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
dut_index=actual_dut,
prealignment=prealignment,
inverse=True)
intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
dut_index=actual_dut,
prealignment=prealignment,
inverse=True)
else: # Apply transformation from fine alignment information
hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
dut_index=actual_dut,
alignment=alignment,
inverse=True)
intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
dut_index=actual_dut,
alignment=alignment,
inverse=True)
if not np.allclose(hit_z_local, 0.0) or not np.allclose(intersection_z_local, 0.0):
logging.error('Hit z position = %s and z intersection %s', str(hit_z_local[:3]), str(intersection_z_local[:3]))
raise RuntimeError('The transformation to the local coordinate system did not give all z = 0. Wrong alignment used?')
difference = np.column_stack((hit_x, hit_y, hit_z)) - np.column_stack((intersection_x, intersection_y, intersection_z))
difference_local = np.column_stack((hit_x_local, hit_y_local, hit_z_local)) - np.column_stack((intersection_x_local, intersection_y_local, intersection_z_local))
# Histogram residuals in different ways
if initialize: # Only true for the first iteration, calculate the binning for the histograms
initialize = False
plot_n_pixels = 6.0
# detect peaks and calculate width to estimate the size of the histograms
if nbins_per_pixel is not None:
min_difference, max_difference = np.min(difference[:, 0]), np.max(difference[:, 0])
nbins = np.arange(min_difference - (pixel_size[actual_dut][0] / nbins_per_pixel), max_difference + 2 * (pixel_size[actual_dut][0] / nbins_per_pixel), pixel_size[actual_dut][0] / nbins_per_pixel)
else:
nbins = "auto"
hist, edges = np.histogram(difference[:, 0], bins=nbins)
edge_center = (edges[1:] + edges[:-1]) / 2.0
try:
_, center_x, fwhm_x, _ = analysis_utils.peak_detect(edge_center, hist)
except RuntimeError:
# do some simple FWHM with numpy array
try:
_, center_x, fwhm_x, _ = analysis_utils.simple_peak_detect(edge_center, hist)
except RuntimeError:
center_x, fwhm_x = 0.0, pixel_size[actual_dut][0] * plot_n_pixels
if nbins_per_pixel is not None:
min_difference, max_difference = np.min(difference[:, 1]), np.max(difference[:, 1])
nbins = np.arange(min_difference - (pixel_size[actual_dut][1] / nbins_per_pixel), max_difference + 2 * (pixel_size[actual_dut][1] / nbins_per_pixel), pixel_size[actual_dut][1] / nbins_per_pixel)
else:
nbins = "auto"
hist, edges = np.histogram(difference[:, 1], bins=nbins)
edge_center = (edges[1:] + edges[:-1]) / 2.0
try:
_, center_y, fwhm_y, _ = analysis_utils.peak_detect(edge_center, hist)
except RuntimeError:
# do some simple FWHM with numpy array
try:
_, center_y, fwhm_y, _ = analysis_utils.simple_peak_detect(edge_center, hist)
except RuntimeError:
center_y, fwhm_y = 0.0, pixel_size[actual_dut][1] * plot_n_pixels
if nbins_per_pixel is not None:
min_difference, max_difference = np.min(difference_local[:, 0]), np.max(difference_local[:, 0])
nbins = np.arange(min_difference - (pixel_size[actual_dut][0] / nbins_per_pixel), max_difference + 2 * (pixel_size[actual_dut][0] / nbins_per_pixel), pixel_size[actual_dut][0] / nbins_per_pixel)
else:
nbins = "auto"
hist, edges = np.histogram(difference_local[:, 0], bins=nbins)
edge_center = (edges[1:] + edges[:-1]) / 2.0
try:
_, center_col, fwhm_col, _ = analysis_utils.peak_detect(edge_center, hist)
except RuntimeError:
# do some simple FWHM with numpy array
try:
_, center_col, fwhm_col, _ = analysis_utils.simple_peak_detect(edge_center, hist)
except RuntimeError:
center_col, fwhm_col = 0.0, pixel_size[actual_dut][0] * plot_n_pixels
if nbins_per_pixel is not None:
min_difference, max_difference = np.min(difference_local[:, 1]), np.max(difference_local[:, 1])
nbins = np.arange(min_difference - (pixel_size[actual_dut][1] / nbins_per_pixel), max_difference + 2 * (pixel_size[actual_dut][1] / nbins_per_pixel), pixel_size[actual_dut][1] / nbins_per_pixel)
else:
nbins = "auto"
hist, edges = np.histogram(difference_local[:, 1], bins=nbins)
edge_center = (edges[1:] + edges[:-1]) / 2.0
try:
_, center_row, fwhm_row, _ = analysis_utils.peak_detect(edge_center, hist)
except RuntimeError:
# do some simple FWHM with numpy array
try:
_, center_row, fwhm_row, _ = analysis_utils.simple_peak_detect(edge_center, hist)
except RuntimeError:
center_row, fwhm_row = 0.0, pixel_size[actual_dut][1] * plot_n_pixels
# calculate the binning of the histograms, the minimum size is given by plot_n_pixels, otherwise FWHM is taken into account
if nbins_per_pixel is not None:
width = max(plot_n_pixels * pixel_size[actual_dut][0], pixel_size[actual_dut][0] * np.ceil(plot_n_pixels * fwhm_x / pixel_size[actual_dut][0]))
if np.mod(width / pixel_size[actual_dut][0], 2) != 0:
width += pixel_size[actual_dut][0]
nbins = int(nbins_per_pixel * width / pixel_size[actual_dut][0])
x_range = (center_x - 0.5 * width, center_x + 0.5 * width)
else:
nbins = "auto"
width = pixel_size[actual_dut][0] * np.ceil(plot_n_pixels * fwhm_x / pixel_size[actual_dut][0])
x_range = (center_x - width, center_x + width)
hist_residual_x_hist, hist_residual_x_xedges = np.histogram(difference[:, 0], range=x_range, bins=nbins)
if npixels_per_bin is not None:
min_intersection, max_intersection = np.min(intersection_x), np.max(intersection_x)
nbins = np.arange(min_intersection, max_intersection + npixels_per_bin * pixel_size[actual_dut][0], npixels_per_bin * pixel_size[actual_dut][0])
else:
nbins = "auto"
_, hist_residual_x_yedges = np.histogram(intersection_x, bins=nbins)
if nbins_per_pixel is not None:
width = max(plot_n_pixels * pixel_size[actual_dut][1], pixel_size[actual_dut][1] * np.ceil(plot_n_pixels * fwhm_y / pixel_size[actual_dut][1]))
if np.mod(width / pixel_size[actual_dut][1], 2) != 0:
width += pixel_size[actual_dut][1]
nbins = int(nbins_per_pixel * width / pixel_size[actual_dut][1])
y_range = (center_y - 0.5 * width, center_y + 0.5 * width)
else:
nbins = "auto"
width = pixel_size[actual_dut][1] * np.ceil(plot_n_pixels * fwhm_y / pixel_size[actual_dut][1])
y_range = (center_y - width, center_y + width)
hist_residual_y_hist, hist_residual_y_yedges = np.histogram(difference[:, 1], range=y_range, bins=nbins)
if npixels_per_bin is not None:
min_intersection, max_intersection = np.min(intersection_y), np.max(intersection_y)
nbins = np.arange(min_intersection, max_intersection + npixels_per_bin * pixel_size[actual_dut][1], npixels_per_bin * pixel_size[actual_dut][1])
else:
nbins = "auto"
_, hist_residual_y_xedges = np.histogram(intersection_y, bins=nbins)
if nbins_per_pixel is not None:
width = max(plot_n_pixels * pixel_size[actual_dut][0], pixel_size[actual_dut][0] * np.ceil(plot_n_pixels * fwhm_col / pixel_size[actual_dut][0]))
if np.mod(width / pixel_size[actual_dut][0], 2) != 0:
width += pixel_size[actual_dut][0]
nbins = int(nbins_per_pixel * width / pixel_size[actual_dut][0])
col_range = (center_col - 0.5 * width, center_col + 0.5 * width)
else:
nbins = "auto"
width = pixel_size[actual_dut][0] * np.ceil(plot_n_pixels * fwhm_col / pixel_size[actual_dut][0])
col_range = (center_col - width, center_col + width)
hist_residual_col_hist, hist_residual_col_xedges = np.histogram(difference_local[:, 0], range=col_range, bins=nbins)
if npixels_per_bin is not None:
min_intersection, max_intersection = np.min(intersection_x_local), np.max(intersection_x_local)
nbins = np.arange(min_intersection, max_intersection + npixels_per_bin * pixel_size[actual_dut][0], npixels_per_bin * pixel_size[actual_dut][0])
else:
nbins = "auto"
_, hist_residual_col_yedges = np.histogram(intersection_x_local, bins=nbins)
if nbins_per_pixel is not None:
width = max(plot_n_pixels * pixel_size[actual_dut][1], pixel_size[actual_dut][1] * np.ceil(plot_n_pixels * fwhm_row / pixel_size[actual_dut][1]))
if np.mod(width / pixel_size[actual_dut][1], 2) != 0:
width += pixel_size[actual_dut][1]
nbins = int(nbins_per_pixel * width / pixel_size[actual_dut][1])
row_range = (center_row - 0.5 * width, center_row + 0.5 * width)
else:
nbins = "auto"
width = pixel_size[actual_dut][1] * np.ceil(plot_n_pixels * fwhm_row / pixel_size[actual_dut][1])
row_range = (center_row - width, center_row + width)
hist_residual_row_hist, hist_residual_row_yedges = np.histogram(difference_local[:, 1], range=row_range, bins=nbins)
if npixels_per_bin is not None:
min_intersection, max_intersection = np.min(intersection_y_local), np.max(intersection_y_local)
nbins = np.arange(min_intersection, max_intersection + npixels_per_bin * pixel_size[actual_dut][1], npixels_per_bin * pixel_size[actual_dut][1])
else:
nbins = "auto"
_, hist_residual_row_xedges = np.histogram(intersection_y_local, bins=nbins)
# global x residual against x position
hist_x_residual_x_hist, hist_x_residual_x_xedges, hist_x_residual_x_yedges = np.histogram2d(
intersection_x,
difference[:, 0],
bins=(hist_residual_x_yedges, hist_residual_x_xedges))
# global y residual against y position
hist_y_residual_y_hist, hist_y_residual_y_xedges, hist_y_residual_y_yedges = np.histogram2d(
intersection_y,
difference[:, 1],
bins=(hist_residual_y_xedges, hist_residual_y_yedges))
# global y residual against x position
hist_x_residual_y_hist, hist_x_residual_y_xedges, hist_x_residual_y_yedges = np.histogram2d(
intersection_x,
difference[:, 1],
bins=(hist_residual_x_yedges, hist_residual_y_yedges))
# global x residual against y position
hist_y_residual_x_hist, hist_y_residual_x_xedges, hist_y_residual_x_yedges = np.histogram2d(
intersection_y,
difference[:, 0],
bins=(hist_residual_y_xedges, hist_residual_x_xedges))
# local column residual against column position
hist_col_residual_col_hist, hist_col_residual_col_xedges, hist_col_residual_col_yedges = np.histogram2d(
intersection_x_local,
difference_local[:, 0],
bins=(hist_residual_col_yedges, hist_residual_col_xedges))
# local row residual against row position
hist_row_residual_row_hist, hist_row_residual_row_xedges, hist_row_residual_row_yedges = np.histogram2d(
intersection_y_local,
difference_local[:, 1],
bins=(hist_residual_row_xedges, hist_residual_row_yedges))
# local row residual against column position
hist_col_residual_row_hist, hist_col_residual_row_xedges, hist_col_residual_row_yedges = np.histogram2d(
intersection_x_local,
difference_local[:, 1],
bins=(hist_residual_col_yedges, hist_residual_row_yedges))
# local column residual against row position
hist_row_residual_col_hist, hist_row_residual_col_xedges, hist_row_residual_col_yedges = np.histogram2d(
intersection_y_local,
difference_local[:, 0],
bins=(hist_residual_row_xedges, hist_residual_col_xedges))
else: # adding data to existing histograms
hist_residual_x_hist += np.histogram(difference[:, 0], bins=hist_residual_x_xedges)[0]
hist_residual_y_hist += np.histogram(difference[:, 1], bins=hist_residual_y_yedges)[0]
hist_residual_col_hist += np.histogram(difference_local[:, 0], bins=hist_residual_col_xedges)[0]
hist_residual_row_hist += np.histogram(difference_local[:, 1], bins=hist_residual_row_yedges)[0]
# global x residual against x position
hist_x_residual_x_hist += np.histogram2d(
intersection_x,
difference[:, 0],
bins=(hist_x_residual_x_xedges, hist_x_residual_x_yedges))[0]
# global y residual against y position
hist_y_residual_y_hist += np.histogram2d(
intersection_y,
difference[:, 1],
bins=(hist_y_residual_y_xedges, hist_y_residual_y_yedges))[0]
# global y residual against x position
hist_x_residual_y_hist += np.histogram2d(
intersection_x,
difference[:, 1],
bins=(hist_x_residual_y_xedges, hist_x_residual_y_yedges))[0]
# global x residual against y position
hist_y_residual_x_hist += np.histogram2d(
intersection_y,
difference[:, 0],
bins=(hist_y_residual_x_xedges, hist_y_residual_x_yedges))[0]
# local column residual against column position
hist_col_residual_col_hist += np.histogram2d(
intersection_x_local,
difference_local[:, 0],
bins=(hist_col_residual_col_xedges, hist_col_residual_col_yedges))[0]
# local row residual against row position
hist_row_residual_row_hist += np.histogram2d(
intersection_y_local,
difference_local[:, 1],
bins=(hist_row_residual_row_xedges, hist_row_residual_row_yedges))[0]
# local row residual against column position
hist_col_residual_row_hist += np.histogram2d(
intersection_x_local,
difference_local[:, 1],
bins=(hist_col_residual_row_xedges, hist_col_residual_row_yedges))[0]
# local column residual against row position
hist_row_residual_col_hist += np.histogram2d(
intersection_y_local,
difference_local[:, 0],
bins=(hist_row_residual_col_xedges, hist_row_residual_col_yedges))[0]
logging.debug('Storing residual histograms...')
dut_name = dut_names[actual_dut] if dut_names else ("DUT" + str(actual_dut))
# Global residuals
fit_residual_x, cov_residual_x = analysis_utils.fit_residuals(
hist=hist_residual_x_hist,
edges=hist_residual_x_xedges,
label='X residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_res_x = out_file_h5.create_carray(out_file_h5.root,
name='ResidualsX_DUT%d' % (actual_dut),
title='Residual distribution in x direction for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_residual_x_hist.dtype),
shape=hist_residual_x_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_res_x.attrs.xedges = hist_residual_x_xedges
out_res_x.attrs.fit_coeff = fit_residual_x
out_res_x.attrs.fit_cov = cov_residual_x
out_res_x[:] = hist_residual_x_hist
fit_residual_y, cov_residual_y = analysis_utils.fit_residuals(
hist=hist_residual_y_hist,
edges=hist_residual_y_yedges,
label='Y residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_res_y = out_file_h5.create_carray(out_file_h5.root,
name='ResidualsY_DUT%d' % (actual_dut),
title='Residual distribution in y direction for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_residual_y_hist.dtype),
shape=hist_residual_y_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_res_y.attrs.yedges = hist_residual_y_yedges
out_res_y.attrs.fit_coeff = fit_residual_y
out_res_y.attrs.fit_cov = cov_residual_y
out_res_y[:] = hist_residual_y_hist
fit_x_residual_x, cov_x_residual_x = analysis_utils.fit_residuals_vs_position(
hist=hist_x_residual_x_hist,
xedges=hist_x_residual_x_xedges,
yedges=hist_x_residual_x_yedges,
xlabel='X position [um]',
ylabel='X residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_x_res_x = out_file_h5.create_carray(out_file_h5.root,
name='XResidualsX_DUT%d' % (actual_dut),
title='Residual distribution in x direction as a function of the x position for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_x_residual_x_hist.dtype),
shape=hist_x_residual_x_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_x_res_x.attrs.xedges = hist_x_residual_x_xedges
out_x_res_x.attrs.yedges = hist_x_residual_x_yedges
out_x_res_x.attrs.fit_coeff = fit_x_residual_x
out_x_res_x.attrs.fit_cov = cov_x_residual_x
out_x_res_x[:] = hist_x_residual_x_hist
fit_y_residual_y, cov_y_residual_y = analysis_utils.fit_residuals_vs_position(
hist=hist_y_residual_y_hist,
xedges=hist_y_residual_y_xedges,
yedges=hist_y_residual_y_yedges,
xlabel='Y position [um]',
ylabel='Y residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_y_res_y = out_file_h5.create_carray(out_file_h5.root,
name='YResidualsY_DUT%d' % (actual_dut),
title='Residual distribution in y direction as a function of the y position for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_y_residual_y_hist.dtype),
shape=hist_y_residual_y_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_y_res_y.attrs.xedges = hist_y_residual_y_xedges
out_y_res_y.attrs.yedges = hist_y_residual_y_yedges
out_y_res_y.attrs.fit_coeff = fit_y_residual_y
out_y_res_y.attrs.fit_cov = cov_y_residual_y
out_y_res_y[:] = hist_y_residual_y_hist
fit_x_residual_y, cov_x_residual_y = analysis_utils.fit_residuals_vs_position(
hist=hist_x_residual_y_hist,
xedges=hist_x_residual_y_xedges,
yedges=hist_x_residual_y_yedges,
xlabel='X position [um]',
ylabel='Y residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_x_res_y = out_file_h5.create_carray(out_file_h5.root,
name='XResidualsY_DUT%d' % (actual_dut),
title='Residual distribution in y direction as a function of the x position for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_x_residual_y_hist.dtype),
shape=hist_x_residual_y_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_x_res_y.attrs.xedges = hist_x_residual_y_xedges
out_x_res_y.attrs.yedges = hist_x_residual_y_yedges
out_x_res_y.attrs.fit_coeff = fit_x_residual_y
out_x_res_y.attrs.fit_cov = cov_x_residual_y
out_x_res_y[:] = hist_x_residual_y_hist
fit_y_residual_x, cov_y_residual_x = analysis_utils.fit_residuals_vs_position(
hist=hist_y_residual_x_hist,
xedges=hist_y_residual_x_xedges,
yedges=hist_y_residual_x_yedges,
xlabel='Y position [um]',
ylabel='X residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_y_res_x = out_file_h5.create_carray(out_file_h5.root,
name='YResidualsX_DUT%d' % (actual_dut),
title='Residual distribution in x direction as a function of the y position for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_y_residual_x_hist.dtype),
shape=hist_y_residual_x_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_y_res_x.attrs.xedges = hist_y_residual_x_xedges
out_y_res_x.attrs.yedges = hist_y_residual_x_yedges
out_y_res_x.attrs.fit_coeff = fit_y_residual_x
out_y_res_x.attrs.fit_cov = cov_y_residual_x
out_y_res_x[:] = hist_y_residual_x_hist
# Local residuals
fit_residual_col, cov_residual_col = analysis_utils.fit_residuals(
hist=hist_residual_col_hist,
edges=hist_residual_col_xedges,
label='Column residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_res_col = out_file_h5.create_carray(out_file_h5.root,
name='ResidualsCol_DUT%d' % (actual_dut),
title='Residual distribution in column direction for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_residual_col_hist.dtype),
shape=hist_residual_col_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_res_col.attrs.xedges = hist_residual_col_xedges
out_res_col.attrs.fit_coeff = fit_residual_col
out_res_col.attrs.fit_cov = cov_residual_col
out_res_col[:] = hist_residual_col_hist
fit_residual_row, cov_residual_row = analysis_utils.fit_residuals(
hist=hist_residual_row_hist,
edges=hist_residual_row_yedges,
label='Row residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_res_row = out_file_h5.create_carray(out_file_h5.root,
name='ResidualsRow_DUT%d' % (actual_dut),
title='Residual distribution in row direction for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_residual_row_hist.dtype),
shape=hist_residual_row_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_res_row.attrs.yedges = hist_residual_row_yedges
out_res_row.attrs.fit_coeff = fit_residual_row
out_res_row.attrs.fit_cov = cov_residual_row
out_res_row[:] = hist_residual_row_hist
fit_col_residual_col, cov_col_residual_col = analysis_utils.fit_residuals_vs_position(
hist=hist_col_residual_col_hist,
xedges=hist_col_residual_col_xedges,
yedges=hist_col_residual_col_yedges,
xlabel='Column position [um]',
ylabel='Column residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_col_res_col = out_file_h5.create_carray(out_file_h5.root,
name='ColResidualsCol_DUT%d' % (actual_dut),
title='Residual distribution in column direction as a function of the column position for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_col_residual_col_hist.dtype),
shape=hist_col_residual_col_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_col_res_col.attrs.xedges = hist_col_residual_col_xedges
out_col_res_col.attrs.yedges = hist_col_residual_col_yedges
out_col_res_col.attrs.fit_coeff = fit_col_residual_col
out_col_res_col.attrs.fit_cov = cov_col_residual_col
out_col_res_col[:] = hist_col_residual_col_hist
fit_row_residual_row, cov_row_residual_row = analysis_utils.fit_residuals_vs_position(
hist=hist_row_residual_row_hist,
xedges=hist_row_residual_row_xedges,
yedges=hist_row_residual_row_yedges,
xlabel='Row position [um]',
ylabel='Row residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_row_res_row = out_file_h5.create_carray(out_file_h5.root,
name='RowResidualsRow_DUT%d' % (actual_dut),
title='Residual distribution in row direction as a function of the row position for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_row_residual_row_hist.dtype),
shape=hist_row_residual_row_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_row_res_row.attrs.xedges = hist_row_residual_row_xedges
out_row_res_row.attrs.yedges = hist_row_residual_row_yedges
out_row_res_row.attrs.fit_coeff = fit_row_residual_row
out_row_res_row.attrs.fit_cov = cov_row_residual_row
out_row_res_row[:] = hist_row_residual_row_hist
fit_col_residual_row, cov_col_residual_row = analysis_utils.fit_residuals_vs_position(
hist=hist_col_residual_row_hist,
xedges=hist_col_residual_row_xedges,
yedges=hist_col_residual_row_yedges,
xlabel='Column position [um]',
ylabel='Row residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_col_res_row = out_file_h5.create_carray(out_file_h5.root,
name='ColResidualsRow_DUT%d' % (actual_dut),
title='Residual distribution in row direction as a function of the column position for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_col_residual_row_hist.dtype),
shape=hist_col_residual_row_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_col_res_row.attrs.xedges = hist_col_residual_row_xedges
out_col_res_row.attrs.yedges = hist_col_residual_row_yedges
out_col_res_row.attrs.fit_coeff = fit_col_residual_row
out_col_res_row.attrs.fit_cov = cov_col_residual_row
out_col_res_row[:] = hist_col_residual_row_hist
fit_row_residual_col, cov_row_residual_col = analysis_utils.fit_residuals_vs_position(
hist=hist_row_residual_col_hist,
xedges=hist_row_residual_col_xedges,
yedges=hist_row_residual_col_yedges,
xlabel='Row position [um]',
ylabel='Column residual [um]',
title='Residuals for %s' % (dut_name,),
output_pdf=output_pdf,
gui=gui,
figs=figs
)
out_row_res_col = out_file_h5.create_carray(out_file_h5.root,
name='RowResidualsCol_DUT%d' % (actual_dut),
title='Residual distribution in column direction as a function of the row position for %s' % (dut_name),
atom=tb.Atom.from_dtype(hist_row_residual_col_hist.dtype),
shape=hist_row_residual_col_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_row_res_col.attrs.xedges = hist_row_residual_col_xedges
out_row_res_col.attrs.yedges = hist_row_residual_col_yedges
out_row_res_col.attrs.fit_coeff = fit_row_residual_col
out_row_res_col.attrs.fit_cov = cov_row_residual_col
out_row_res_col[:] = hist_row_residual_col_hist
if output_pdf is not None:
output_pdf.close()
if gui:
return figs
def calculate_efficiency(input_tracks_file, input_alignment_file, bin_size, sensor_size, output_efficiency_file=None, pixel_size=None, n_pixels=None, minimum_track_density=1, max_distance=500, use_duts=None, max_chi2=None, force_prealignment=False, cut_distance=None, col_range=None, row_range=None, show_inefficient_events=False, plot=True, gui=False, chunk_size=1000000):
'''Takes the tracks and calculates the hit efficiency and hit/track hit distance for selected DUTs.
Parameters
----------
input_tracks_file : string
Filename of the input tracks file.
input_alignment_file : string
Filename of the input alignment file.
bin_size : iterable
Sizes of bins (i.e. (virtual) pixel size). Give one tuple (x, y) for every plane or list of tuples for different planes.
sensor_size : Tuple or list of tuples
Describes the sensor size for each DUT. If one tuple is given it is (size x, size y)
If several tuples are given it is [(DUT0 size x, DUT0 size y), (DUT1 size x, DUT1 size y), ...]
output_efficiency_file : string
Filename of the output efficiency file. If None, the filename will be derived from the input tracks file.
minimum_track_density : int
Minimum track density required to consider bin for efficiency calculation.
use_duts : iterable
Calculate the efficiency for selected DUTs. If None, all duts are selected.
max_chi2 : uint
Only use tracks with a chi2 <= max_chi2.
force_prealignment : bool
Use the prealignment data even if an alignment is available.
cut_distance : int
Use only distances (between DUT hit and track hit) smaller than cut_distance.
max_distance : int
Defines the binning of the distance values.
col_range : iterable
Column range to calculate the efficiency for (e.g. to neglect noisy edge pixels).
row_range : iterable
Row range to calculate the efficiency for (e.g. to neglect noisy edge pixels).
plot : bool
If True, create additional output plots.
chunk_size : int
Chunk size of the data when reading from file.
pixel_size : iterable
tuple or list of col/row pixel dimension
n_pixels : iterable
tuple or list of amount of pixel in col/row dimension
show_inefficient_events : bool
Whether to log inefficient events
gui : bool
If True, use GUI for plotting.
'''
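# Minimal usage sketch (hypothetical file names and geometry, not taken from this
# module; n_duts stands for the number of DUTs in the setup):
# efficiencies, n_pass, n_total = calculate_efficiency(
#     input_tracks_file='Tracks.h5',
#     input_alignment_file='Alignment.h5',
#     bin_size=[(250, 50)],             # one (x, y) bin size in um, reused for all DUTs
#     sensor_size=[(20000, 16800)],     # one (x, y) sensor size in um, reused for all DUTs
#     pixel_size=[(250, 50)] * n_duts,  # per-DUT pixel pitch in um (needed for the local coordinate shift)
#     n_pixels=[(80, 336)] * n_duts,    # per-DUT number of pixels
#     use_duts=[0],
#     cut_distance=500)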
logging.info('=== Calculating efficiency ===')
if output_efficiency_file is None:
output_efficiency_file = os.path.splitext(input_tracks_file)[0] + '_efficiency.h5'
if plot is True and not gui:
output_pdf = PdfPages(os.path.splitext(output_efficiency_file)[0] + '.pdf', keep_empty=False)
else:
output_pdf = None
use_prealignment = True if force_prealignment else False
with tb.open_file(input_alignment_file, mode="r") as in_file_h5: # Open file with alignment data
if use_prealignment:
logging.info('Use pre-alignment data')
prealignment = in_file_h5.root.PreAlignment[:]
n_duts = prealignment.shape[0]
else:
logging.info('Use alignment data')
alignment = in_file_h5.root.Alignment[:]
n_duts = alignment.shape[0]
use_duts = use_duts if use_duts is not None else range(n_duts)  # standard setting: calculate the efficiency for all DUTs
if not isinstance(max_chi2, Iterable):
max_chi2 = [max_chi2] * len(use_duts)
efficiencies = []
pass_tracks = []
total_tracks = []
figs = [] if gui else None
with tb.open_file(input_tracks_file, mode='r') as in_file_h5:
with tb.open_file(output_efficiency_file, 'w') as out_file_h5:
for index, node in enumerate(in_file_h5.root):
actual_dut = int(re.findall(r'\d+', node.name)[-1])
if actual_dut not in use_duts:
continue
dut_index = np.where(np.array(use_duts) == actual_dut)[0][0]
logging.info('Calculate efficiency for DUT%d', actual_dut)
# Calculate histogram properties (bins size and number of bins)
bin_size = [bin_size, ] if not isinstance(bin_size, Iterable) else bin_size
if len(bin_size) == 1:
actual_bin_size_x = bin_size[0][0]
actual_bin_size_y = bin_size[0][1]
else:
actual_bin_size_x = bin_size[dut_index][0]
actual_bin_size_y = bin_size[dut_index][1]
dimensions = [sensor_size, ] if not isinstance(sensor_size, Iterable) else sensor_size # Sensor dimensions for each DUT
if len(dimensions) == 1:
dimensions = dimensions[0]
else:
dimensions = dimensions[dut_index]
n_bin_x = int(dimensions[0] / actual_bin_size_x)
n_bin_y = int(dimensions[1] / actual_bin_size_y)
# Define result histograms, these are filled for each hit chunk
# total_distance_array = np.zeros(shape=(n_bin_x, n_bin_y, max_distance))
total_hit_hist = np.zeros(shape=(n_bin_x, n_bin_y), dtype=np.uint32)
total_track_density = np.zeros(shape=(n_bin_x, n_bin_y))
total_track_density_with_DUT_hit = np.zeros(shape=(n_bin_x, n_bin_y))
actual_max_chi2 = max_chi2[dut_index]
for tracks_chunk, _ in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size):
# Cut in Chi 2 of the track fit
if actual_max_chi2:
tracks_chunk = tracks_chunk[tracks_chunk['track_chi2'] <= actual_max_chi2]
# Transform the hits and track intersections into the local coordinate system
# Coordinates in global coordinate system (x, y, z)
hit_x, hit_y, hit_z = tracks_chunk['x_dut_%d' % actual_dut], tracks_chunk['y_dut_%d' % actual_dut], tracks_chunk['z_dut_%d' % actual_dut]
intersection_x, intersection_y, intersection_z = tracks_chunk['offset_0'], tracks_chunk['offset_1'], tracks_chunk['offset_2']
# Transform to local coordinate system
if use_prealignment:
hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
dut_index=actual_dut,
prealignment=prealignment,
inverse=True)
intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
dut_index=actual_dut,
prealignment=prealignment,
inverse=True)
else: # Apply transformation from alignment information
hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
dut_index=actual_dut,
alignment=alignment,
inverse=True)
intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
dut_index=actual_dut,
alignment=alignment,
inverse=True)
# Quick fix: the local coordinate system has its origin at the sensor center, so shift by half the sensor size such that the coordinates run from 0 to the sensor dimensions (sensor center in the middle, not at the edge)
hit_x_local, hit_y_local = hit_x_local + pixel_size[actual_dut][0] / 2. * n_pixels[actual_dut][0], hit_y_local + pixel_size[actual_dut][1] / 2. * n_pixels[actual_dut][1]
intersection_x_local, intersection_y_local = intersection_x_local + pixel_size[actual_dut][0] / 2. * n_pixels[actual_dut][0], intersection_y_local + pixel_size[actual_dut][1] / 2. * n_pixels[actual_dut][1]
intersections_local = np.column_stack((intersection_x_local, intersection_y_local, intersection_z_local))
hits_local = np.column_stack((hit_x_local, hit_y_local, hit_z_local))
if not np.allclose(hits_local[np.isfinite(hits_local[:, 2]), 2], 0.0) or not np.allclose(intersection_z_local, 0.0):
raise RuntimeError('The transformation to the local coordinate system did not give all z = 0. Wrong alignment used?')
# Useful for debugging: print some inefficient events that can be cross-checked
# Select virtual hits
sel_virtual = np.isnan(tracks_chunk['x_dut_%d' % actual_dut])
if show_inefficient_events:
logging.info('These events are inefficient: %s', str(tracks_chunk['event_number'][sel_virtual]))
# Select hits from column, row range (e.g. to suppress edge pixels)
col_range = [col_range, ] if not isinstance(col_range, Iterable) else col_range
if len(col_range) == 1:
curr_col_range = col_range[0]
else:
curr_col_range = col_range[dut_index]
if curr_col_range is not None:
selection = np.logical_and(intersections_local[:, 0] >= curr_col_range[0], intersections_local[:, 0] <= curr_col_range[1]) # Select real hits
hits_local, intersections_local = hits_local[selection], intersections_local[selection]
row_range = [row_range, ] if not isinstance(row_range, Iterable) else row_range
if len(row_range) == 1:
curr_row_range = row_range[0]
else:
curr_row_range = row_range[dut_index]
if curr_row_range is not None:
selection = np.logical_and(intersections_local[:, 1] >= curr_row_range[0], intersections_local[:, 1] <= curr_row_range[1]) # Select real hits
hits_local, intersections_local = hits_local[selection], intersections_local[selection]
# Calculate distance between track hit and DUT hit
scale = np.square(np.array((1, 1, 0)))  # Weight vector that ignores the z component: distances are calculated in the sensor (x, y) plane only
distance = np.sqrt(np.dot(np.square(intersections_local - hits_local), scale)) # Array with distances between DUT hit and track hit for each event. Values in um
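# i.e. distance_i = sqrt((x_track_i - x_hit_i)**2 + (y_track_i - y_hit_i)**2) for each track, in um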
col_row_distance = np.column_stack((hits_local[:, 0], hits_local[:, 1], distance))
# total_distance_array += np.histogramdd(col_row_distance, bins=(n_bin_x, n_bin_y, max_distance), range=[[0, dimensions[0]], [0, dimensions[1]], [0, max_distance]])[0]
total_hit_hist += (np.histogram2d(hits_local[:, 0], hits_local[:, 1], bins=(n_bin_x, n_bin_y), range=[[0, dimensions[0]], [0, dimensions[1]]])[0]).astype(np.uint32)
# total_hit_hist += (np.histogram2d(hits_local[:, 0], hits_local[:, 1], bins=(n_bin_x, n_bin_y), range=[[-dimensions[0] / 2., dimensions[0] / 2.], [-dimensions[1] / 2., dimensions[1] / 2.]])[0]).astype(np.uint32)
# Calculate efficiency
selection = ~np.isnan(hits_local[:, 0])
if cut_distance: # Select intersections where hit is in given distance around track intersection
intersection_valid_hit = intersections_local[np.logical_and(selection, distance < cut_distance)]
else:
intersection_valid_hit = intersections_local[selection]
total_track_density += np.histogram2d(intersections_local[:, 0], intersections_local[:, 1], bins=(n_bin_x, n_bin_y), range=[[0, dimensions[0]], [0, dimensions[1]]])[0]
total_track_density_with_DUT_hit += np.histogram2d(intersection_valid_hit[:, 0], intersection_valid_hit[:, 1], bins=(n_bin_x, n_bin_y), range=[[0, dimensions[0]], [0, dimensions[1]]])[0]
if np.all(total_track_density == 0):
logging.warning('No tracks on DUT%d, cannot calculate efficiency', actual_dut)
continue
efficiency = np.zeros_like(total_track_density_with_DUT_hit)
efficiency[total_track_density != 0] = total_track_density_with_DUT_hit[total_track_density != 0].astype(np.float) / total_track_density[total_track_density != 0].astype(np.float) * 100.
efficiency = np.ma.array(efficiency, mask=total_track_density < minimum_track_density)
if not np.any(efficiency):
raise RuntimeError('All efficiencies for DUT%d are zero, consider changing cut values!' % actual_dut)
# Calculate distances between hit and intersection
# distance_mean_array = np.average(total_distance_array, axis=2, weights=range(0, max_distance)) * sum(range(0, max_distance)) / total_hit_hist.astype(np.float)
# distance_mean_array = np.ma.masked_invalid(distance_mean_array)
# distance_max_array = np.amax(total_distance_array, axis=2) * sum(range(0, max_distance)) / total_hit_hist.astype(np.float)
# distance_min_array = np.amin(total_distance_array, axis=2) * sum(range(0, max_distance)) / total_hit_hist.astype(np.float)
# distance_max_array = np.ma.masked_invalid(distance_max_array)
# distance_min_array = np.ma.masked_invalid(distance_min_array)
# plot_utils.plot_track_distances(distance_min_array, distance_max_array, distance_mean_array)
plot_utils.efficiency_plots(total_hit_hist, total_track_density, total_track_density_with_DUT_hit, efficiency, actual_dut, minimum_track_density, plot_range=dimensions, cut_distance=cut_distance, output_pdf=output_pdf, gui=gui, figs=figs)
# Calculate mean efficiency without any binning
eff, eff_err_min, eff_err_pl = analysis_utils.get_mean_efficiency(array_pass=total_track_density_with_DUT_hit,
array_total=total_track_density)
logging.info('Efficiency = %1.4f - %1.4f + %1.4f', eff, eff_err_min, eff_err_pl)
efficiencies.append(np.ma.mean(efficiency))
dut_group = out_file_h5.create_group(out_file_h5.root, 'DUT_%d' % actual_dut)
out_efficiency = out_file_h5.create_carray(dut_group, name='Efficiency', title='Efficiency map of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(efficiency.dtype), shape=efficiency.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_efficiency_mask = out_file_h5.create_carray(dut_group, name='Efficiency_mask', title='Masked pixel map of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(efficiency.mask.dtype), shape=efficiency.mask.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
# For correct statistical error calculation the number of detected tracks over total tracks is needed
out_pass = out_file_h5.create_carray(dut_group, name='Passing_tracks', title='Passing events of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(total_track_density_with_DUT_hit.dtype), shape=total_track_density_with_DUT_hit.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_total = out_file_h5.create_carray(dut_group, name='Total_tracks', title='Total events of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(total_track_density.dtype), shape=total_track_density.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
pass_tracks.append(total_track_density_with_DUT_hit.sum())
total_tracks.append(total_track_density.sum())
logging.info('Passing / total tracks: %d / %d', total_track_density_with_DUT_hit.sum(), total_track_density.sum())
# Store parameters used for efficiency calculation
out_efficiency.attrs.bin_size = bin_size
out_efficiency.attrs.minimum_track_density = minimum_track_density
out_efficiency.attrs.sensor_size = sensor_size
out_efficiency.attrs.use_duts = use_duts
out_efficiency.attrs.max_chi2 = max_chi2
out_efficiency.attrs.cut_distance = cut_distance
out_efficiency.attrs.max_distance = max_distance
out_efficiency.attrs.col_range = col_range
out_efficiency.attrs.row_range = row_range
out_efficiency[:] = efficiency.T
out_efficiency_mask[:] = efficiency.mask.T
out_pass[:] = total_track_density_with_DUT_hit.T
out_total[:] = total_track_density.T
if output_pdf is not None:
output_pdf.close()
if gui:
return figs
return efficiencies, pass_tracks, total_tracks
def calculate_purity(input_tracks_file, input_alignment_file, bin_size, sensor_size, output_purity_file=None, pixel_size=None, n_pixels=None, minimum_hit_density=10, max_distance=500, use_duts=None, max_chi2=None, force_prealignment=False, cut_distance=None, col_range=None, row_range=None, show_inefficient_events=False, output_file=None, plot=True, chunk_size=1000000):
'''Takes the tracks and calculates the hit purity and hit/track hit distance for selected DUTs.
Parameters
----------
input_tracks_file : string
Filename with the tracks table.
input_alignment_file : pytables file
Filename of the input aligment data.
bin_size : iterable
Bins sizes (i.e. (virtual) pixel size). Give one tuple (x, y) for every plane or list of tuples for different planes.
sensor_size : Tuple or list of tuples
Describes the sensor size for each DUT. If one tuple is given it is (size x, size y).
If several tuples are given it is [(DUT0 size x, DUT0 size y), (DUT1 size x, DUT1 size y), ...].
output_purity_file : string
Filename of the output purity file. If None, the filename will be derived from the input tracks file.
minimum_hit_density : int
Minimum hit density required to consider bin for purity calculation.
use_duts : iterable
The DUTs to calculate purity for. If None all duts are used.
max_chi2 : int
Only use tracks with a chi2 <= max_chi2.
force_prealignment : bool
Use the prealignment data even if an alignment is available.
cut_distance : int
Hit - track intersection distance <= cut_distance: pure hit (hit assigned to a track).
Hit - track intersection distance > cut_distance: impure hit (hit without a track).
max_distance : int
Defines the binning of the distance values.
col_range, row_range : iterable
Column / row range to calculate the purity for (e.g. to neglect noisy edge pixels).
plot : bool
If True, create additional output plots.
chunk_size : int
Chunk size of the data when reading from file.
'''
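# Usage sketch analogous to calculate_efficiency above (hypothetical file names and
# geometry; n_duts stands for the number of DUTs in the setup):
# purities, n_pure, n_total = calculate_purity(
#     input_tracks_file='Tracks.h5', input_alignment_file='Alignment.h5',
#     bin_size=[(250, 50)], sensor_size=[(20000, 16800)],
#     pixel_size=[(250, 50)] * n_duts, n_pixels=[(80, 336)] * n_duts,
#     cut_distance=500)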
logging.info('=== Calculate purity ===')
if output_purity_file is None:
output_purity_file = os.path.splitext(input_tracks_file)[0] + '_purity.h5'
if plot is True:
output_pdf = PdfPages(os.path.splitext(output_purity_file)[0] + '.pdf', keep_empty=False)
else:
output_pdf = None
use_prealignment = True if force_prealignment else False
with tb.open_file(input_alignment_file, mode="r") as in_file_h5: # Open file with alignment data
prealignment = in_file_h5.root.PreAlignment[:]
n_duts = prealignment.shape[0]
if not use_prealignment:
try:
alignment = in_file_h5.root.Alignment[:]
logging.info('Use alignment data')
except tb.exceptions.NodeError:
use_prealignment = True
logging.info('Use prealignment data')
if not isinstance(max_chi2, Iterable):
max_chi2 = [max_chi2] * n_duts
purities = []
pure_hits = []
total_hits = []
with tb.open_file(input_tracks_file, mode='r') as in_file_h5:
with tb.open_file(output_purity_file, 'w') as out_file_h5:
for index, node in enumerate(in_file_h5.root):
actual_dut = int(re.findall(r'\d+', node.name)[-1])
if use_duts and actual_dut not in use_duts:
continue
logging.info('Calculate purity for DUT %d', actual_dut)
# Calculate histogram properties (bins size and number of bins)
bin_size = [bin_size, ] if not isinstance(bin_size, Iterable) else bin_size
if len(bin_size) != 1:
actual_bin_size_x = bin_size[index][0]
actual_bin_size_y = bin_size[index][1]
else:
actual_bin_size_x = bin_size[0][0]
actual_bin_size_y = bin_size[0][1]
dimensions = [sensor_size, ] if not isinstance(sensor_size, Iterable) else sensor_size # Sensor dimensions for each DUT
if len(dimensions) == 1:
dimensions = dimensions[0]
else:
dimensions = dimensions[index]
n_bin_x = int(dimensions[0] / actual_bin_size_x)
n_bin_y = int(dimensions[1] / actual_bin_size_y)
# Define result histograms, these are filled for each hit chunk
total_hit_hist = np.zeros(shape=(n_bin_x, n_bin_y), dtype=np.uint32)
total_pure_hit_hist = np.zeros(shape=(n_bin_x, n_bin_y), dtype=np.uint32)
actual_max_chi2 = max_chi2[index]
for tracks_chunk, _ in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size):
# Take only tracks where the actual DUT has a hit, otherwise the residual is wrong
selection = np.logical_and(~np.isnan(tracks_chunk['x_dut_%d' % actual_dut]), ~np.isnan(tracks_chunk['track_chi2']))
selection_hit = ~np.isnan(tracks_chunk['x_dut_%d' % actual_dut])
# Cut in Chi 2 of the track fit
if actual_max_chi2:
tracks_chunk = tracks_chunk[tracks_chunk['track_chi2'] <= actual_max_chi2]
# Transform the hits and track intersections into the local coordinate system
# Coordinates in global coordinate system (x, y, z)
hit_x_dut, hit_y_dut, hit_z_dut = tracks_chunk['x_dut_%d' % actual_dut][selection_hit], tracks_chunk['y_dut_%d' % actual_dut][selection_hit], tracks_chunk['z_dut_%d' % actual_dut][selection_hit]
hit_x, hit_y, hit_z = tracks_chunk['x_dut_%d' % actual_dut][selection], tracks_chunk['y_dut_%d' % actual_dut][selection], tracks_chunk['z_dut_%d' % actual_dut][selection]
intersection_x, intersection_y, intersection_z = tracks_chunk['offset_0'][selection], tracks_chunk['offset_1'][selection], tracks_chunk['offset_2'][selection]
# Transform to local coordinate system
if use_prealignment:
hit_x_local_dut, hit_y_local_dut, hit_z_local_dut = geometry_utils.apply_alignment(hit_x_dut, hit_y_dut, hit_z_dut,
dut_index=actual_dut,
prealignment=prealignment,
inverse=True)
hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
dut_index=actual_dut,
prealignment=prealignment,
inverse=True)
intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
dut_index=actual_dut,
prealignment=prealignment,
inverse=True)
else: # Apply transformation from alignment information
hit_x_local_dut, hit_y_local_dut, hit_z_local_dut = geometry_utils.apply_alignment(hit_x_dut, hit_y_dut, hit_z_dut,
dut_index=actual_dut,
alignment=alignment,
inverse=True)
hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
dut_index=actual_dut,
alignment=alignment,
inverse=True)
intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
dut_index=actual_dut,
alignment=alignment,
inverse=True)
# Quick fix: the local coordinate system has its origin at the sensor center, so shift by half the sensor size such that the coordinates run from 0 to the sensor dimensions (sensor center in the middle, not at the edge)
hit_x_local_dut, hit_y_local_dut = hit_x_local_dut + pixel_size[actual_dut][0] / 2. * n_pixels[actual_dut][0], hit_y_local_dut + pixel_size[actual_dut][1] / 2. * n_pixels[actual_dut][1]
hit_x_local, hit_y_local = hit_x_local + pixel_size[actual_dut][0] / 2. * n_pixels[actual_dut][0], hit_y_local + pixel_size[actual_dut][1] / 2. * n_pixels[actual_dut][1]
intersection_x_local, intersection_y_local = intersection_x_local + pixel_size[actual_dut][0] / 2. * n_pixels[actual_dut][0], intersection_y_local + pixel_size[actual_dut][1] / 2. * n_pixels[actual_dut][1]
intersections_local = np.column_stack((intersection_x_local, intersection_y_local, intersection_z_local))
hits_local = np.column_stack((hit_x_local, hit_y_local, hit_z_local))
hits_local_dut = np.column_stack((hit_x_local_dut, hit_y_local_dut, hit_z_local_dut))
if not np.allclose(hits_local[np.isfinite(hits_local[:, 2]), 2], 0.0) or not np.allclose(intersection_z_local, 0.0):
raise RuntimeError('The transformation to the local coordinate system did not give all z = 0. Wrong alignment used?')
# Useful for debugging: print some impure events that can be cross-checked
# Select virtual hits
sel_virtual = np.isnan(tracks_chunk['x_dut_%d' % actual_dut])
if show_inefficient_events:
logging.info('These events are impure: %s', str(tracks_chunk['event_number'][sel_virtual]))
# Select hits from column, row range (e.g. to suppress edge pixels)
col_range = [col_range, ] if not isinstance(col_range, Iterable) else col_range
row_range = [row_range, ] if not isinstance(row_range, Iterable) else row_range
if len(col_range) == 1:
index = 0
if len(row_range) == 1:
index = 0
if col_range[index] is not None:
selection = np.logical_and(intersections_local[:, 0] >= col_range[index][0], intersections_local[:, 0] <= col_range[index][1]) # Select real hits
hits_local, intersections_local = hits_local[selection], intersections_local[selection]
if row_range[index] is not None:
selection = np.logical_and(intersections_local[:, 1] >= row_range[index][0], intersections_local[:, 1] <= row_range[index][1]) # Select real hits
hits_local, intersections_local = hits_local[selection], intersections_local[selection]
# Calculate distance between track hit and DUT hit
scale = np.square(np.array((1, 1, 0)))  # Weight vector that ignores the z component: distances are calculated in the sensor (x, y) plane only
distance = np.sqrt(np.dot(np.square(intersections_local - hits_local), scale)) # Array with distances between DUT hit and track hit for each event. Values in um
total_hit_hist += (np.histogram2d(hits_local_dut[:, 0], hits_local_dut[:, 1], bins=(n_bin_x, n_bin_y), range=[[0, dimensions[0]], [0, dimensions[1]]])[0]).astype(np.uint32)
# Calculate purity
pure_hits_local = hits_local[distance < cut_distance]
if not np.any(pure_hits_local):
logging.warning('No pure hits in DUT %d, cannot calculate purity', actual_dut)
continue
total_pure_hit_hist += (np.histogram2d(pure_hits_local[:, 0], pure_hits_local[:, 1], bins=(n_bin_x, n_bin_y), range=[[0, dimensions[0]], [0, dimensions[1]]])[0]).astype(np.uint32)
purity = np.zeros_like(total_hit_hist, dtype=np.float)  # float array, otherwise the percentages would be truncated to integers
purity[total_hit_hist != 0] = total_pure_hit_hist[total_hit_hist != 0].astype(np.float) / total_hit_hist[total_hit_hist != 0].astype(np.float) * 100.
purity = np.ma.array(purity, mask=total_hit_hist < minimum_hit_density)
if not np.any(purity):
raise RuntimeError('No pure hit for DUT%d, consider changing cut values or check track building!' % actual_dut)
# Calculate distances between hit and intersection
# distance_mean_array = np.average(total_distance_array, axis=2, weights=range(0, max_distance)) * sum(range(0, max_distance)) / total_hit_hist.astype(np.float)
# distance_mean_array = np.ma.masked_invalid(distance_mean_array)
# distance_max_array = np.amax(total_distance_array, axis=2) * sum(range(0, max_distance)) / total_hit_hist.astype(np.float)
# distance_min_array = np.amin(total_distance_array, axis=2) * sum(range(0, max_distance)) / total_hit_hist.astype(np.float)
# distance_max_array = np.ma.masked_invalid(distance_max_array)
# distance_min_array = np.ma.masked_invalid(distance_min_array)
# plot_utils.plot_track_distances(distance_min_array, distance_max_array, distance_mean_array)
plot_utils.purity_plots(total_pure_hit_hist, total_hit_hist, purity, actual_dut, minimum_hit_density, plot_range=dimensions, cut_distance=cut_distance, output_pdf=output_pdf)
logging.info('Purity = %1.4f +- %1.4f', np.ma.mean(purity), np.ma.std(purity))
purities.append(np.ma.mean(purity))
dut_group = out_file_h5.create_group(out_file_h5.root, 'DUT_%d' % actual_dut)
out_purity = out_file_h5.create_carray(dut_group, name='Purity', title='Purity map of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(purity.dtype), shape=purity.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_purity_mask = out_file_h5.create_carray(dut_group, name='Purity_mask', title='Masked pixel map of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(purity.mask.dtype), shape=purity.mask.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
# For correct statistical error calculation the number of pure hits over total hits is needed
out_pure_hits = out_file_h5.create_carray(dut_group, name='Pure_hits', title='Passing events of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(total_pure_hit_hist.dtype), shape=total_pure_hit_hist.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_total_total = out_file_h5.create_carray(dut_group, name='Total_hits', title='Total events of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(total_hit_hist.dtype), shape=total_hit_hist.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
pure_hits.append(total_pure_hit_hist.sum())
total_hits.append(total_hit_hist.sum())
logging.info('Pure hits / total hits: %d / %d, Purity = %.2f', total_pure_hit_hist.sum(), total_hit_hist.sum(), 100. * total_pure_hit_hist.sum() / total_hit_hist.sum())
# Store parameters used for purity calculation
out_purity.attrs.bin_size = bin_size
out_purity.attrs.minimum_hit_density = minimum_hit_density
out_purity.attrs.sensor_size = sensor_size
out_purity.attrs.use_duts = use_duts
out_purity.attrs.max_chi2 = max_chi2
out_purity.attrs.cut_distance = cut_distance
out_purity.attrs.max_distance = max_distance
out_purity.attrs.col_range = col_range
out_purity.attrs.row_range = row_range
out_purity[:] = purity.T
out_purity_mask[:] = purity.mask.T
out_pure_hits[:] = total_pure_hit_hist.T
out_total_total[:] = total_hit_hist.T
if output_pdf is not None:
output_pdf.close()
return purities, pure_hits, total_hits
def histogram_track_angle(input_tracks_file, input_alignment_file=None, output_track_angle_file=None, n_bins="auto", plot_range=(None, None), use_duts=None, dut_names=None, plot=True, chunk_size=499999):
'''Calculates and histograms the track angle of the fitted tracks for selected DUTs.
Parameters
----------
input_tracks_file : string
Filename of the input tracks file.
input_alignment_file : string
Filename of the input alignment file.
If None, the DUT planes are assumed to be perpendicular to the z axis.
output_track_angle_file : string
Filename of the output track angle file with track angle histogram and fitted means and sigmas of track angles for selected DUTs.
If None, deduce filename from input tracks file.
n_bins : uint
Number of bins for the histogram.
If "auto", automatic binning is used.
plot_range : iterable of tuples
Tuple of the plot ranges in rad for the alpha and beta angular distributions, e.g. ((-0.01, +0.01), (-0.01, +0.01)).
If (None, None), plotting from minimum to maximum.
use_duts : iterable
Calculate the track angle for given DUTs. If None, all duts are used.
dut_names : iterable
Name of the DUTs. If None, DUT numbers will be used.
plot : bool
If True, create additional output plots.
chunk_size : uint
Chunk size of the data when reading from file.
'''
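# Usage sketch (hypothetical file names; the alignment file is optional):
# histogram_track_angle(input_tracks_file='Tracks.h5',
#                       input_alignment_file='Alignment.h5',
#                       n_bins=100,
#                       plot_range=((-0.01, 0.01), (-0.01, 0.01)))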
logging.info('=== Calculating track angles ===')
if input_alignment_file:
with tb.open_file(input_alignment_file, mode="r") as in_file_h5: # Open file with alignment data
logging.info('Use alignment data')
alignment = in_file_h5.root.Alignment[:]
else:
alignment = None
if output_track_angle_file is None:
output_track_angle_file = os.path.splitext(input_tracks_file)[0] + '_track_angles.h5'
with tb.open_file(input_tracks_file, 'r') as in_file_h5:
with tb.open_file(output_track_angle_file, mode="w") as out_file_h5:
nodes = in_file_h5.list_nodes("/")
if not nodes:
return
extended_nodes = nodes[:1]
extended_nodes.extend(nodes)
for index, node in enumerate(extended_nodes): # loop through all DUTs in track table
initialize = True
actual_dut = int(re.findall(r'\d+', node.name)[-1])
if index == 0:
dut_name = None
else:
dut_name = "DUT%d" % actual_dut
if use_duts is not None and actual_dut not in use_duts:
continue
if alignment is not None and index != 0:
rotation_matrix = geometry_utils.rotation_matrix(alpha=alignment[actual_dut]['alpha'],
beta=alignment[actual_dut]['beta'],
gamma=alignment[actual_dut]['gamma'])
basis_global = rotation_matrix.T.dot(np.eye(3))
dut_plane_normal = basis_global[2]
if dut_plane_normal[2] < 0:
dut_plane_normal = -dut_plane_normal
else:
dut_plane_normal = np.array([0.0, 0.0, 1.0])
for tracks_chunk, _ in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size): # only store track slopes of selected DUTs
track_slopes = np.column_stack((tracks_chunk['slope_0'],
tracks_chunk['slope_1'],
tracks_chunk['slope_2']))
# TODO: alpha/beta wrt DUT col / row
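# The slope vector gives the track direction; assuming it is normalized, the
# arccos of its inner product with the plane normal is the total track angle,
# and the cross products below project it onto in-plane axes to obtain the
# alpha and beta components.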
total_angles = np.arccos(np.inner(dut_plane_normal, track_slopes))
alpha_angles = 0.5 * np.pi - np.arccos(np.inner(track_slopes, np.cross(dut_plane_normal, np.array([1.0, 0.0, 0.0]))))
beta_angles = 0.5 * np.pi - np.arccos(np.inner(track_slopes, np.cross(dut_plane_normal, np.array([0.0, 1.0, 0.0]))))
if initialize:
total_angle_hist, total_angle_hist_edges = np.histogram(total_angles, bins=n_bins, range=None)
alpha_angle_hist, alpha_angle_hist_edges = np.histogram(alpha_angles, bins=n_bins, range=plot_range[1])
beta_angle_hist, beta_angle_hist_edges = np.histogram(beta_angles, bins=n_bins, range=plot_range[0])
initialize = False
else:
total_angle_hist += np.histogram(total_angles, bins=total_angle_hist_edges)[0]
alpha_angle_hist += np.histogram(alpha_angles, bins=alpha_angle_hist_edges)[0]
beta_angle_hist += np.histogram(beta_angles, bins=beta_angle_hist_edges)[0]
# write results
track_angle_total = out_file_h5.create_carray(where=out_file_h5.root,
name='Total_Track_Angle_Hist%s' % (("_%s" % dut_name) if dut_name else ""),
title='Total track angle distribution%s' % (("_for_%s" % dut_name) if dut_name else ""),
atom=tb.Atom.from_dtype(total_angle_hist.dtype),
shape=total_angle_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
track_angle_beta = out_file_h5.create_carray(where=out_file_h5.root,
name='Beta_Track_Angle_Hist%s' % (("_%s" % dut_name) if dut_name else ""),
title='Beta track angle distribution%s' % (("_for_%s" % dut_name) if dut_name else ""),
atom=tb.Atom.from_dtype(beta_angle_hist.dtype),
shape=beta_angle_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
track_angle_alpha = out_file_h5.create_carray(where=out_file_h5.root,
name='Alpha_Track_Angle_Hist%s' % (("_%s" % dut_name) if dut_name else ""),
title='Alpha track angle distribution%s' % (("_for_%s" % dut_name) if dut_name else ""),
atom=tb.Atom.from_dtype(alpha_angle_hist.dtype),
shape=alpha_angle_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
# fit the total, alpha and beta angle histograms with a Gaussian
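# analysis_utils.gauss is assumed to be of the form A * exp(-(x - mu)**2 / (2 * sigma**2)),
# so fit[0] is the amplitude, fit[1] the mean and fit[2] the sigma stored in the attributes below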
bin_center = (total_angle_hist_edges[1:] + total_angle_hist_edges[:-1]) / 2.0
mean = analysis_utils.get_mean_from_histogram(total_angle_hist, bin_center)
rms = analysis_utils.get_rms_from_histogram(total_angle_hist, bin_center)
fit_total, cov = curve_fit(analysis_utils.gauss, bin_center, total_angle_hist, p0=[np.amax(total_angle_hist), mean, rms])
bin_center = (beta_angle_hist_edges[1:] + beta_angle_hist_edges[:-1]) / 2.0
mean = analysis_utils.get_mean_from_histogram(beta_angle_hist, bin_center)
rms = analysis_utils.get_rms_from_histogram(beta_angle_hist, bin_center)
fit_beta, cov = curve_fit(analysis_utils.gauss, bin_center, beta_angle_hist, p0=[np.amax(beta_angle_hist), mean, rms])
bin_center = (alpha_angle_hist_edges[1:] + alpha_angle_hist_edges[:-1]) / 2.0
mean = analysis_utils.get_mean_from_histogram(alpha_angle_hist, bin_center)
rms = analysis_utils.get_rms_from_histogram(alpha_angle_hist, bin_center)
fit_alpha, cov = curve_fit(analysis_utils.gauss, bin_center, alpha_angle_hist, p0=[np.amax(alpha_angle_hist), mean, rms])
# total
track_angle_total.attrs.edges = total_angle_hist_edges
track_angle_total.attrs.amp = fit_total[0]
track_angle_total.attrs.mean = fit_total[1]
track_angle_total.attrs.sigma = fit_total[2]
track_angle_total[:] = total_angle_hist
# x
track_angle_beta.attrs.edges = beta_angle_hist_edges
track_angle_beta.attrs.amp = fit_beta[0]
track_angle_beta.attrs.mean = fit_beta[1]
track_angle_beta.attrs.sigma = fit_beta[2]
track_angle_beta[:] = beta_angle_hist
# y
track_angle_alpha.attrs.edges = alpha_angle_hist_edges
track_angle_alpha.attrs.amp = fit_alpha[0]
track_angle_alpha.attrs.mean = fit_alpha[1]
track_angle_alpha.attrs.sigma = fit_alpha[2]
track_angle_alpha[:] = alpha_angle_hist
if plot:
plot_utils.plot_track_angle(input_track_angle_file=output_track_angle_file, output_pdf_file=None, dut_names=dut_names)
| mit |
xubenben/data-science-from-scratch | jun_code/KNN.py | 1 | 4531 | import plot_state_borders as plot_graph
import matplotlib.pyplot as plt
from collections import Counter
def majority_vote(labels):
votes = Counter(labels)
winner,count = votes.most_common(1)[0]
num = len([c for c in votes.values() if c == count])
if num ==1:
return winner
else:
return majority_vote(labels[:-1])
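# e.g. majority_vote(['a', 'b', 'b', 'a']) is a 2:2 tie, so the last label is
# dropped and the recursive vote on ['a', 'b', 'b'] returns 'b'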
def distance(v1,v2):
return sum([(v1_i-v2_i)**2 for v1_i,v2_i in zip(v1,v2)])
def knn_classify(k,labled_points,new_point):
by_distance = sorted(labled_points,key=lambda (point,_):
distance(point,new_point))
k_n_labels = [label for _,label in by_distance[:k]]
return majority_vote(k_n_labels)
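# e.g. knn_classify(3, [([0, 0], 'a'), ([0, 1], 'a'), ([1, 1], 'b')], [0.2, 0.2])
# returns 'a': the two closest labeled points both carry the label 'a'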
if __name__ == "__main__":
cities=[(-86.75,33.5666666666667,'Python'),(-88.25,30.6833333333333,'Python'),(-112.016666666667,33.4333333333333,'Java'),(-110.933333333333,32.1166666666667,'Java'),(-92.2333333333333,34.7333333333333,'R'),(-121.95,37.7,'R'),(-118.15,33.8166666666667,'Python'),(-118.233333333333,34.05,'Java'),(-122.316666666667,37.8166666666667,'R'),(-117.6,34.05,'Python'),(-116.533333333333,33.8166666666667,'Python'),(-121.5,38.5166666666667,'R'),(-117.166666666667,32.7333333333333,'R'),(-122.383333333333,37.6166666666667,'R'),(-121.933333333333,37.3666666666667,'R'),(-122.016666666667,36.9833333333333,'Python'),(-104.716666666667,38.8166666666667,'Python'),(-104.866666666667,39.75,'Python'),(-72.65,41.7333333333333,'R'),(-75.6,39.6666666666667,'Python'),(-77.0333333333333,38.85,'Python'),(-80.2666666666667,25.8,'Java'),(-81.3833333333333,28.55,'Java'),(-82.5333333333333,27.9666666666667,'Java'),(-84.4333333333333,33.65,'Python'),(-116.216666666667,43.5666666666667,'Python'),(-87.75,41.7833333333333,'Java'),(-86.2833333333333,39.7333333333333,'Java'),(-93.65,41.5333333333333,'Java'),(-97.4166666666667,37.65,'Java'),(-85.7333333333333,38.1833333333333,'Python'),(-90.25,29.9833333333333,'Java'),(-70.3166666666667,43.65,'R'),(-76.6666666666667,39.1833333333333,'R'),(-71.0333333333333,42.3666666666667,'R'),(-72.5333333333333,42.2,'R'),(-83.0166666666667,42.4166666666667,'Python'),(-84.6,42.7833333333333,'Python'),(-93.2166666666667,44.8833333333333,'Python'),(-90.0833333333333,32.3166666666667,'Java'),(-94.5833333333333,39.1166666666667,'Java'),(-90.3833333333333,38.75,'Python'),(-108.533333333333,45.8,'Python'),(-95.9,41.3,'Python'),(-115.166666666667,36.0833333333333,'Java'),(-71.4333333333333,42.9333333333333,'R'),(-74.1666666666667,40.7,'R'),(-106.616666666667,35.05,'Python'),(-78.7333333333333,42.9333333333333,'R'),(-73.9666666666667,40.7833333333333,'R'),(-80.9333333333333,35.2166666666667,'Python'),(-78.7833333333333,35.8666666666667,'Python'),(-100.75,46.7666666666667,'Java'),(-84.5166666666667,39.15,'Java'),(-81.85,41.4,'Java'),(-82.8833333333333,40,'Java'),(-97.6,35.4,'Python'),(-122.666666666667,45.5333333333333,'Python'),(-75.25,39.8833333333333,'Python'),(-80.2166666666667,40.5,'Python'),(-71.4333333333333,41.7333333333333,'R'),(-81.1166666666667,33.95,'R'),(-96.7333333333333,43.5666666666667,'Python'),(-90,35.05,'R'),(-86.6833333333333,36.1166666666667,'R'),(-97.7,30.3,'Python'),(-96.85,32.85,'Java'),(-95.35,29.9666666666667,'Java'),(-98.4666666666667,29.5333333333333,'Java'),(-111.966666666667,40.7666666666667,'Python'),(-73.15,44.4666666666667,'R'),(-77.3333333333333,37.5,'Python'),(-122.3,47.5333333333333,'Python'),(-89.3333333333333,43.1333333333333,'R'),(-104.816666666667,41.15,'Java')]
cities = [([longitude, latitude], language) for longitude, latitude,language in cities]
# cities=[([-122.3 , 47.53], "Python"),
# ([ -96.85, 32.85 ], "Java"),
# ([ -89.33, 43.13 ], "R"),
# ]
plots = { "Java" : ([], []), "Python" : ([], []), "R" : ([], []) }
markers = { "Java" : "o", "Python" : "s", "R" : "^" }
colors = { "Java" : "r", "Python" : "b", "R" : "g" }
plot_graph.plot_state_borders(plt)
for (longitude, latitude), language in cities:
plots[language][0].append(longitude)
plots[language][1].append(latitude)
k=5
for longitude in range(-130, -60):
for latitude in range(20, 55):
predicted_language = knn_classify(k, cities, [longitude, latitude])
plots[predicted_language][0].append(longitude)
plots[predicted_language][1].append(latitude)
for language,(x,y) in plots.iteritems():
plt.scatter(x,y,color=colors[language],marker=markers[language],label=language
,zorder=10)
plt.legend(loc=0)
plt.show()
| unlicense |
jereze/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
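# (the support vectors of a linear SVM satisfy |w . x + b| = 1, so the geometric
#  margin computed above is 1 / ||w||)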
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
tiagofrepereira2012/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py | 62 | 9268 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
return {
key: val.tolist() if isinstance(val, np.ndarray) else val
for key, val in a.items()
}
class _FeedingFunctionsTestCase(test.TestCase):
"""Tests for feeding functions."""
def testArrayFeedFnBatchOne(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 16
expected = {
"index_placeholder": [i],
"value_placeholder": [[2 * i, 2 * i + 1]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
  def testArrayFeedFnBatchFive(self):
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 5)
    # cycle around a couple times
    for _ in range(0, 101, 2):
      aff()
    expected = {
        "index_placeholder": [15, 0, 1, 2, 3],
        "value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testArrayFeedFnBatchTwoWithOneEpoch(self):
    array = np.arange(5) + 10
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, batch_size=2, num_epochs=1)
    expected = {
        "index_placeholder": [0, 1],
        "value_placeholder": [10, 11]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

    expected = {
        "index_placeholder": [2, 3],
        "value_placeholder": [12, 13]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

    expected = {
        "index_placeholder": [4],
        "value_placeholder": [14]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testArrayFeedFnBatchOneHundred(self):
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 100)
    expected = {
        "index_placeholder":
            list(range(0, 16)) * 6 + list(range(0, 4)),
        "value_placeholder":
            np.arange(32).reshape([16, 2]).tolist() * 6 +
            [[0, 1], [2, 3], [4, 5], [6, 7]]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testArrayFeedFnBatchOneHundredWithSmallerArrayAndMultipleEpochs(self):
    array = np.arange(2) + 10
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, batch_size=100, num_epochs=2)
    expected = {
        "index_placeholder": [0, 1, 0, 1],
        "value_placeholder": [10, 11, 10, 11],
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testPandasFeedFnBatchOne(self):
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 1)
    # cycle around a couple times
    for x in range(0, 100):
      i = x % 32
      expected = {
          "index_placeholder": [i + 96],
          "a_placeholder": [32 + i],
          "b_placeholder": [64 + i]
      }
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchFive(self):
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 5)
    # cycle around a couple times
    for _ in range(0, 101, 2):
      aff()
    expected = {
        "index_placeholder": [127, 96, 97, 98, 99],
        "a_placeholder": [63, 32, 33, 34, 35],
        "b_placeholder": [95, 64, 65, 66, 67]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testPandasFeedFnBatchTwoWithOneEpoch(self):
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 37)
    array2 = np.arange(64, 69)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 101))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, batch_size=2, num_epochs=1)
    expected = {
        "index_placeholder": [96, 97],
        "a_placeholder": [32, 33],
        "b_placeholder": [64, 65]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

    expected = {
        "index_placeholder": [98, 99],
        "a_placeholder": [34, 35],
        "b_placeholder": [66, 67]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

    expected = {
        "index_placeholder": [100],
        "a_placeholder": [36],
        "b_placeholder": [68]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testPandasFeedFnBatchOneHundred(self):
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 100)
    expected = {
        "index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
        "a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
        "b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchOneHundredWithSmallDataArrayAndMultipleEpochs(self):
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 34)
    array2 = np.arange(64, 66)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 98))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, batch_size=100, num_epochs=2)
    expected = {
        "index_placeholder": [96, 97, 96, 97],
        "a_placeholder": [32, 33, 32, 33],
        "b_placeholder": [64, 65, 64, 65]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testOrderedDictNumpyFeedFnBatchTwoWithOneEpoch(self):
    a = np.arange(32, 37)
    b = np.arange(64, 69)
    x = {"a": a, "b": b}
    ordered_dict_x = collections.OrderedDict(
        sorted(x.items(), key=lambda t: t[0]))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._OrderedDictNumpyFeedFn(
        placeholders, ordered_dict_x, batch_size=2, num_epochs=1)
    expected = {
        "index_placeholder": [0, 1],
        "a_placeholder": [32, 33],
        "b_placeholder": [64, 65]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

    expected = {
        "index_placeholder": [2, 3],
        "a_placeholder": [34, 35],
        "b_placeholder": [66, 67]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

    expected = {
        "index_placeholder": [4],
        "a_placeholder": [36],
        "b_placeholder": [68]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testOrderedDictNumpyFeedFnLargeBatchWithSmallArrayAndMultipleEpochs(self):
    a = np.arange(32, 34)
    b = np.arange(64, 66)
    x = {"a": a, "b": b}
    ordered_dict_x = collections.OrderedDict(
        sorted(x.items(), key=lambda t: t[0]))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._OrderedDictNumpyFeedFn(
        placeholders, ordered_dict_x, batch_size=100, num_epochs=2)
    expected = {
        "index_placeholder": [0, 1, 0, 1],
        "a_placeholder": [32, 33, 32, 33],
        "b_placeholder": [64, 65, 64, 65]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))


if __name__ == "__main__":
  test.main()
| apache-2.0 |
piiswrong/mxnet | example/ssd/detect/detector.py | 30 | 7112 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import numpy as np
from timeit import default_timer as timer
from dataset.testdb import TestDB
from dataset.iterator import DetIter
class Detector(object):
"""
SSD detector which hold a detection network and wraps detection API
Parameters:
----------
symbol : mx.Symbol
detection network Symbol
model_prefix : str
name prefix of trained model
epoch : int
load epoch of trained model
data_shape : int
input data resize shape
mean_pixels : tuple of float
(mean_r, mean_g, mean_b)
batch_size : int
run detection with batch size
ctx : mx.ctx
device to use, if None, use mx.cpu() as default context
"""
    def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \
                 batch_size=1, ctx=None):
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = mx.cpu()
        load_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
        if symbol is None:
            symbol = load_symbol
        # use the resolved context so a None ctx actually falls back to mx.cpu()
        self.mod = mx.mod.Module(symbol, label_names=None, context=self.ctx)
        if not isinstance(data_shape, tuple):
            data_shape = (data_shape, data_shape)
        self.data_shape = data_shape
        self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape[0], data_shape[1]))])
        self.mod.set_params(args, auxs)
        self.mean_pixels = mean_pixels
    def detect(self, det_iter, show_timer=False):
        """
        detect all images in iterator

        Parameters:
        ----------
        det_iter : DetIter
            iterator for all testing images
        show_timer : Boolean
            whether to print out detection exec time

        Returns:
        ----------
        list of detection results
        """
        num_images = det_iter._size
        if not isinstance(det_iter, mx.io.PrefetchingIter):
            det_iter = mx.io.PrefetchingIter(det_iter)
        start = timer()
        detections = self.mod.predict(det_iter).asnumpy()
        time_elapsed = timer() - start
        if show_timer:
            print("Detection time for {} images: {:.4f} sec".format(
                num_images, time_elapsed))
        result = []
        for i in range(detections.shape[0]):
            det = detections[i, :, :]
            res = det[np.where(det[:, 0] >= 0)[0]]
            result.append(res)
        return result
    def im_detect(self, im_list, root_dir=None, extension=None, show_timer=False):
        """
        wrapper for detecting multiple images

        Parameters:
        ----------
        im_list : list of str
            image path or list of image paths
        root_dir : str
            directory of input images, optional if image path already
            has full directory information
        extension : str
            image extension, e.g. ".jpg", optional

        Returns:
        ----------
        list of detection results in format [det0, det1...], det is in
        format np.array([id, score, xmin, ymin, xmax, ymax]...)
        """
        test_db = TestDB(im_list, root_dir=root_dir, extension=extension)
        test_iter = DetIter(test_db, 1, self.data_shape, self.mean_pixels,
                            is_train=False)
        return self.detect(test_iter, show_timer)
    def visualize_detection(self, img, dets, classes=[], thresh=0.6):
        """
        visualize detections in one image

        Parameters:
        ----------
        img : numpy.array
            image, in bgr format
        dets : numpy.array
            ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])
            each row is one object
        classes : tuple or list of str
            class names
        thresh : float
            score threshold
        """
        import matplotlib.pyplot as plt
        import random
        plt.imshow(img)
        height = img.shape[0]
        width = img.shape[1]
        colors = dict()
        for i in range(dets.shape[0]):
            cls_id = int(dets[i, 0])
            if cls_id >= 0:
                score = dets[i, 1]
                if score > thresh:
                    if cls_id not in colors:
                        colors[cls_id] = (random.random(), random.random(), random.random())
                    xmin = int(dets[i, 2] * width)
                    ymin = int(dets[i, 3] * height)
                    xmax = int(dets[i, 4] * width)
                    ymax = int(dets[i, 5] * height)
                    rect = plt.Rectangle((xmin, ymin), xmax - xmin,
                                         ymax - ymin, fill=False,
                                         edgecolor=colors[cls_id],
                                         linewidth=3.5)
                    plt.gca().add_patch(rect)
                    class_name = str(cls_id)
                    if classes and len(classes) > cls_id:
                        class_name = classes[cls_id]
                    plt.gca().text(xmin, ymin - 2,
                                   '{:s} {:.3f}'.format(class_name, score),
                                   bbox=dict(facecolor=colors[cls_id], alpha=0.5),
                                   fontsize=12, color='white')
        plt.show()
    def detect_and_visualize(self, im_list, root_dir=None, extension=None,
                             classes=[], thresh=0.6, show_timer=False):
        """
        wrapper for im_detect and visualize_detection

        Parameters:
        ----------
        im_list : list of str or str
            image path or list of image paths
        root_dir : str or None
            directory of input images, optional if image path already
            has full directory information
        extension : str or None
            image extension, e.g. ".jpg", optional

        Returns:
        ----------
        """
        import cv2
        dets = self.im_detect(im_list, root_dir, extension, show_timer=show_timer)
        if not isinstance(im_list, list):
            im_list = [im_list]
        assert len(dets) == len(im_list)
        for k, det in enumerate(dets):
            img = cv2.imread(im_list[k])
            img[:, :, (0, 1, 2)] = img[:, :, (2, 1, 0)]
            self.visualize_detection(img, det, classes, thresh)
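# Minimal usage sketch of the class above; the checkpoint prefix, epoch number,
# image file and class names here are hypothetical placeholders, not values
# shipped with this module:
#
#   det = Detector(symbol=None, model_prefix='ssd_300', epoch=0,
#                  data_shape=300, mean_pixels=(123, 117, 104), ctx=mx.cpu())
#   det.detect_and_visualize(['street.jpg'], classes=['person', 'car'],
#                            thresh=0.5, show_timer=True)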
| apache-2.0 |
pandas-ml/pandas-ml | pandas_ml/skaccessors/test/test_neural_network.py | 2 | 1115 | #!/usr/bin/env python
import pytest
import sklearn.datasets as datasets
import sklearn.neural_network as nn
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestNeuralNetwork(tm.TestCase):
    def test_objectmapper(self):
        df = pdml.ModelFrame([])
        self.assertIs(df.neural_network.BernoulliRBM, nn.BernoulliRBM)
        self.assertIs(df.neural_network.MLPClassifier, nn.MLPClassifier)
        self.assertIs(df.neural_network.MLPRegressor, nn.MLPRegressor)

    @pytest.mark.parametrize("algo", ['BernoulliRBM'])
    def test_RBM(self, algo):
        digits = datasets.load_digits()
        df = pdml.ModelFrame(digits)

        mod1 = getattr(df.neural_network, algo)(random_state=self.random_state)
        mod2 = getattr(nn, algo)(random_state=self.random_state)

        df.fit(mod1)
        mod2.fit(digits.data, digits.target)

        result = df.transform(mod1)
        expected = mod2.transform(digits.data)

        self.assertIsInstance(result, pdml.ModelFrame)
        self.assert_numpy_array_almost_equal(result.data.values, expected)
| bsd-3-clause |
CalebBell/ht | tests/test_conv_tube_bank.py | 1 | 66185 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017 Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from ht import *
import numpy as np
from scipy.interpolate import interp1d, bisplrep, splrep, splev, UnivariateSpline, RectBivariateSpline
from fluids.numerics import assert_close, assert_close1d, assert_close2d, linspace
def test_Nu_Grimison_tube_bank_tcks():
    from ht.conv_tube_bank import Grimison_ST_aligned, Grimison_SL_aligned, Grimison_C1_aligned, Grimison_C1_aligned_tck
    Grimison_C1_aligned_interp = RectBivariateSpline(Grimison_ST_aligned,
                                                     Grimison_SL_aligned,
                                                     np.array(Grimison_C1_aligned))
    tck_recalc = Grimison_C1_aligned_interp.tck
    [assert_close1d(i, j) for i, j in zip(Grimison_C1_aligned_tck, tck_recalc)]

    from ht.conv_tube_bank import Grimison_m_aligned_tck, Grimison_m_aligned
    Grimison_m_aligned_interp = RectBivariateSpline(Grimison_ST_aligned,
                                                    Grimison_SL_aligned,
                                                    np.array(Grimison_m_aligned))
    tck_recalc = Grimison_m_aligned_interp.tck
    [assert_close1d(i, j) for i, j in zip(Grimison_m_aligned_tck, tck_recalc)]
def test_Nu_Grimison_tube_bank():
    Nu = Nu_Grimison_tube_bank(Re=10263.37, Pr=.708, tube_rows=11, pitch_normal=.05, pitch_parallel=.05, Do=.025)
    assert_close(Nu, 79.07883866010096)

    Nu = Nu_Grimison_tube_bank(Re=10263.37, Pr=.708, tube_rows=11, pitch_normal=.07, pitch_parallel=.05, Do=.025)
    assert_close(Nu, 79.92721078571385)

    Nu = Nu_Grimison_tube_bank(Re=10263.37, Pr=.708, tube_rows=7, pitch_normal=.05, pitch_parallel=.05, Do=.025)
    assert_close(Nu, 77.49726188689894)

    Nu = Nu_Grimison_tube_bank(Re=10263.37, Pr=.708, tube_rows=7, pitch_normal=.07, pitch_parallel=.05, Do=.025)
    assert_close(Nu, 78.32866656999958)

    # Test the negative input
    args = dict(Re=10263.37, Pr=.708, tube_rows=-1, pitch_normal=.07, pitch_parallel=.05, Do=.025)
    Nu_neg = Nu_Grimison_tube_bank(**args)
    args['tube_rows'] = 1
    Nu_pos = Nu_Grimison_tube_bank(**args)
    assert_close(Nu_neg, Nu_pos)

    # Check all data - for changing interpolations
    Nu_bulk = [[Nu_Grimison_tube_bank(Re=10263.37, Pr=.708, tube_rows=11, pitch_normal=j, pitch_parallel=i, Do=.025)
                for i in [.025, .04, .05, .75, .1, .15, .2]] for j in [.025, .04, .05, .75, .1, .15, .2]]
    Nu_bulk_expect = [[83.05244932418451, 152.02626127499462, 92.67853984384722, 80.45909971688272, 80.45909971688272, 80.45909971688272, 80.45909971688272],
                      [81.37409021240403, 75.87989409125535, 88.19403137832364, 90.10492890754932, 90.10492890754932, 90.10492890754932, 90.10492890754932],
                      [80.154658166616, 79.27931854213506, 79.07883866010096, 88.31182349500988, 88.31182349500988, 88.31182349500988, 88.31182349500988],
                      [73.98350370839236, 76.51020564051443, 78.3597838488104, 79.12612063682283, 86.25920529135, 86.25920529135, 86.25920529135],
                      [73.98350370839236, 76.51020564051443, 78.3597838488104, 86.25920529135, 79.12612063682283, 86.25920529135, 86.25920529135],
                      [73.98350370839236, 76.51020564051443, 78.3597838488104, 86.25920529135, 86.25920529135, 79.12612063682283, 86.25920529135],
                      [73.98350370839236, 76.51020564051443, 78.3597838488104, 86.25920529135, 86.25920529135, 86.25920529135, 79.12612063682283]]
    assert_close2d(Nu_bulk, Nu_bulk_expect)
def test_Gimison_coeffs_regeneration():
    # These fits are bad, don't check them
    # SciPy has warnings for both of them
    from ht.conv_tube_bank import (Grimson_SL_staggered, Grimson_ST_staggered,
                                   Grimson_m_staggered, Grimson_C1_staggered,
                                   tck_Grimson_m_staggered, tck_Grimson_C1_staggered)
# tck = bisplrep(Grimson_ST_staggered, Grimson_SL_staggered, Grimson_C1_staggered, kx=1, ky=1, task=0, s=0)
# [assert_close1d(i, j) for i, j in zip(tck, tck_Grimson_C1_staggered)]
#
# tck = bisplrep(Grimson_ST_staggered, Grimson_SL_staggered, Grimson_m_staggered, kx=1, ky=1, task=0, s=0)
# [assert_close1d(i, j) for i, j in zip(tck, tck_Grimson_m_staggered)]
#
def test_ESDU_tube_row_correction():
    F2 = ESDU_tube_row_correction(4, staggered=True)
    assert_close(F2, 0.8984, rtol=1e-4)

    F2 = ESDU_tube_row_correction(6, staggered=False)
    assert_close(F2, 0.9551, rtol=1E-4)

    # Test all of the inputs work
    all_values = [ESDU_tube_row_correction(i, staggered=j) for i in range(12) for j in (True, False)]
def test_ESDU_tube_row_correction_refit():
# Re-fit the data
from ht.conv_tube_bank import ESDU_73031_F2_inline, ESDU_73031_F2_staggered
## Commands used to obtain the fitted data:
ESDU_nrs = [3., 3.0189, 3.04129, 3.04891, 3.06251, 3.06481, 3.08274, 3.09166, 3.10623, 3.11518, 3.12645, 3.13538, 3.14668, 3.16219, 3.1669, 3.18571, 3.1871,
3.20732, 3.20924, 3.23084, 3.23273, 3.25107, 3.25625, 3.27126, 3.27977, 3.29808, 3.30329, 3.32816, 3.33011, 3.35363, 3.37712, 3.40394, 3.42746,
3.45428, 3.47777, 3.48683, 3.50461, 3.51691, 3.5281, 3.54369, 3.55492, 3.56721, 3.57841, 3.59077, 3.6019, 3.61426, 3.62872, 3.63778, 3.6949,
3.80655, 3.80918, 3.93332, 3.99431, 4.10792, 4.20204, 4.36618, 4.47351, 4.56706, 4.60082, 4.7014, 4.78854, 4.808, 4.90913, 4.97601, 5.05637, 5.0589,
5.1132, 5.11574, 5.14332, 5.14582, 5.1701, 5.17593, 5.19692, 5.20601, 5.22704, 5.2361, 5.25709, 5.26621, 5.28711, 5.293, 5.32308, 5.35319, 5.38324,
5.39435, 5.41336, 5.42088, 5.42116, 5.44347, 5.47355, 5.50364, 5.53372, 5.56159, 5.56383, 5.58834, 5.59392, 5.61846, 5.624, 5.64857, 5.65408,
5.67865, 5.68416, 5.70877, 5.71424, 5.73885, 5.74436, 5.76896, 5.77777, 5.79905, 5.80782, 5.82913, 5.8379, 5.85592, 5.86798, 5.88936, 5.89807,
5.92815, 5.95823, 5.98828, 6.01836, 6.02978, 6.04845, 6.06313, 6.08186, 6.09321, 6.11191, 6.1233, 6.14202, 6.15338, 6.17207, 6.18346, 6.20215,
6.21354, 6.23556, 6.24363, 6.26565, 6.27704, 6.2957, 6.30709, 6.32908, 6.33717, 6.35916, 6.36725, 6.38924, 6.41929, 6.42076, 6.45267, 6.48275,
6.5128, 6.52099, 6.54289, 6.55431, 6.57626, 6.58439, 6.60635, 6.6364, 6.66978, 6.69983, 6.71797, 6.72991, 6.74805, 6.76329, 6.78143, 6.79334,
6.81148, 6.82672, 6.84156, 6.8568, 6.87491, 6.89014, 6.90499, 6.9202, 6.93837, 6.95028, 6.96842, 6.98362, 6.99847, 7.01371, 7.03185, 7.04376,
7.0619, 7.07714, 7.09195, 7.11048, 7.14056, 7.17062, 7.17544, 7.19874, 7.20399, 7.23212, 7.23404, 7.26409, 7.2655, 7.29555, 7.29744, 7.3289,
7.33082, 7.35895, 7.36087, 7.389, 7.39425, 7.42234, 7.42427, 7.45239, 7.45432, 7.48574, 7.4877, 7.51579, 7.51772, 7.54914, 7.55109, 7.57919,
7.58114, 7.61253, 7.61449, 7.64454, 7.67792, 7.70794, 7.74132, 7.74599, 7.77134, 7.80471, 7.83473, 7.86811, 7.87284, 7.89813, 7.90273, 7.93148,
7.93275, 7.96153, 7.9661, 7.99487, 7.99612, 8.02493, 8.02947, 8.05827, 8.06281, 8.08829, 8.09283, 8.12164, 8.12618, 8.15502, 8.1562, 8.18503,
8.18951, 8.21838, 8.22286, 8.2484, 8.25288, 8.28178, 8.28619, 8.31509, 8.34514, 8.37849, 8.40851, 8.40956, 8.43958, 8.44186, 8.46957, 8.47187,
8.50291, 8.50522, 8.53524, 8.53623, 8.56625, 8.56859, 8.5986, 8.59956, 8.63192, 8.63288, 8.66286, 8.66527, 8.69529, 8.69621, 8.72863, 8.72953,
8.75865, 8.75951, 8.792, 8.79283, 8.82202, 8.82614, 8.84944, 8.85533, 8.88868, 8.91866, 8.94938, 8.95201, 8.98273, 8.98536, 9.01269, 9.01538,
9.04869, 9.08201, 9.18589, 9.22535, 9.25866, 9.28865, 9.32196, 9.35531, 9.38862, 9.41861, 9.45193, 9.48524, 9.51852, 9.55184, 9.58183, 9.61511,
9.64842, 9.68174, 9.71502, 9.74834, 9.77832, 9.81161, 9.84492, 9.8782, 9.91152, 9.94483, 9.97482
]
ESDU_F_in = [0.847863, 0.847938, 0.848157, 0.848231, 0.849657, 0.8498, 0.850916, 0.851272, 0.851854, 0.852412, 0.853114, 0.853669, 0.854374, 0.85534,
0.855633, 0.856658, 0.856734, 0.857993, 0.858083, 0.859091, 0.859208, 0.86035, 0.860633, 0.861451, 0.861747, 0.862385, 0.862491, 0.862997, 0.863133,
0.864769, 0.866404, 0.86827, 0.869906, 0.871772, 0.873407, 0.874037, 0.874399, 0.874649, 0.874973, 0.875424, 0.875948, 0.876521, 0.877119, 0.877778,
0.878223, 0.878716, 0.87939, 0.879813, 0.882477, 0.88784, 0.887966, 0.892807, 0.895431, 0.900319, 0.903854, 0.910018, 0.914049, 0.917017, 0.918089,
0.921673, 0.92463, 0.925231, 0.928356, 0.929894, 0.932218, 0.932273, 0.933445, 0.93351, 0.934217, 0.934289, 0.934992, 0.935195, 0.935926, 0.936159,
0.936698, 0.936834, 0.93715, 0.937239, 0.937443, 0.937639, 0.938643, 0.939648, 0.940651, 0.941021, 0.940661, 0.940518, 0.941956, 0.942443, 0.943101,
0.943758, 0.944416, 0.945025, 0.945076, 0.94564, 0.945783, 0.946412, 0.946554, 0.947183, 0.947295, 0.947795, 0.947937, 0.948567, 0.948679, 0.949179,
0.94932, 0.949951, 0.95013, 0.950563, 0.950742, 0.951175, 0.951429, 0.95195, 0.952227, 0.952719, 0.952909, 0.953567, 0.954224, 0.954881, 0.955538,
0.955788, 0.95595, 0.956078, 0.956459, 0.95669, 0.95707, 0.957302, 0.957683, 0.957914, 0.958294, 0.958526, 0.958906, 0.959138, 0.959586, 0.95975,
0.960152, 0.960359, 0.96064, 0.960812, 0.961259, 0.961424, 0.961871, 0.962036, 0.962541, 0.963232, 0.963266, 0.963848, 0.964396, 0.964944, 0.965093,
0.965178, 0.965223, 0.96567, 0.965835, 0.966183, 0.966659, 0.967188, 0.967664, 0.967952, 0.968195, 0.968564, 0.968769, 0.969013, 0.969193, 0.969466,
0.969776, 0.970078, 0.97021, 0.970367, 0.970678, 0.97098, 0.971184, 0.971429, 0.971608, 0.971881, 0.97211, 0.972334, 0.972539, 0.972783, 0.972962,
0.973235, 0.973465, 0.973688, 0.97399, 0.974481, 0.974972, 0.975051, 0.97503, 0.975101, 0.975479, 0.975505, 0.97591, 0.975929, 0.976381, 0.976397,
0.976671, 0.9767, 0.977123, 0.977152, 0.977575, 0.977621, 0.977865, 0.977894, 0.978317, 0.978334, 0.978607, 0.978636, 0.979059, 0.979076, 0.979349,
0.979378, 0.979801, 0.979818, 0.980091, 0.980113, 0.980446, 0.980815, 0.981148, 0.981517, 0.981569, 0.981929, 0.982404, 0.982831, 0.983305,
0.983373, 0.98308, 0.983026, 0.983307, 0.983319, 0.983569, 0.983609, 0.983889, 0.983901, 0.984151, 0.984191, 0.984441, 0.984481, 0.984729, 0.984773,
0.985023, 0.985063, 0.985344, 0.985355, 0.985468, 0.985485, 0.985736, 0.985775, 0.986024, 0.986068, 0.98618, 0.986198, 0.986434, 0.986679, 0.986952,
0.987197, 0.987206, 0.987498, 0.987508, 0.987631, 0.987651, 0.987921, 0.98793, 0.988047, 0.988051, 0.988343, 0.988352, 0.98847, 0.988473, 0.988599,
0.988603, 0.988736, 0.988757, 0.989018, 0.989026, 0.989152, 0.989156, 0.989285, 0.989289, 0.989415, 0.989419, 0.989532, 0.989548, 0.989528,
0.989551, 0.989681, 0.989798, 0.989917, 0.98994, 0.990207, 0.990205, 0.99018, 0.990188, 0.990281, 0.990374, 0.990664, 0.990775, 0.990868, 0.990952,
0.991045, 0.991138, 0.991231, 0.991315, 0.991408, 0.991501, 0.991594, 0.991687, 0.991771, 0.991864, 0.991957, 0.99205, 0.992143, 0.992236, 0.99232,
0.992413, 0.992506, 0.992599, 0.992692, 0.992785, 0.992869
]
ESDU_F_st = [0.859287, 0.859485, 0.860059, 0.860415, 0.861049, 0.861156, 0.861887, 0.86225, 0.86293, 0.863347, 0.863962, 0.864448, 0.864841, 0.865382,
0.865602, 0.866479, 0.866544, 0.867487, 0.867576, 0.868439, 0.868514, 0.869369, 0.869611, 0.870311, 0.870708, 0.871562, 0.871805, 0.872672, 0.87274,
0.873837, 0.874774, 0.875709, 0.876806, 0.877741, 0.878678, 0.879047, 0.879772, 0.880263, 0.88071, 0.881253, 0.881644, 0.882135, 0.882582, 0.883075,
0.883519, 0.88395, 0.884454, 0.884812, 0.88707, 0.891483, 0.891577, 0.896007, 0.898184, 0.901839, 0.904867, 0.909992, 0.913054, 0.915723, 0.91661,
0.919254, 0.921545, 0.922057, 0.924542, 0.926185, 0.92816, 0.928222, 0.929395, 0.929449, 0.93001, 0.930061, 0.930684, 0.930833, 0.93126, 0.931445,
0.931873, 0.932057, 0.932595, 0.932829, 0.933433, 0.933604, 0.934216, 0.934988, 0.93544, 0.935724, 0.936212, 0.936404, 0.936412, 0.936983, 0.937596,
0.938208, 0.93882, 0.939534, 0.939591, 0.94009, 0.940204, 0.940703, 0.940816, 0.941316, 0.941428, 0.941928, 0.94204, 0.94254, 0.942652, 0.943282,
0.943424, 0.943872, 0.944033, 0.944353, 0.944485, 0.944919, 0.945097, 0.945464, 0.945709, 0.946144, 0.946321, 0.946933, 0.947545, 0.947998, 0.94861,
0.948842, 0.949222, 0.94949, 0.949831, 0.950002, 0.950283, 0.950575, 0.951055, 0.951226, 0.951507, 0.951739, 0.952119, 0.952327, 0.952729, 0.952893,
0.953341, 0.953512, 0.953793, 0.953946, 0.954242, 0.954407, 0.954854, 0.955019, 0.955466, 0.955919, 0.955939, 0.956368, 0.95698, 0.957433, 0.957599,
0.958045, 0.958198, 0.958494, 0.958659, 0.959106, 0.959558, 0.960008, 0.96046, 0.960829, 0.961072, 0.961317, 0.961522, 0.961795, 0.961974, 0.962218,
0.962423, 0.962725, 0.963035, 0.963193, 0.963325, 0.963549, 0.963777, 0.964147, 0.96439, 0.964547, 0.964679, 0.964981, 0.965291, 0.965564, 0.965744,
0.965988, 0.966193, 0.966322, 0.966483, 0.967095, 0.967547, 0.967612, 0.967926, 0.967996, 0.96842, 0.968449, 0.968901, 0.968913, 0.969174, 0.969191,
0.969614, 0.96964, 0.970064, 0.970093, 0.970471, 0.970542, 0.970816, 0.970835, 0.971258, 0.971287, 0.97171, 0.971736, 0.97201, 0.972029, 0.972452,
0.972478, 0.972901, 0.972931, 0.973203, 0.97322, 0.973673, 0.974122, 0.974415, 0.974864, 0.97491, 0.975157, 0.975606, 0.975899, 0.976348, 0.976394,
0.976641, 0.976681, 0.97693, 0.97695, 0.977383, 0.977422, 0.977672, 0.977691, 0.978125, 0.978164, 0.978414, 0.978459, 0.978707, 0.978746, 0.978997,
0.979058, 0.979446, 0.979457, 0.979739, 0.979778, 0.980028, 0.980072, 0.980321, 0.980381, 0.98077, 0.980788, 0.9809, 0.981353, 0.981642, 0.981935,
0.981944, 0.982205, 0.982225, 0.982495, 0.982517, 0.982787, 0.982807, 0.983099, 0.983108, 0.983369, 0.983389, 0.983682, 0.983685, 0.983812, 0.98382,
0.98408, 0.984101, 0.984394, 0.984402, 0.984684, 0.984692, 0.984976, 0.984984, 0.985266, 0.985274, 0.985559, 0.985575, 0.985666, 0.985689, 0.985978,
0.986111, 0.986378, 0.986401, 0.986668, 0.98669, 0.986957, 0.986983, 0.987113, 0.987243, 0.98796, 0.988233, 0.988363, 0.988496, 0.988626, 0.988915,
0.989045, 0.989178, 0.989308, 0.989438, 0.989408, 0.989538, 0.989671, 0.989641, 0.989771, 0.989901, 0.989872, 0.990001, 0.990134, 0.990105,
0.990235, 0.990205, 0.990335, 0.990465, 0.990598
]
ESDU_in = interp1d(ESDU_nrs, ESDU_F_in)
ESDU_st = interp1d(ESDU_nrs, ESDU_F_st)
inline_factors = [round(float(ESDU_in(i)),4) for i in range(3, 10)]
staggered_factors = [round(float(ESDU_st(i)),4) for i in range(3, 10)]
assert_close1d(inline_factors, ESDU_73031_F2_inline)
assert_close1d(staggered_factors, ESDU_73031_F2_staggered)
#import matplotlib.pyplot as plt
#plt.plot(ESDU_nrs, ESDU_F_in)
#plt.plot(ESDU_nrs, ESDU_F_st)
#
#plt.plot(range(3, 10), inline_factors, 'o')
#plt.plot(range(3, 10), staggered_factors, 'o')
#plt.show()
def test_ESDU_tube_angle_correction():
    F3 = ESDU_tube_angle_correction(75)
    assert_close(F3, 0.9794139080247666)
# Digitized data from graph
# angles = [19.7349, 20.1856, 20.4268, 20.8778, 21.3597, 21.8404, 22.3523, 22.8326, 23.3148, 23.8252, 24.1867, 24.6375, 25.0891, 25.5697, 26.021, 26.5312, 26.9528, 27.4633, 27.9745, 28.455, 28.8762, 29.3867, 30.0483, 30.5291, 30.9798, 31.4905, 31.9712, 32.4519, 32.9026, 33.3833, 33.8938, 34.3743, 34.855, 35.3655, 35.846, 36.3268, 36.8372, 37.3178, 37.8282, 38.3385, 38.8491, 39.3893, 39.8995, 40.38, 40.9203, 41.4305, 41.9706, 42.511, 43.081, 43.5912, 44.1612, 44.7312, 45.2416, 45.8415, 46.3814, 46.9513, 47.5213, 48.0911, 48.6611, 49.231, 49.8008, 50.4004, 50.9701, 51.5698, 52.1694, 52.7393, 53.3386, 53.9382, 54.5378, 55.1372, 55.7069, 56.3062, 56.9356, 57.5648, 58.1642, 58.7935, 59.4227, 60.0818, 60.711, 61.3103, 61.9694, 62.5986, 63.2573, 63.9163, 64.5752, 65.234, 65.8929, 66.5514, 67.2102, 67.869, 68.5575, 69.2162, 69.9047, 70.5632, 71.2813, 71.94, 72.6284, 73.3464, 74.0346, 74.7526, 75.4706, 76.1587, 76.8765, 77.5944, 78.3122, 79.0299, 79.7476, 80.4952, 81.2129, 81.9305, 82.6479, 83.3952, 84.083, 84.8003, 85.5179, 86.2652, 86.9825, 87.7295, 88.4468, 89.1642, 89.9112, 90]
# F3s = [0.528819, 0.534137, 0.538566, 0.544474, 0.552447, 0.557766, 0.566034, 0.570763, 0.579326, 0.584351, 0.590551, 0.59587, 0.602957, 0.608276, 0.614774, 0.619504, 0.626296, 0.631616, 0.63841, 0.643434, 0.649342, 0.654662, 0.663523, 0.669137, 0.674456, 0.680071, 0.685685, 0.691004, 0.696322, 0.701641, 0.706961, 0.711986, 0.7176, 0.72292, 0.727944, 0.733558, 0.738583, 0.743902, 0.748928, 0.753953, 0.759273, 0.764299, 0.769029, 0.774053, 0.779079, 0.78381, 0.788541, 0.793862, 0.798594, 0.803324, 0.808056, 0.812788, 0.817813, 0.822546, 0.826982, 0.831419, 0.836151, 0.840588, 0.84532, 0.849757, 0.854194, 0.858337, 0.86248, 0.866917, 0.871061, 0.875498, 0.879051, 0.883194, 0.887337, 0.891185, 0.895328, 0.898881, 0.90273, 0.906284, 0.910133, 0.913982, 0.917536, 0.921091, 0.924645, 0.928199, 0.931754, 0.935308, 0.938273, 0.941533, 0.944794, 0.947759, 0.951019, 0.953395, 0.95636, 0.959326, 0.961997, 0.964668, 0.967339, 0.969715, 0.971797, 0.974468, 0.976844, 0.978632, 0.980714, 0.982501, 0.984289, 0.986076, 0.987569, 0.989062, 0.990555, 0.991753, 0.992951, 0.99415, 0.995348, 0.996251, 0.996859, 0.997468, 0.998666, 0.998979, 0.999883, 1, 1, 1, 1, 1, 1, 1]
#
# import matplotlib.pyplot as plt
# import numpy as np
#
# plt.plot(angles, F3s)
# plt.plot(angles, np.sin(np.radians(angles))**0.6)
# plt.show()
def test_Zukauskas_tube_row_correction():
    F = Zukauskas_tube_row_correction(4, staggered=True)
    assert_close(F, 0.8942)

    F = Zukauskas_tube_row_correction(6, staggered=False)
    assert_close(F, 0.9465)
def test_Zukauskas_tube_row_correction_refit():
from scipy.interpolate import UnivariateSpline
from ht.conv_tube_bank import Zukauskas_Czs_low_Re_staggered, Zukauskas_Czs_high_Re_staggered, Zukauskas_Czs_inline
# Commands used to obtain the fitted data:
Zukauskas_Cz_Zs = [0.968219, 1.01968, 1.04164, 1.04441, 1.07539, 1.09332, 1.13914, 1.16636, 1.23636, 1.2394, 1.24505, 1.3125, 1.33358, 1.38554, 1.43141, 1.48282, 1.4876, 1.55352, 1.58004, 1.60466, 1.65726, 1.67493, 1.70188, 1.79682, 1.91823, 1.99323, 1.99665, 2.04002, 2.16306, 2.18556, 2.19045, 2.30691, 2.3086, 2.36006, 2.45272, 2.45413, 2.57543, 2.59826, 2.72341, 2.7451, 2.8896, 2.91482, 2.98759, 3.1572, 3.23203, 3.25334, 3.3511, 3.42295, 3.4499, 3.52072, 3.6168, 3.83565, 3.9076, 3.9826, 4.02939, 4.17411, 4.20042, 4.44242, 4.48937, 4.61023, 4.82811, 4.95071, 5.07038, 5.28825, 5.31232, 5.3621, 5.50606, 5.53014, 5.60405, 5.74801, 5.74807, 5.82181, 5.99012, 5.99017, 6.13636, 6.23207, 6.23212, 6.37826, 6.44983, 6.44988, 6.62015, 6.69183, 6.69188, 6.95807, 6.95812, 6.98312, 7.1767, 7.20001, 7.41772, 7.41848, 7.65967, 7.87743, 7.90156, 7.95003, 7.97416, 7.97476, 8.21606, 8.2166, 8.45795, 8.60365, 8.67571, 8.79712, 8.91809, 8.96597, 9.18368, 9.20824, 9.42551, 9.45013, 9.66741, 9.69197, 10.0786, 10.3208, 10.5623, 10.5626, 10.7803, 10.9737, 10.9978, 11.2398, 11.2399, 11.4574, 11.4575, 11.6993, 11.7478, 11.9653, 11.9896, 12.2072, 12.2315, 12.4491, 12.691, 12.7152, 12.9812, 13.2231, 13.2715, 13.465, 13.7068, 13.9246, 13.9487, 14.1905, 14.4324, 14.6743, 14.9161, 14.9887, 15.2305, 15.4724, 15.7142, 15.787, 15.811, 15.8835, 16.0046, 16.0287, 16.2465, 16.3673, 16.4883, 16.5124, 16.706, 16.7301, 16.9477, 16.9479, 17.1897, 17.2138, 17.4315, 17.6734, 17.9152, 17.9636, 18.2054, 18.2055, 18.4473, 18.6891, 18.9068, 18.931, 18.9793, 19.2212, 19.4631, 19.5599, 19.7049, 19.9467, 19.9952]
low_Re_staggered_Cz = [0.828685, 0.831068, 0.832085, 0.832213, 0.833647, 0.834478, 0.836599, 0.83786, 0.8411, 0.841241, 0.841503, 0.845561, 0.84683, 0.849956, 0.852715, 0.855808, 0.856096, 0.859148, 0.860376, 0.861516, 0.863952, 0.864828, 0.866165, 0.870874, 0.876897, 0.880617, 0.880787, 0.882293, 0.886566, 0.887348, 0.887517, 0.89214, 0.892207, 0.894249, 0.897396, 0.897444, 0.901563, 0.902338, 0.906589, 0.907258, 0.911719, 0.912497, 0.914744, 0.91998, 0.92229, 0.922729, 0.92474, 0.926218, 0.926772, 0.928561, 0.930987, 0.936514, 0.938332, 0.940226, 0.940947, 0.943179, 0.943584, 0.946941, 0.947769, 0.9499, 0.95374, 0.955902, 0.957529, 0.960492, 0.96082, 0.961497, 0.962826, 0.963048, 0.96373, 0.965208, 0.965208, 0.965965, 0.967759, 0.96776, 0.969318, 0.969757, 0.969758, 0.970428, 0.970757, 0.970757, 0.971538, 0.972422, 0.972422, 0.975703, 0.975704, 0.976012, 0.978249, 0.978139, 0.977115, 0.977111, 0.977585, 0.978013, 0.97806, 0.978155, 0.978202, 0.978204, 0.97819, 0.97819, 0.979578, 0.980416, 0.980411, 0.980405, 0.981521, 0.981333, 0.980478, 0.980382, 0.981379, 0.981492, 0.981479, 0.981478, 0.982147, 0.982566, 0.982553, 0.982553, 0.98254, 0.981406, 0.98171, 0.98476, 0.984762, 0.98475, 0.98475, 0.984736, 0.984733, 0.985732, 0.985843, 0.986842, 0.986953, 0.985817, 0.986825, 0.986926, 0.986911, 0.987834, 0.988018, 0.988008, 0.987994, 0.991353, 0.991148, 0.98909, 0.9902, 0.990187, 0.991297, 0.991293, 0.991279, 0.991266, 0.992054, 0.992292, 0.99237, 0.992366, 0.992359, 0.992358, 0.993068, 0.993463, 0.993456, 0.993454, 0.994443, 0.994566, 0.994553, 0.994553, 0.99454, 0.994539, 0.996774, 0.99676, 0.996746, 0.996744, 0.99673, 0.99673, 0.997466, 0.998201, 0.998863, 0.998936, 0.99902, 0.999439, 0.999857, 1.00002, 1.00002, 1, 1]
high_Re_staggered_Cz = [0.617923, 0.630522, 0.635897, 0.636344, 0.64134, 0.644232, 0.651621, 0.654452, 0.661728, 0.662045, 0.662632, 0.669643, 0.671835, 0.683767, 0.694302, 0.704706, 0.705673, 0.719014, 0.721221, 0.72327, 0.727649, 0.729119, 0.73359, 0.749337, 0.759443, 0.770509, 0.771014, 0.777413, 0.785006, 0.786394, 0.786756, 0.795376, 0.795545, 0.800697, 0.809975, 0.810062, 0.817547, 0.818955, 0.829084, 0.830839, 0.842534, 0.843935, 0.847977, 0.857398, 0.861555, 0.862739, 0.866619, 0.869471, 0.870563, 0.873432, 0.877325, 0.886614, 0.889668, 0.89251, 0.894282, 0.899765, 0.900781, 0.910119, 0.911931, 0.916595, 0.921077, 0.925619, 0.930052, 0.932064, 0.932286, 0.933053, 0.935273, 0.935644, 0.937165, 0.940127, 0.940128, 0.941835, 0.945731, 0.945731, 0.947081, 0.947964, 0.947965, 0.949465, 0.950199, 0.9502, 0.952562, 0.953557, 0.953558, 0.958036, 0.958037, 0.958267, 0.960054, 0.96027, 0.961381, 0.961388, 0.963615, 0.964614, 0.964725, 0.965472, 0.965844, 0.965847, 0.966954, 0.966957, 0.968064, 0.96956, 0.970299, 0.970762, 0.971224, 0.971406, 0.972518, 0.972516, 0.972504, 0.972617, 0.973614, 0.97368, 0.974715, 0.975264, 0.975811, 0.975814, 0.978048, 0.980033, 0.980281, 0.982515, 0.982515, 0.982502, 0.982503, 0.983612, 0.98361, 0.983597, 0.983709, 0.984707, 0.984819, 0.985817, 0.985804, 0.985896, 0.986911, 0.986898, 0.98712, 0.988008, 0.987994, 0.988994, 0.989104, 0.98909, 0.9902, 0.990187, 0.991297, 0.991293, 0.991279, 0.991266, 0.991252, 0.995742, 0.994903, 0.992366, 0.99573, 0.995729, 0.995716, 0.99571, 0.995703, 0.995826, 0.996814, 0.996813, 0.996801, 0.996801, 0.996787, 0.996786, 0.996774, 0.99676, 0.997682, 0.997867, 0.997854, 0.997854, 0.99784, 0.997826, 0.997814, 0.997813, 0.99781, 0.99892, 0.998907, 0.998901, 0.998893, 0.998879, 0.998877]
inline_Cz = [0.658582, 0.681965, 0.69194, 0.6932, 0.700314, 0.704433, 0.710773, 0.714541, 0.724228, 0.724649, 0.725518, 0.735881, 0.738799, 0.74599, 0.751285, 0.75722, 0.757717, 0.76457, 0.767327, 0.776314, 0.781783, 0.783619, 0.786421, 0.794105, 0.803931, 0.81, 0.810227, 0.813093, 0.821227, 0.822615, 0.822917, 0.830103, 0.830207, 0.833383, 0.839101, 0.839188, 0.847046, 0.848103, 0.853898, 0.854902, 0.862547, 0.863881, 0.868371, 0.875104, 0.878568, 0.879555, 0.884081, 0.886933, 0.888003, 0.890813, 0.894236, 0.902032, 0.904114, 0.906285, 0.907639, 0.910812, 0.911389, 0.916696, 0.917725, 0.92029, 0.924912, 0.927513, 0.930052, 0.934534, 0.934906, 0.935673, 0.937893, 0.938227, 0.939252, 0.941249, 0.94125, 0.942957, 0.946853, 0.946854, 0.948204, 0.949088, 0.949088, 0.950588, 0.951322, 0.951323, 0.953685, 0.954679, 0.95468, 0.959159, 0.95916, 0.959274, 0.960163, 0.96027, 0.961381, 0.961388, 0.963615, 0.96585, 0.966222, 0.966969, 0.966968, 0.966968, 0.966954, 0.966957, 0.968064, 0.96956, 0.970299, 0.970762, 0.971224, 0.971406, 0.972518, 0.972516, 0.972504, 0.972617, 0.973614, 0.973737, 0.975675, 0.976888, 0.978099, 0.9781, 0.979191, 0.98016, 0.980281, 0.982515, 0.982515, 0.982502, 0.982503, 0.983612, 0.98361, 0.983597, 0.983709, 0.984707, 0.984819, 0.985817, 0.985804, 0.985896, 0.986911, 0.986898, 0.98712, 0.988008, 0.987994, 0.988994, 0.989104, 0.98909, 0.9902, 0.990187, 0.991297, 0.991293, 0.991279, 0.991266, 0.991252, 0.995742, 0.994903, 0.992366, 0.99573, 0.995729, 0.995716, 0.99571, 0.995703, 0.995826, 0.996814, 0.996813, 0.996801, 0.996801, 0.996787, 0.996786, 0.996774, 0.99676, 0.997682, 0.997867, 0.997854, 0.997854, 0.99784, 0.997826, 0.997814, 0.997813, 0.99781, 0.99892, 0.998907, 0.998901, 0.998893, 0.998879, 0.998877]
# hand tuned smoothing
Zukauskas_Cz_low_Re_staggered_obj = UnivariateSpline(Zukauskas_Cz_Zs, low_Re_staggered_Cz, s=0.0001)
Zukauskas_Cz_high_Re_staggered_obj = UnivariateSpline(Zukauskas_Cz_Zs, high_Re_staggered_Cz, s=0.0005)
Zukauskas_Cz_inline_obj = UnivariateSpline(Zukauskas_Cz_Zs, inline_Cz, s=0.0005)
Zukauskas_Czs_inline2 = np.round(Zukauskas_Cz_inline_obj(range(1, 20)), 4).tolist()
assert_close1d(Zukauskas_Czs_inline, Zukauskas_Czs_inline2)
Zukauskas_Czs_low_Re_staggered2 = np.round(Zukauskas_Cz_low_Re_staggered_obj(range(1, 20)), 4).tolist()
assert_close1d(Zukauskas_Czs_low_Re_staggered, Zukauskas_Czs_low_Re_staggered2)
Zukauskas_Czs_high_Re_staggered2 = np.round(Zukauskas_Cz_high_Re_staggered_obj(range(1, 20)), 4).tolist()
assert_close1d(Zukauskas_Czs_high_Re_staggered, Zukauskas_Czs_high_Re_staggered2)
def test_Nu_Zukauskas_Bejan():
    Nu = Nu_Zukauskas_Bejan(Re=1E4, Pr=7., tube_rows=10, pitch_parallel=.05, pitch_normal=.05)
    assert_close(Nu, 175.9202277145248)

    Nu = Nu_Zukauskas_Bejan(Re=1E4, Pr=7., tube_rows=10, pitch_parallel=.05, pitch_normal=.05, Pr_wall=9.0)
    assert_close(Nu, 165.2074626671159)

    Nus = [Nu_Zukauskas_Bejan(Re=Re, Pr=7., tube_rows=30, pitch_parallel=.05, pitch_normal=.05) for Re in (10, 2000, 1E5, 1E7)]
    Nus_expect = [4.554889061992833, 65.35035570869223, 768.4207053648229, 26469.71311148279]
    assert_close1d(Nus, Nus_expect)

    Nus = [Nu_Zukauskas_Bejan(Re=Re, Pr=7., tube_rows=30, pitch_parallel=.05, pitch_normal=.09) for Re in (10, 2000, 1E5, 1E7)]
    Nus_expect = [5.263427360525052, 75.85353712516013, 793.1545862201796, 27967.361063088636]
    assert_close1d(Nus, Nus_expect)
def test_Nu_ESDU_73031():
    Nu = Nu_ESDU_73031(Re=1.32E4, Pr=0.71, tube_rows=8, pitch_parallel=.09, pitch_normal=.05)
    assert_close(98.2563319140594, Nu)

    Nu = Nu_ESDU_73031(Re=1.32E4, Pr=0.71, tube_rows=8, pitch_parallel=.09, pitch_normal=.05, Pr_wall=0.71)
    assert_close(98.2563319140594, Nu)

    Nu = Nu_ESDU_73031(Re=1.32E4, Pr=0.71, tube_rows=8, pitch_parallel=.05, pitch_normal=.05, Pr_wall=0.75)
    assert_close(87.69324193674449, Nu)

    Nu = Nu_ESDU_73031(Re=1.32E4, Pr=0.71, tube_rows=3, pitch_parallel=.05, pitch_normal=.05, Pr_wall=0.75)
    assert_close(Nu, 75.57180591337092)

    Nus = [Nu_ESDU_73031(Re=Re, Pr=0.71, tube_rows=3, pitch_parallel=pp, pitch_normal=.05, Pr_wall=0.75) for pp in [0.09, 0.05] for Re in [100, 1E5, 1E6]]
    Nus_expect = [5.179925804379317, 307.9970377601136, 1481.8545490578865, 4.0177935875859365, 282.40096167747, 1367.860174719831]
    assert_close1d(Nus, Nus_expect)
def test_Nu_HEDH_tube_bank():
    Nu = Nu_HEDH_tube_bank(Re=1E4, Pr=7., tube_rows=10, pitch_normal=.05, pitch_parallel=.05, Do=.03)
    assert_close(Nu, 382.4636554404698)

    Nu = Nu_HEDH_tube_bank(Re=10263.37, Pr=.708, tube_rows=11, pitch_normal=.05, pitch_parallel=.05, Do=.025)
    assert_close(Nu, 149.18735251017594)

    Nu = Nu_HEDH_tube_bank(Re=1E4, Pr=7., tube_rows=5, pitch_normal=.05, pitch_parallel=.05, Do=.03)
    assert_close(Nu, 359.0551204831393)
def test_dP_Kern():
    from ht.conv_tube_bank import Kern_f_Re
    f = [Kern_f_Re(v) for v in linspace(10, 1E6, 10)]
    f_values = [6.0155491322862771, 0.19881943524161752, 0.1765198121811164, 0.16032260681398205, 0.14912064432650635, 0.14180674990498099, 0.13727374873569789, 0.13441446600494875, 0.13212172689902535, 0.12928835660421958]
    assert_close1d(f, f_values)

    dP = dP_Kern(11., 995., 0.000803, 0.584, 0.1524, 0.0254, .019, 22, 0.000657)
    assert_close(dP, 18980.58768759033)

    dP = dP_Kern(m=11., rho=995., mu=0.000803, DShell=0.584, LSpacing=0.1524, pitch=0.0254, Do=.019, NBaffles=22)
    assert_close(dP, 19521.38738647667)
def test_dP_Kern_data():
    from ht.conv_tube_bank import Kern_f_Re_tck
    _Kern_dP_Res = np.array([9.9524, 11.0349, 12.0786, 13.0504, 14.0121, 15.0431, 16.1511, 17.1176, 17.9105, 18.9822,
19.9879, 21.0484, 22.0217, 23.1893, 24.8973, 26.0495, 27.7862, 29.835, 31.8252, 33.9506, 35.9822, 38.3852,
41.481, 43.9664, 47.2083, 50.6891, 54.0782, 58.0635, 63.5667, 68.2537, 74.247, 78.6957, 83.9573, 90.1511,
95.5596, 102.613, 110.191, 116.806, 128.724, 137.345, 150.384, 161.484, 171.185, 185.031, 196.139, 210.639,
230.653, 250.933, 281.996, 300.884, 329.472, 353.842, 384.968, 408.108, 444.008, 505.513, 560.821, 638.506,
690.227, 741.254, 827.682, 918.205, 1018.63, 1122.76, 1213.62, 1320.38, 1417.94, 1522.93, 1667.69, 1838.11,
2012.76, 2247.44, 2592.21, 2932.18, 3381.87, 3875.42, 4440.83, 5056.16, 5608.95, 6344.58, 7038.48, 8224.34,
9123.83, 10121.7, 11598, 12701.4, 14090, 15938.5, 17452.9, 19112.6, 20929.3, 24614, 29324.6, 34044.8,
37282.2, 42999.9, 50570.2, 55737.9, 59860.6, 65553, 70399.2, 78101.5, 84965.7, 96735.3, 110139, 122977,
136431, 152339, 165740, 180319, 194904, 207981, 223357, 241440, 257621, 283946, 317042, 353996, 408315,
452956, 519041, 590939, 668466, 751216, 827981, 894985, 1012440
])
    _Kern_dP_fs = 144.0 * np.array([0.0429177, 0.0382731, 0.0347901, 0.0316208, 0.0298653, 0.0276702, 0.0259671, 0.024523,
0.0237582, 0.0224369, 0.0211881, 0.0202668, 0.0193847, 0.0184234, 0.0172894, 0.0166432, 0.0155182,
0.0147509, 0.0138423, 0.0131572, 0.0124255, 0.0118105, 0.0110842, 0.0106028, 0.0100785, 0.00958019,
0.0092235, 0.00871144, 0.00817649, 0.0077722, 0.00743616, 0.0071132, 0.00684836, 0.00655159, 0.00634789,
0.00611185, 0.00592242, 0.00577517, 0.00552603, 0.00542355, 0.00522267, 0.00502847, 0.00493497, 0.00481301,
0.00469334, 0.00460654, 0.00449314, 0.00438231, 0.00424799, 0.00416922, 0.00406658, 0.00401703, 0.00394314,
0.0038947, 0.00382305, 0.00373007, 0.00368555, 0.00359592, 0.00357512, 0.003509, 0.00344515, 0.00338229,
0.00332057, 0.00328077, 0.00322026, 0.00316102, 0.00308274, 0.00308446, 0.00302787, 0.00297247, 0.0028993,
0.00284654, 0.00277759, 0.0027099, 0.00262738, 0.00256361, 0.00248541, 0.00244055, 0.00238072, 0.0023227,
0.00228032, 0.00222531, 0.00218471, 0.00214484, 0.00206613, 0.00205439, 0.00200402, 0.00196775, 0.00191932,
0.00189622, 0.00186143, 0.00180501, 0.0017393, 0.00170817, 0.00168761, 0.00163622, 0.00158663, 0.0015576,
0.00153862, 0.0015201, 0.00149199, 0.00147418, 0.00142864, 0.00139389, 0.00136874, 0.00133524, 0.00131931,
0.0012953, 0.00127147, 0.00124808, 0.00121724, 0.00121785, 0.00119533, 0.00118082, 0.00116638, 0.00114504,
0.00111702, 0.00108969, 0.00107013, 0.00104389, 0.00101205, 0.000987437, 0.000969567, 0.000939849,
0.000922653, 0.000905634, 0.000894962
])
# # Used in preference over interp1d as saves 30% of execution time, and
# # performs some marginally small amount of smoothing
# # s=0.1 is chosen to have 9 knots, a reasonable amount.
# Kern_f_Re = UnivariateSpline(_Kern_dP_Res, _Kern_dP_fs, s=0.1)
    tck = splrep(_Kern_dP_Res, _Kern_dP_fs, s=0.1)
    [assert_close1d(i, j) for i, j in zip(Kern_f_Re_tck[:-1], tck[:-1])]
def test_dP_Zukauskas():
    # TODO Splines
    dP1 = dP_Zukauskas(Re=13943., n=7, ST=0.0313, SL=0.0343, D=0.0164, rho=1.217, Vmax=12.6)
    dP2 = dP_Zukauskas(Re=13943., n=7, ST=0.0313, SL=0.0313, D=0.0164, rho=1.217, Vmax=12.6)
    assert_close1d([dP1, dP2], [235.22916169118335, 217.0750033117563])
Bell_baffle_configuration_Fcs = np.array([0, 0.0138889, 0.0277778, 0.0416667, 0.0538194, 0.0659722, 0.100694, 0.114583,
0.126736, 0.140625, 0.152778, 0.166667, 0.178819, 0.192708, 0.215278, 0.227431, 0.241319, 0.255208,
0.267361, 0.28125, 0.295139, 0.340278, 0.354167, 0.366319, 0.380208, 0.394097, 0.402778, 0.416667, 0.430556,
0.444444, 0.475694, 0.489583, 0.503472, 0.517361, 0.53125, 0.545139, 0.560764, 0.574653, 0.588542, 0.625,
0.638889, 0.652778, 0.668403, 0.682292, 0.697917, 0.701389, 0.713542, 0.729167, 0.743056, 0.758681,
0.802083, 0.817708, 0.833333, 0.848958, 0.866319, 0.881944, 0.901042, 0.918403, 0.934028, 0.947917,
0.960069, 0.970486, 0.977431, 0.984375, 0.991319, 0.994792, 1
])
Bell_baffle_configuration_Jcs = np.array([0.534317, 0.544632, 0.556665, 0.566983, 0.579014, 0.591045, 0.620271,
0.630589, 0.640904, 0.652937, 0.663252, 0.675286, 0.685601, 0.697635, 0.71483, 0.725145, 0.737179, 0.747497,
0.757812, 0.76813, 0.780163, 0.81627, 0.826588, 0.836903, 0.847221, 0.857539, 0.867848, 0.874734, 0.885052,
0.89537, 0.916012, 0.92633, 0.936648, 0.946966, 0.955568, 0.965886, 0.974492, 0.984809, 0.993412, 1.01578,
1.0261, 1.0347, 1.0433, 1.05362, 1.06223, 1.06052, 1.07083, 1.07944, 1.08804, 1.09664, 1.11731, 1.1242,
1.13109, 1.13798, 1.14487, 1.15004, 1.15522, 1.15354, 1.1467, 1.13815, 1.12787, 1.11588, 1.10388, 1.09017,
1.07474, 1.05759, 1.03015
])
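# Reference (Fc, Jc) data pairs for the Bell baffle-configuration correction
# factor; test_baffle_correction_Bell below checks the correlation against these
# points, and test_baffle_correction_Bell_fit refits the spline from them.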
def test_baffle_correction_Bell():
    Jc = baffle_correction_Bell(0.82)
    assert_close(Jc, 1.1258554691854046, 5e-4)

    # Check the match is reasonably good
    errs = np.array([(baffle_correction_Bell(float(Fc))-Jc)/Jc for Fc, Jc in zip(Bell_baffle_configuration_Fcs, Bell_baffle_configuration_Jcs)])
    assert np.abs(errs).sum()/len(errs) < 1e-3

    Jc = baffle_correction_Bell(0.1, 'chebyshev')
    assert_close(Jc, 0.61868011359447)

    Jc = baffle_correction_Bell(0.82, 'HEDH')
    assert_close(Jc, 1.1404)

    # Example in spreadsheet 02 - Heat Exchangers, tab Shell htc imperial,
    # Rules of Thumb for Chemical Engineers 5E
    Jc = baffle_correction_Bell(0.67292816689362900, method='HEDH')
    assert_close(1.034508280163413, Jc)
def test_baffle_correction_Bell_fit():
    from ht.conv_tube_bank import Bell_baffle_configuration_tck
    # 125 us to create.
    spl = splrep(Bell_baffle_configuration_Fcs, Bell_baffle_configuration_Jcs, s=8e-5)
    [assert_close1d(i, j) for (i, j) in zip(spl[:-1], Bell_baffle_configuration_tck[:-1])]

    Bell_baffle_configuration_obj = UnivariateSpline(Bell_baffle_configuration_Fcs,
                                                     Bell_baffle_configuration_Jcs,
                                                     s=8e-5)
# import matplotlib.pyplot as plt
# plt.plot(Bell_baffle_configuration_Fcs, Bell_baffle_configuration_Jcs)
# pts = np.linspace(0, 1, 5000)
# plt.plot(pts, [Bell_baffle_configuration_obj(i) for i in pts])
# plt.plot(pts, [0.55 + 0.72*i for i in pts]) # Serth and HEDH 3.3.6g misses the tip
# plt.show()
#
Bell_baffle_leakage_x = np.array([0.0, 1e-5, 1e-4, 1e-3, 0.0037779, 0.00885994, 0.012644, 0.0189629, 0.0213694, 0.0241428, 0.0289313, 0.0339093, 0.0376628,
0.0425124, 0.0487152, 0.0523402, 0.0552542, 0.0614631, 0.0676658, 0.0719956, 0.0770838, 0.081302, 0.0885214, 0.0956308, 0.101638, 0.102145,
0.111508, 0.119266, 0.12261, 0.129155, 0.136778, 0.144818, 0.148914, 0.15592, 0.164774, 0.16868, 0.177552, 0.181501, 0.189224, 0.196087, 0.200557,
0.209209, 0.220317, 0.230683, 0.236096, 0.242525, 0.247198, 0.255653, 0.2591, 0.266228, 0.274193, 0.281732, 0.285993, 0.295601, 0.302042, 0.311269,
0.312575, 0.322107, 0.33016, 0.332909, 0.341261, 0.347109, 0.353899, 0.360408, 0.369312, 0.374301, 0.380413, 0.388831, 0.392836, 0.401746, 0.403961,
0.413723, 0.422502, 0.424825, 0.432931, 0.442274, 0.450602, 0.454815, 0.463804, 0.46923, 0.475645, 0.483563, 0.491432, 0.501277, 0.501713, 0.510247,
0.513193, 0.523506, 0.530019, 0.534607, 0.544912, 0.550679, 0.557212, 0.563826, 0.569142, 0.576997, 0.583585, 0.588979, 0.595518, 0.601215,
0.601702, 0.611585, 0.613221, 0.623417, 0.629753, 0.634211, 0.640009, 0.646851, 0.653971, 0.665084, 0.672758, 0.683136, 0.689056, 0.698932,
0.702129, 0.711523, 0.712532, 0.722415, 0.724566, 0.732996, 0.738886, 0.743614
])
Bell_baffle_leakage_z_0 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.982615, 0.962505, 0.952607, 0.939987, 0.935206, 0.930216, 0.922288, 0.91564, 0.910813,
0.904659, 0.896788, 0.892188, 0.889224, 0.883885, 0.879147, 0.875888, 0.872059, 0.868884, 0.86345, 0.858585, 0.854816, 0.854561, 0.849863, 0.846402,
0.844911, 0.841261, 0.837352, 0.833765, 0.831938, 0.828814, 0.824864, 0.823122, 0.819164, 0.817403, 0.813958, 0.810877, 0.80887, 0.804985, 0.799998,
0.795344, 0.792913, 0.790046, 0.787961, 0.78419, 0.782652, 0.779473, 0.776134, 0.773108, 0.771208, 0.766569, 0.762947, 0.758832, 0.758249, 0.753997,
0.750695, 0.749592, 0.746005, 0.743396, 0.740368, 0.737464, 0.733493, 0.731267, 0.728541, 0.723847, 0.721761, 0.717786, 0.716798, 0.712444,
0.708528, 0.707492, 0.703876, 0.699709, 0.695994, 0.694115, 0.690105, 0.687685, 0.684824, 0.681292, 0.677782, 0.672841, 0.672691, 0.669838,
0.668675, 0.662925, 0.66002, 0.657973, 0.653377, 0.651026, 0.648404, 0.645491, 0.64312, 0.639616, 0.636677, 0.634271, 0.633926, 0.628263, 0.628045,
0.623637, 0.622907, 0.618359, 0.615533, 0.613545, 0.611204, 0.608458, 0.605055, 0.599743, 0.596076, 0.591447, 0.588806, 0.584179, 0.582574,
0.578371, 0.577921, 0.573513, 0.572554, 0.568793, 0.566166, 0.564057
])
Bell_baffle_leakage_z_0_25 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.969362, 0.950324, 0.942087, 0.92833, 0.923091, 0.917463, 0.908263, 0.8987, 0.89149,
0.884407, 0.874926, 0.868404, 0.865482, 0.859179, 0.851926, 0.847458, 0.842356, 0.838126, 0.831234, 0.824993, 0.820171, 0.819781, 0.812734,
0.807844, 0.805761, 0.801747, 0.797071, 0.792124, 0.789555, 0.785317, 0.78038, 0.778202, 0.773138, 0.77066, 0.766058, 0.76223, 0.761802, 0.754362,
0.748168, 0.742388, 0.73989, 0.737023, 0.734092, 0.729014, 0.727092, 0.723117, 0.718675, 0.713975, 0.711407, 0.706049, 0.702306, 0.696944, 0.696185,
0.690717, 0.686227, 0.685001, 0.681275, 0.677607, 0.67354, 0.66991, 0.664945, 0.662097, 0.658262, 0.653372, 0.651139, 0.645344, 0.644356, 0.639733,
0.633126, 0.63202, 0.628405, 0.623295, 0.618256, 0.615613, 0.610601, 0.607588, 0.604035, 0.59965, 0.595292, 0.58984, 0.589599, 0.58484, 0.583239,
0.578639, 0.573864, 0.570568, 0.564821, 0.562249, 0.559118, 0.554969, 0.55186, 0.54748, 0.543806, 0.540727, 0.536551, 0.532912, 0.532604, 0.528196,
0.527417, 0.521732, 0.51779, 0.515024, 0.511791, 0.507996, 0.504098, 0.498013, 0.493805, 0.488017, 0.484304, 0.479275, 0.477805, 0.471912, 0.47135,
0.465839, 0.464639, 0.459938, 0.456295, 0.453329
])
Bell_baffle_leakage_z_0_5 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.963548, 0.945291, 0.931697, 0.915513, 0.90935, 0.903126, 0.892379, 0.882357, 0.875546,
0.864662, 0.854734, 0.849292, 0.844917, 0.836477, 0.828194, 0.822412, 0.815618, 0.810932, 0.803692, 0.796563, 0.790539, 0.79003, 0.780769, 0.774098,
0.771163, 0.765418, 0.759681, 0.754353, 0.751784, 0.746512, 0.739848, 0.736908, 0.73023, 0.727831, 0.723525, 0.722361, 0.716403, 0.709498, 0.702106,
0.695343, 0.69183, 0.687797, 0.684784, 0.679126, 0.676934, 0.672463, 0.666468, 0.660794, 0.657587, 0.652229, 0.647674, 0.641886, 0.641039, 0.634661,
0.629571, 0.627846, 0.62156, 0.618, 0.614214, 0.609549, 0.603166, 0.599589, 0.595259, 0.589978, 0.587232, 0.580972, 0.579691, 0.574139, 0.567532,
0.566006, 0.560921, 0.553889, 0.548597, 0.545865, 0.539851, 0.536447, 0.532176, 0.526217, 0.52128, 0.514403, 0.514091, 0.509272, 0.507629, 0.500514,
0.49602, 0.49275, 0.485255, 0.481637, 0.477351, 0.472925, 0.46959, 0.46425, 0.459291, 0.456283, 0.452506, 0.448422, 0.448072, 0.440693, 0.439462,
0.433776, 0.429185, 0.42583, 0.422193, 0.417554, 0.412196, 0.404759, 0.399625, 0.392681, 0.38872, 0.382111, 0.379972, 0.374079, 0.373424, 0.366812,
0.365433, 0.360144, 0.355712, 0.352153
])
Bell_baffle_leakage_z_0_75 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.952164, 0.932054, 0.918775, 0.898166, 0.89158, 0.884084, 0.873337, 0.862164, 0.849748,
0.841023, 0.827707, 0.821818, 0.817167, 0.80764, 0.798123, 0.79148, 0.784355, 0.778722, 0.769082, 0.759588, 0.752322, 0.751711, 0.742222, 0.733336,
0.730403, 0.724627, 0.716983, 0.709925, 0.706578, 0.701202, 0.694408, 0.691425, 0.684748, 0.681776, 0.675771, 0.66987, 0.665387, 0.658756, 0.650396,
0.642594, 0.63852, 0.633681, 0.630164, 0.623243, 0.620278, 0.614914, 0.608773, 0.60289, 0.599564, 0.592066, 0.587039, 0.580086, 0.579103, 0.571929,
0.565004, 0.562871, 0.556585, 0.552183, 0.547073, 0.542175, 0.535473, 0.531288, 0.526005, 0.518616, 0.51564, 0.509016, 0.507369, 0.500113, 0.493584,
0.491836, 0.485642, 0.477775, 0.471507, 0.468336, 0.461571, 0.457487, 0.452093, 0.445202, 0.43798, 0.428108, 0.42792, 0.424141, 0.421924, 0.414162,
0.409255, 0.405735, 0.397827, 0.393402, 0.387784, 0.382579, 0.378578, 0.372665, 0.367707, 0.363647, 0.359545, 0.35391, 0.353453, 0.346015, 0.344783,
0.336861, 0.331413, 0.328057, 0.323694, 0.318544, 0.312629, 0.303584, 0.297808, 0.289997, 0.285542, 0.279346, 0.275077, 0.267704, 0.266945,
0.259507, 0.257888, 0.251468, 0.246404, 0.242337
])
Bell_baffle_leakage_z_1 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.934094, 0.899408, 0.88689, 0.864752, 0.855175, 0.846, 0.832233, 0.820088, 0.811664, 0.80142,
0.78835, 0.781726, 0.776483, 0.765313, 0.755385, 0.749603, 0.742282, 0.73553, 0.725889, 0.716396, 0.709586, 0.709021, 0.698593, 0.690044, 0.686691,
0.680127, 0.67261, 0.665553, 0.662016, 0.655963, 0.648118, 0.644201, 0.635976, 0.63258, 0.626768, 0.621075, 0.617208, 0.609726, 0.60012, 0.591542,
0.587468, 0.582048, 0.578005, 0.570583, 0.567619, 0.561455, 0.554463, 0.548789, 0.545376, 0.537063, 0.531409, 0.523465, 0.522319, 0.51407, 0.508009,
0.50576, 0.49867, 0.493705, 0.487867, 0.482187, 0.474436, 0.470146, 0.465546, 0.458712, 0.455268, 0.43738, 0.445348, 0.436779, 0.429227, 0.42748,
0.42131, 0.413109, 0.406841, 0.403448, 0.395933, 0.391023, 0.38459, 0.37863, 0.372074, 0.361413, 0.360967, 0.356072, 0.353486, 0.344855, 0.339953,
0.336008, 0.327035, 0.322011, 0.316393, 0.310588, 0.306074, 0.299769, 0.294481, 0.290318, 0.285397, 0.279684, 0.279195, 0.271757, 0.27035, 0.261526,
0.255964, 0.252562, 0.248199, 0.241338, 0.234956, 0.226232, 0.219861, 0.210806, 0.205706, 0.197321, 0.194638, 0.187568, 0.186719, 0.178183,
0.176295, 0.168945, 0.16388, 0.160321
])
Bell_baffle_leakage_zs = np.array([Bell_baffle_leakage_z_0, Bell_baffle_leakage_z_0_25, Bell_baffle_leakage_z_0_5, Bell_baffle_leakage_z_0_75, Bell_baffle_leakage_z_1]).T
Bell_baffle_leakage_z_values = np.array([0, .25, .5, .75, 1])
Bell_baffle_leakage_obj = RectBivariateSpline(Bell_baffle_leakage_x, Bell_baffle_leakage_z_values, Bell_baffle_leakage_zs, kx=3, ky=1, s=0.002)
def test_baffle_leakage_Bell():
    Jl = baffle_leakage_Bell(1, 1, 4)
    assert_close(Jl, 0.5159239501898142, rtol=1e-3)

    Jl = baffle_leakage_Bell(1, 1, 8)
    assert_close(Jl, 0.6820523047494141, rtol=1e-3)

    Jl = baffle_leakage_Bell(1, 3, 8)
    assert_close(Jl, 0.5906621282470395, rtol=1e-3)

    # Silent clipping
    Jl = baffle_leakage_Bell(1, .0001, .00001)
    assert_close(Jl, 0.16072739052053492)

    Jl = baffle_leakage_Bell(1, 3, 8, method='HEDH')
    assert_close(Jl, 0.5530236260777133)

    # Example in spreadsheet 02 - Heat Exchangers, tab Shell htc imperial,
    # Rules of Thumb for Chemical Engineers 5E
    # Has an error
    Jl = baffle_leakage_Bell(Ssb=5.5632369907320000000, Stb=4.7424109055909500, Sm=42.7842616174504, method='HEDH')
    assert_close(Jl, 0.6719386427830639)
def test_baffle_leakage_Bell_refit():
    from ht.conv_tube_bank import Bell_baffle_leakage_tck
    # Test refitting the data
    obj = RectBivariateSpline(Bell_baffle_leakage_x, Bell_baffle_leakage_z_values, Bell_baffle_leakage_zs, kx=3, ky=1, s=0.002)
    new_tck = obj.tck + obj.degrees
    [assert_close1d(i, j) for (i, j) in zip(Bell_baffle_leakage_tck[:-2], new_tck[:-2])]
#import matplotlib.pyplot as plt
#for ys in Bell_baffle_leakage_zs.T:
# plt.plot(Bell_baffle_leakage_x, ys)
#for z in Bell_baffle_leakage_z_values:
# xs = np.linspace(min(Bell_baffle_leakage_x), max(Bell_baffle_leakage_x), 1000)
# ys = np.clip(Bell_baffle_leakage_obj(xs, z), 0, 1)
# plt.plot(xs, ys, '--')
#
#for z in Bell_baffle_leakage_z_values:
# xs = np.linspace(min(Bell_baffle_leakage_x), max(Bell_baffle_leakage_x), 1000)
# rs = z
# rl = xs
# ys = 0.44*(1.0 - rs) + (1.0 - 0.44*(1.0 - rs))*np.exp(-2.2*rl)
# plt.plot(xs, ys, '--')
def test_bundle_bypassing_Bell():
Jb = bundle_bypassing_Bell(0.5, 5, 25)
assert_close(Jb, 0.8469611760884599, rtol=1e-3)
Jb = bundle_bypassing_Bell(0.5, 5, 25, laminar=True)
assert_close(Jb, 0.8327442867825271, rtol=1e-3)
Jb = bundle_bypassing_Bell(0.99, 5, 25, laminar=True)
assert_close(Jb, 0.7786963825447165, rtol=1e-3)
Jb = bundle_bypassing_Bell(0.5, 5, 25, method='HEDH')
assert_close(Jb, 0.8483210970579099)
Jb = bundle_bypassing_Bell(0.5, 5, 25, method='HEDH', laminar=True)
assert_close(0.8372305924553625, Jb)
# Example in spreadsheet 02 - Heat Exchangers, tab Shell htc imperial,
# Rules of Thumb for Chemical Engineers 5E
Jb = bundle_bypassing_Bell(bypass_area_fraction=0.331946755407654, seal_strips=2, crossflow_rows=10.6516290726817, method='HEDH')
assert_close(Jb, 0.8908547260332952)
Bell_bundle_bypass_x = np.array([0.0, 1e-5, 1e-4, 1e-3, 0.0388568, 0.0474941, 0.0572083, 0.0807999, 0.0915735, 0.0959337, 0.118724, 0.128469, 0.134716,
0.142211, 0.146821, 0.156504, 0.162821, 0.169488, 0.178126, 0.185301, 0.194997, 0.200798, 0.210512, 0.212373, 0.221063, 0.222122, 0.228864,
0.232856, 0.238578, 0.242605, 0.250104, 0.257958, 0.262866, 0.268403, 0.273639, 0.280289, 0.284999, 0.291067, 0.295186, 0.30005, 0.309764, 0.312548,
0.31468, 0.320144, 0.323405, 0.328111, 0.33213, 0.333111, 0.33857, 0.341836, 0.343889, 0.349352, 0.351401, 0.35359, 0.359058, 0.361102, 0.366408,
0.370597, 0.375601, 0.379541, 0.382811, 0.386913, 0.392363, 0.39766, 0.401106, 0.401841, 0.410811, 0.412615, 0.419939, 0.421633, 0.42633, 0.431067,
0.434967, 0.440908, 0.444682, 0.450614, 0.45373, 0.457036, 0.462565, 0.464508, 0.47016, 0.47227, 0.477519, 0.480474, 0.482794, 0.486874, 0.490639,
0.492758, 0.499075, 0.501281, 0.506824, 0.5116, 0.51494, 0.52159, 0.52187, 0.530498, 0.532368, 0.537013, 0.541276, 0.542244, 0.546385, 0.551805,
0.553801, 0.5575, 0.562325, 0.56668, 0.568283, 0.572153, 0.576377, 0.580676, 0.582252, 0.5886, 0.591953, 0.599019, 0.601715, 0.602385, 0.610103,
0.612441, 0.613194, 0.62061, 0.622146, 0.622934, 0.630324, 0.631852, 0.633669, 0.637109, 0.64136, 0.644447, 0.647887, 0.649879, 0.652335, 0.656363,
0.657593, 0.661839, 0.665333, 0.667924, 0.672258, 0.674841, 0.678694, 0.681955, 0.685396, 0.688789, 0.69198, 0.69532
])
Bell_bundle_bypass_x_max = float(Bell_bundle_bypass_x[-1])
Bell_bundle_bypass_z_values = np.array([0.0, 0.05, 0.1, 1.0 / 6.0, 0.3, 0.5])
Bell_bundle_bypass_z_high_0_5 = np.ones(144)
Bell_bundle_bypass_z_high_0_3 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.990537, 0.988984, 0.98724, 0.983016, 0.980614, 0.979535, 0.974346, 0.972054, 0.970522,
0.968688, 0.967675, 0.965549, 0.964164, 0.963959, 0.963171, 0.961603, 0.959253, 0.959162, 0.957048, 0.956644, 0.954757, 0.954523, 0.9529, 0.95197,
0.950734, 0.949953, 0.951574, 0.949936, 0.947587, 0.946396, 0.945271, 0.943845, 0.942835, 0.941537, 0.940656, 0.940788, 0.942546, 0.940563,
0.939047, 0.935797, 0.935104, 0.934105, 0.933252, 0.933045, 0.931888, 0.931164, 0.930682, 0.9294, 0.929485, 0.929948, 0.931104, 0.931397, 0.928907,
0.926946, 0.925893, 0.925065, 0.924344, 0.923388, 0.922149, 0.92104, 0.92032, 0.920166, 0.918293, 0.917917, 0.917341, 0.917207, 0.916838, 0.916466,
0.916159, 0.915693, 0.915397, 0.914931, 0.914687, 0.914428, 0.913994, 0.913842, 0.91334, 0.912902, 0.911815, 0.911203, 0.91078, 0.910038, 0.909353,
0.908968, 0.907821, 0.907421, 0.906416, 0.905551, 0.904947, 0.903745, 0.903694, 0.902137, 0.9018, 0.900963, 0.900195, 0.900021, 0.899276, 0.898303,
0.897944, 0.897281, 0.896416, 0.895636, 0.895349, 0.894656, 0.893901, 0.893133, 0.892852, 0.89172, 0.891122, 0.889865, 0.889385, 0.889266, 0.887895,
0.88748, 0.887347, 0.887002, 0.887002, 0.887002, 0.886113, 0.885805, 0.88544, 0.884748, 0.883894, 0.883275, 0.882575, 0.882132, 0.881585, 0.880689,
0.880426, 0.879577, 0.878879, 0.878362, 0.878362, 0.878362, 0.878362, 0.877712, 0.877026, 0.87635, 0.875715, 0.875051
])
Bell_bundle_bypass_z_high_0_167 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.98326, 0.97947, 0.974498, 0.962528, 0.957986, 0.956693, 0.949964, 0.947102, 0.945271,
0.94206, 0.94009, 0.935965, 0.93353, 0.932117, 0.928823, 0.925995, 0.923086, 0.921351, 0.918452, 0.917897, 0.915313, 0.914999, 0.913, 0.911818,
0.910127, 0.90895, 0.907403, 0.905106, 0.903391, 0.90146, 0.899637, 0.897328, 0.895696, 0.893598, 0.892176, 0.8905, 0.886812, 0.885691, 0.884834,
0.882399, 0.880948, 0.879769, 0.878966, 0.87877, 0.87685, 0.875407, 0.874501, 0.873182, 0.872775, 0.872342, 0.870581, 0.869774, 0.86768, 0.865848,
0.863665, 0.862771, 0.862131, 0.861322, 0.859193, 0.857129, 0.859086, 0.858609, 0.852897, 0.852509, 0.850934, 0.85034, 0.848528, 0.846705, 0.845041,
0.842545, 0.841823, 0.840689, 0.839677, 0.838418, 0.836305, 0.835485, 0.833106, 0.832278, 0.831286, 0.830728, 0.830291, 0.828583, 0.827011,
0.826114, 0.823157, 0.822169, 0.82102, 0.820047, 0.819426, 0.818189, 0.818085, 0.814886, 0.814194, 0.812289, 0.810543, 0.810058, 0.806263, 0.806263,
0.806263, 0.806137, 0.804373, 0.802783, 0.802256, 0.801473, 0.800619, 0.799812, 0.799526, 0.798328, 0.796926, 0.793982, 0.792861, 0.792583,
0.789808, 0.78897, 0.788701, 0.787226, 0.786921, 0.786757, 0.784122, 0.783578, 0.782932, 0.781709, 0.780202, 0.779109, 0.778433, 0.778042, 0.77756,
0.776422, 0.775988, 0.774494, 0.77333, 0.772824, 0.77198, 0.771442, 0.770094, 0.768954, 0.767753, 0.766571, 0.765461, 0.764301
])
Bell_bundle_bypass_z_high_0_1 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.978035, 0.974378, 0.970282, 0.960405, 0.955928, 0.953958, 0.941171, 0.935756, 0.932301,
0.928172, 0.925642, 0.92035, 0.916913, 0.9133, 0.908641, 0.904789, 0.899741, 0.89745, 0.893627, 0.892897, 0.889494, 0.88908, 0.886716, 0.885913,
0.884594, 0.881903, 0.877493, 0.874369, 0.87224, 0.869806, 0.867741, 0.865076, 0.863023, 0.86048, 0.858872, 0.856977, 0.853205, 0.851584, 0.850211,
0.846705, 0.845452, 0.843647, 0.842058, 0.841641, 0.839327, 0.837996, 0.837215, 0.835141, 0.834364, 0.833443, 0.831147, 0.830291, 0.828293,
0.826718, 0.824687, 0.82305, 0.821515, 0.819223, 0.816189, 0.814075, 0.812703, 0.81241, 0.808849, 0.808135, 0.805242, 0.804574, 0.802726, 0.800866,
0.799338, 0.797016, 0.795545, 0.793199, 0.791952, 0.790633, 0.78865, 0.787955, 0.785378, 0.784125, 0.781018, 0.779971, 0.779149, 0.777707, 0.776379,
0.775632, 0.77341, 0.77338, 0.770144, 0.767521, 0.766358, 0.764048, 0.763944, 0.760626, 0.759946, 0.758344, 0.756878, 0.756543, 0.754964, 0.752903,
0.752217, 0.750955, 0.749311, 0.74768, 0.747075, 0.745618, 0.743505, 0.741332, 0.740537, 0.738255, 0.737132, 0.731632, 0.729296, 0.729296, 0.729296,
0.728522, 0.728273, 0.725825, 0.725318, 0.725059, 0.72263, 0.722122, 0.72146, 0.720209, 0.718666, 0.71766, 0.716539, 0.715891, 0.715086, 0.713635,
0.713192, 0.711666, 0.708853, 0.706773, 0.705828, 0.705414, 0.704797, 0.703715, 0.702494, 0.701293, 0.700165, 0.698986
])
Bell_bundle_bypass_z_high_0_05 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.972281, 0.967922, 0.961369, 0.943692, 0.935729, 0.932525, 0.915956, 0.908961,
0.906104, 0.904563, 0.901473, 0.895196, 0.891354, 0.885977, 0.87906, 0.874187, 0.86913, 0.86655, 0.862245, 0.861423, 0.857594, 0.857129, 0.852769,
0.850462, 0.848255, 0.846705, 0.842424, 0.837963, 0.835187, 0.832066, 0.829126, 0.825407, 0.822783, 0.819415, 0.817095, 0.814308, 0.808771, 0.80719,
0.805982, 0.802895, 0.801058, 0.798414, 0.796163, 0.795615, 0.79257, 0.79081, 0.789705, 0.786773, 0.785555, 0.784255, 0.781018, 0.780293, 0.778416,
0.776757, 0.773823, 0.77152, 0.769804, 0.767657, 0.764814, 0.76206, 0.760275, 0.759852, 0.754714, 0.753788, 0.750038, 0.749171, 0.746514, 0.743844,
0.742476, 0.740476, 0.738142, 0.733741, 0.732227, 0.731129, 0.729296, 0.728224, 0.725118, 0.723961, 0.721379, 0.719929, 0.718793, 0.716592,
0.714554, 0.71341, 0.709585, 0.708255, 0.706445, 0.704915, 0.703256, 0.699727, 0.699579, 0.694462, 0.693873, 0.692411, 0.691072, 0.690566, 0.688406,
0.685632, 0.684701, 0.682979, 0.68071, 0.678471, 0.677649, 0.675704, 0.673763, 0.671794, 0.671073, 0.668927, 0.667797, 0.664237, 0.662887, 0.662584,
0.659112, 0.658063, 0.657689, 0.65401, 0.65325, 0.652861, 0.649222, 0.648472, 0.647937, 0.646926, 0.645678, 0.64442, 0.642745, 0.641777, 0.640586,
0.638832, 0.638297, 0.636454, 0.634836, 0.633593, 0.631519, 0.630382, 0.628731, 0.627336, 0.626066, 0.624995, 0.62399, 0.622939
])
Bell_bundle_bypass_z_high_0 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.952236, 0.940656, 0.929217, 0.902172, 0.890997, 0.886514, 0.863444, 0.851755, 0.845079,
0.837139, 0.832293, 0.822203, 0.816984, 0.810801, 0.80192, 0.794615, 0.78485, 0.779066, 0.769592, 0.767791, 0.759517, 0.758605, 0.752824, 0.749047,
0.743669, 0.739906, 0.73295, 0.725735, 0.722154, 0.717987, 0.713174, 0.707108, 0.702842, 0.697384, 0.693703, 0.689382, 0.680999, 0.678318, 0.676273,
0.671537, 0.669333, 0.666165, 0.662801, 0.661983, 0.657447, 0.654748, 0.653057, 0.648578, 0.646907, 0.645126, 0.640517, 0.638664, 0.634016,
0.631344, 0.628167, 0.625058, 0.622488, 0.619125, 0.614363, 0.610288, 0.607796, 0.607265, 0.60083, 0.599544, 0.59421, 0.592943, 0.589445, 0.585503,
0.582277, 0.577936, 0.575196, 0.571767, 0.569973, 0.567464, 0.563036, 0.561619, 0.557635, 0.556155, 0.55249, 0.550438, 0.548878, 0.546625, 0.544554,
0.543231, 0.538071, 0.536281, 0.532469, 0.529276, 0.527497, 0.523935, 0.52375, 0.518089, 0.516762, 0.513373, 0.51047, 0.509884, 0.507382, 0.504126,
0.502932, 0.500727, 0.497867, 0.495143, 0.494144, 0.491733, 0.488799, 0.485831, 0.484868, 0.481006, 0.479285, 0.476413, 0.473514, 0.472869,
0.469205, 0.468011, 0.467512, 0.462626, 0.461732, 0.461273, 0.457, 0.456012, 0.45484, 0.452628, 0.450352, 0.448953, 0.447398, 0.446281, 0.444731,
0.442201, 0.44145, 0.439096, 0.437168, 0.435842, 0.433942, 0.432813, 0.430923, 0.429157, 0.427301, 0.425479, 0.423772, 0.421993
])
Bell_bundle_bypass_z_high = np.array([Bell_bundle_bypass_z_high_0, Bell_bundle_bypass_z_high_0_05, Bell_bundle_bypass_z_high_0_1, Bell_bundle_bypass_z_high_0_167, Bell_bundle_bypass_z_high_0_3, Bell_bundle_bypass_z_high_0_5]).T
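# Bivariate spline of the digitized Bell bundle-bypass correction chart built from
# the "high" set of curves; the "low" spline constructed further below presumably
# corresponds to the laminar=True branch exercised in test_bundle_bypassing_Bell.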
Bell_bundle_bypass_high_obj = RectBivariateSpline(Bell_bundle_bypass_x, Bell_bundle_bypass_z_values, Bell_bundle_bypass_z_high, kx = 3, ky = 3, s = 0.0007)
#import matplotlib.pyplot as plt
#for ys in Bell_bundle_bypass_z_high.T:
# plt.plot(Bell_bundle_bypass_x, ys)
#for z in Bell_bundle_bypass_z_values:
# xs = np.linspace(min(Bell_bundle_bypass_x), max(Bell_bundle_bypass_x), 1000)
# ys = np.clip(Bell_bundle_bypass_high_obj(xs, z), 0, 1)
# plt.plot(xs, ys, '--')
#for z in Bell_bundle_bypass_z_values:
# xs = np.linspace(min(Bell_bundle_bypass_x), max(Bell_bundle_bypass_x), 1000)
# ys = np.exp(-1.25*xs*(1.0 - (2.0*z)**(1/3.) )) # This one is a good fit!
# plt.plot(xs, ys, '.')
#plt.show()
Bell_bundle_bypass_z_low_0_5 = Bell_bundle_bypass_z_high_0_5
Bell_bundle_bypass_z_low_0_3 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.991796, 0.989982, 0.987945, 0.983016, 0.980614, 0.979535, 0.974346, 0.972054, 0.970522,
0.968688, 0.967675, 0.965549, 0.964164, 0.963959, 0.963171, 0.961603, 0.959253, 0.959162, 0.957048, 0.956644, 0.954757, 0.954523, 0.9529, 0.95197,
0.950734, 0.949953, 0.951574, 0.949936, 0.947587, 0.946396, 0.945271, 0.943845, 0.942835, 0.941537, 0.940656, 0.940788, 0.942546, 0.940563,
0.939047, 0.935797, 0.935104, 0.934105, 0.933252, 0.933045, 0.931888, 0.931164, 0.930682, 0.9294, 0.929485, 0.929948, 0.931104, 0.931397, 0.928907,
0.926946, 0.925893, 0.925065, 0.924344, 0.923388, 0.922112, 0.920852, 0.920034, 0.919859, 0.917732, 0.917305, 0.915572, 0.915172, 0.914063,
0.912946, 0.912028, 0.910631, 0.909744, 0.908352, 0.907622, 0.906848, 0.905555, 0.905101, 0.903781, 0.903289, 0.902066, 0.901379, 0.900839,
0.899919, 0.899149, 0.898717, 0.897483, 0.897083, 0.89608, 0.895216, 0.894613, 0.893412, 0.893362, 0.891807, 0.89147, 0.890635, 0.889868, 0.889694,
0.888923, 0.887829, 0.887427, 0.886681, 0.88571, 0.884834, 0.884477, 0.883613, 0.882672, 0.881954, 0.881691, 0.880632, 0.880073, 0.878897, 0.878448,
0.878332, 0.876793, 0.876328, 0.876178, 0.874703, 0.874398, 0.874242, 0.872631, 0.872295, 0.871914, 0.871488, 0.870962, 0.87058, 0.870155, 0.869909,
0.869486, 0.868691, 0.868448, 0.867611, 0.866922, 0.866412, 0.86556, 0.864996, 0.864155, 0.863444, 0.86277, 0.862105, 0.86148, 0.860827
])
Bell_bundle_bypass_z_low_0_167 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.980974, 0.977905, 0.97446, 0.966143, 0.962368, 0.960686, 0.951112, 0.947048, 0.944452,
0.941346, 0.939441, 0.935452, 0.93269, 0.92946, 0.925293, 0.921845, 0.917207, 0.914443, 0.909833, 0.908959, 0.905975, 0.905612, 0.903304, 0.90194,
0.899989, 0.898618, 0.896071, 0.893412, 0.891754, 0.889887, 0.887916, 0.885239, 0.883183, 0.880493, 0.879259, 0.877803, 0.874903, 0.874074,
0.873134, 0.870731, 0.869578, 0.868649, 0.867856, 0.867642, 0.865256, 0.863831, 0.862988, 0.860849, 0.860049, 0.859186, 0.856524, 0.855531,
0.852959, 0.852139, 0.851171, 0.84986, 0.848459, 0.846705, 0.844612, 0.842583, 0.841212, 0.840919, 0.837359, 0.836645, 0.833751, 0.833084, 0.831743,
0.830749, 0.829968, 0.828849, 0.827989, 0.825515, 0.824217, 0.822981, 0.820918, 0.820193, 0.817941, 0.817102, 0.815018, 0.813871, 0.813014,
0.811509, 0.810137, 0.809474, 0.8075, 0.806811, 0.805085, 0.8036, 0.802563, 0.800503, 0.800417, 0.797752, 0.797175, 0.795746, 0.794422, 0.794073,
0.792581, 0.790633, 0.789837, 0.788364, 0.786627, 0.785849, 0.785563, 0.784873, 0.783229, 0.781532, 0.780917, 0.778551, 0.777304, 0.774683,
0.773686, 0.773438, 0.772069, 0.771659, 0.771527, 0.768654, 0.768059, 0.767753, 0.765181, 0.764651, 0.76402, 0.76335, 0.762532, 0.76154, 0.75956,
0.758417, 0.757994, 0.757301, 0.757089, 0.75611, 0.754779, 0.753793, 0.752544, 0.752102, 0.751445, 0.750747, 0.749575, 0.748421, 0.747337, 0.746205
])
Bell_bundle_bypass_z_low_0_1 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.978947, 0.974857, 0.970278, 0.959247, 0.954251, 0.952236, 0.938267, 0.932356, 0.928587,
0.924085, 0.921326, 0.915559, 0.911816, 0.907882, 0.902811, 0.89862, 0.892988, 0.889635, 0.885582, 0.884834, 0.879037, 0.878345, 0.87566, 0.874074,
0.87124, 0.869251, 0.86556, 0.862478, 0.860473, 0.858072, 0.85515, 0.850859, 0.849041, 0.846705, 0.844334, 0.841542, 0.835995, 0.834411, 0.833976,
0.832942, 0.832325, 0.829367, 0.82685, 0.826237, 0.824191, 0.82297, 0.822203, 0.81994, 0.819093, 0.818189, 0.815149, 0.814015, 0.81124, 0.809258,
0.806898, 0.805045, 0.80351, 0.801588, 0.799042, 0.796575, 0.794975, 0.794634, 0.791377, 0.790729, 0.787823, 0.78715, 0.784863, 0.782599, 0.781214,
0.779109, 0.776888, 0.77341, 0.772317, 0.771158, 0.769179, 0.768425, 0.766237, 0.765263, 0.762533, 0.761, 0.759834, 0.757882, 0.756085, 0.755076,
0.752075, 0.751029, 0.749142, 0.747519, 0.746277, 0.743778, 0.743677, 0.740769, 0.74014, 0.737582, 0.735207, 0.73467, 0.733289, 0.731487, 0.730713,
0.728963, 0.726686, 0.724636, 0.723901, 0.722489, 0.720951, 0.719026, 0.718255, 0.715157, 0.713949, 0.711376, 0.710288, 0.710018, 0.706915,
0.705978, 0.705676, 0.702713, 0.7021, 0.701786, 0.698849, 0.698244, 0.697524, 0.696164, 0.694821, 0.693848, 0.692765, 0.691722, 0.690438, 0.688338,
0.687698, 0.686042, 0.684684, 0.683677, 0.681998, 0.680999, 0.680403, 0.679899, 0.679368, 0.677706, 0.676072, 0.674366
])
Bell_bundle_bypass_z_low_0_05 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.97132, 0.966107, 0.959971, 0.942755, 0.934996, 0.931875, 0.915726, 0.908906, 0.906104,
0.904563, 0.901473, 0.895196, 0.891354, 0.885977, 0.87906, 0.874187, 0.867386, 0.86321, 0.856262, 0.854938, 0.84878, 0.848124, 0.843964, 0.841509,
0.838004, 0.835546, 0.830988, 0.826241, 0.823289, 0.81997, 0.816844, 0.812892, 0.810104, 0.806526, 0.804106, 0.801259, 0.795601, 0.793967, 0.792688,
0.789419, 0.787596, 0.785077, 0.782932, 0.782351, 0.779127, 0.777205, 0.776119, 0.773238, 0.77216, 0.770953, 0.767771, 0.766585, 0.763514, 0.761099,
0.758428, 0.756396, 0.754714, 0.752376, 0.749281, 0.745922, 0.743739, 0.743322, 0.738296, 0.737388, 0.73372, 0.732874, 0.730275, 0.727663, 0.725519,
0.722266, 0.720207, 0.716983, 0.715295, 0.713509, 0.710531, 0.709487, 0.70646, 0.705709, 0.703842, 0.702816, 0.702076, 0.700776, 0.699579, 0.698012,
0.693361, 0.691743, 0.687698, 0.686208, 0.685168, 0.682279, 0.682134, 0.677697, 0.676739, 0.674366, 0.671761, 0.671172, 0.668654, 0.666449,
0.665778, 0.664536, 0.662357, 0.660396, 0.659676, 0.657624, 0.655391, 0.653126, 0.652298, 0.648972, 0.647223, 0.643551, 0.642155, 0.64196, 0.639714,
0.639035, 0.638682, 0.635109, 0.634371, 0.633993, 0.63046, 0.629731, 0.628867, 0.627232, 0.625218, 0.62376, 0.622139, 0.621202, 0.62005, 0.618248,
0.617731, 0.615947, 0.614484, 0.61328, 0.611273, 0.61008, 0.608305, 0.606806, 0.605229, 0.603678, 0.602222, 0.600702
])
Bell_bundle_bypass_z_low_0 = np.array([1.0, 0.99999, 0.9999, 0.999, 0.952236, 0.940656, 0.929217, 0.90002, 0.886521, 0.880701, 0.850893, 0.838458, 0.831886,
0.823549, 0.818189, 0.807989, 0.801404, 0.794512, 0.78485, 0.776988, 0.766488, 0.760275, 0.751029, 0.749052, 0.740111, 0.739124, 0.732874, 0.729198,
0.723961, 0.720158, 0.713129, 0.705842, 0.701326, 0.696132, 0.690988, 0.684186, 0.679334, 0.67352, 0.66971, 0.665448, 0.657018, 0.654621, 0.652811,
0.648334, 0.645676, 0.641432, 0.637791, 0.636967, 0.632602, 0.630005, 0.628212, 0.623369, 0.621616, 0.619905, 0.61565, 0.61403, 0.609576, 0.606083,
0.601936, 0.598691, 0.596011, 0.592666, 0.588251, 0.583992, 0.581238, 0.580668, 0.574145, 0.572724, 0.566812, 0.565183, 0.56069, 0.556978, 0.55452,
0.550223, 0.547289, 0.543116, 0.540988, 0.538616, 0.534414, 0.532944, 0.528694, 0.527116, 0.523265, 0.521112, 0.519429, 0.516481, 0.514038,
0.512668, 0.508511, 0.507017, 0.503284, 0.500089, 0.497867, 0.493714, 0.49354, 0.487757, 0.486467, 0.482972, 0.479717, 0.478981, 0.476477, 0.473881,
0.472928, 0.470456, 0.467252, 0.464687, 0.46375, 0.461495, 0.458195, 0.45486, 0.453935, 0.45032, 0.448303, 0.443758, 0.442035, 0.441609, 0.437337,
0.436027, 0.435562, 0.431011, 0.430169, 0.429742, 0.425761, 0.424942, 0.423971, 0.422137, 0.42001, 0.418705, 0.417255, 0.416416, 0.414108, 0.41035,
0.409842, 0.408091, 0.406656, 0.405331, 0.402949, 0.401536, 0.399438, 0.39767, 0.395938, 0.394249, 0.392668, 0.391019
])
Bell_bundle_bypass_z_low = np.array([Bell_bundle_bypass_z_low_0, Bell_bundle_bypass_z_low_0_05, Bell_bundle_bypass_z_low_0_1, Bell_bundle_bypass_z_low_0_167, Bell_bundle_bypass_z_low_0_3, Bell_bundle_bypass_z_low_0_5]).T
Bell_bundle_bypass_low_obj = RectBivariateSpline(Bell_bundle_bypass_x, Bell_bundle_bypass_z_values, Bell_bundle_bypass_z_low, kx = 3, ky = 3, s = 0.0007)
#for ys in Bell_bundle_bypass_z_low.T:
# plt.plot(Bell_bundle_bypass_x, ys)
#
#for z in Bell_bundle_bypass_z_values:
# xs = np.linspace(min(Bell_bundle_bypass_x), max(Bell_bundle_bypass_x), 1000)
# ys = np.clip(Bell_bundle_bypass_low_obj(xs, z), 0, 1)
# plt.plot(xs, ys, '--')
#plt.show()
def test_bundle_bypassing_Bell_refit():
from ht.conv_tube_bank import Bell_bundle_bypass_high_spl, Bell_bundle_bypass_low_spl
low_spl = Bell_bundle_bypass_low_obj.tck + Bell_bundle_bypass_low_obj.degrees
high_spl = Bell_bundle_bypass_high_obj.tck + Bell_bundle_bypass_high_obj.degrees
[assert_close1d(i, j) for i, j in zip(Bell_bundle_bypass_high_spl[:-2], high_spl[:-2])]
[assert_close1d(i, j) for i, j in zip(Bell_bundle_bypass_low_spl[:-2], low_spl[:-2])]
def test_unequal_baffle_spacing_Bell():
Js = unequal_baffle_spacing_Bell(16, .1, .15, 0.15)
assert_close(Js, 0.9640087802805195)
def test_laminar_correction_Bell():
Jr = laminar_correction_Bell(30.0, 80)
assert_close(Jr, 0.7267995454361379)
assert_close(0.4, laminar_correction_Bell(30, 80000)) | mit |
DhrubajyotiDas/PyAbel | examples/example_dasch_methods.py | 1 | 1597 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""example_dasch_methods.py.
"""
import numpy as np
import abel
import matplotlib.pyplot as plt
# Dribinski sample image size 501x501
n = 501
IM = abel.tools.analytical.sample_image(n)
# split into quadrants
origQ = abel.tools.symmetry.get_image_quadrants(IM)
# speed distribution of original image
orig_speed = abel.tools.vmi.angular_integration(origQ[0], origin=(0,0))
scale_factor = orig_speed[1].max()
plt.plot(orig_speed[0], orig_speed[1]/scale_factor, linestyle='dashed',
label="Dribinski sample")
# forward Abel projection
fIM = abel.Transform(IM, direction="forward", method="hansenlaw").transform
# split projected image into quadrants
Q = abel.tools.symmetry.get_image_quadrants(fIM)
dasch_transform = {\
"two_point": abel.dasch.two_point_transform,
"three_point": abel.dasch.three_point_transform,
"onion_peeling": abel.dasch.onion_peeling_transform}
for method in dasch_transform.keys():
Q0 = Q[0].copy()
# method inverse Abel transform
AQ0 = dasch_transform[method](Q0, basis_dir='bases')
# speed distribution
speed = abel.tools.vmi.angular_integration(AQ0, origin=(0,0))
plt.plot(speed[0], speed[1]*orig_speed[1][14]/speed[1][14]/scale_factor,
label=method)
plt.title("Dasch methods for Dribinski sample image $n={:d}$".format(n))
plt.axis(xmax=250, ymin=-0.1)
plt.legend(loc=0, frameon=False, labelspacing=0.1, fontsize='small')
plt.savefig("plot_example_dasch_methods.png",dpi=100)
plt.show()
| mit |
jgillis/casadi | examples/python/modelica/fritzson_application_examples/thermodynamics_example.py | 1 | 6483 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
import os
import sys
import numpy as NP
from numpy import *
import matplotlib.pyplot as plt
import zipfile
import time
import shutil
try:
# JModelica
from pymodelica import compile_jmu
from pyjmi import JMUModel
import pymodelica
use_precompiled = False
except:
print "No jmodelica installation, falling back to precompiled XML-files"
use_precompiled = True
# CasADi
from casadi import *
# Matplotlib interactive mode
#plt.ion()
# Compile Modelica code to XML
def comp(name):
curr_dir = os.path.dirname(os.path.abspath(__file__))
if use_precompiled:
shutil.copy(curr_dir + '/precompiled_' + name + '.xml', name + '.xml')
else:
jmu_name = compile_jmu(name, curr_dir+"/thermodynamics_example.mo",'modelica','ipopt',{'generate_xml_equations':True, 'generate_fmi_me_xml':False})
modname = name.replace('.','_')
sfile = zipfile.ZipFile(curr_dir+'/'+modname+'.jmu','r')
mfile = sfile.extract('modelDescription.xml','.')
os.remove(modname+'.jmu')
os.rename('modelDescription.xml',modname+'.xml')
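# The first three examples below follow the same workflow: compile the Modelica
# model to XML, parse it into a SymbolicOCP, make the ODE explicit, eliminate the
# algebraic states, build a CVODES integrator and an output function, then
# simulate on a time grid and plot the results.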
# Compile the simplest example (conservation of mass in a control volume)
comp("BasicVolumeMassConservation")
# Read a model from XML
ocp = SymbolicOCP()
ocp.parseFMI('BasicVolumeMassConservation.xml')
# Make the OCP explicit
ocp.makeExplicit()
# Eliminate the algebraic states
ocp.eliminateAlgebraic()
# Inputs to the integrator
dae_fcn_in = daeIn(
t = ocp.t,
x = vertcat(var(ocp.x)),
p = vertcat(var(ocp.pi)+var(ocp.pf))
)
# Create an integrator
dae = SXFunction(dae_fcn_in,daeOut(ode=ocp.ode))
integrator = CVodesIntegrator(dae)
# Output function
m = ocp.variable("m").var()
P = ocp.variable("P").var()
output_fcn_out = ocp.substituteDependents([m,P])
output_fcn_in = daeIn(
t=ocp.t,
x = vertcat(var(ocp.x)),
z = vertcat(var(ocp.z)),
p = vertcat(var(ocp.pi)+var(ocp.pf)+var(ocp.u))
)
output_fcn = SXFunction(output_fcn_in,output_fcn_out)
# Create a simulator
grid = NP.linspace(0,1,100)
simulator = Simulator(integrator,output_fcn,grid)
simulator.init()
# Pass initial conditions
x0 = getStart(ocp.x)
simulator.setInput(x0,"x0")
# Simulate
simulator.evaluate()
integrator.printStats()
# Plot
plt.figure(1)
plt.subplot(1,2,1)
plt.plot(grid,simulator.output())
plt.xlabel("t")
plt.ylabel("m(t)")
plt.title("c.f. Fritzson figure 15-6 (left)")
plt.subplot(1,2,2)
plt.plot(grid,simulator.output(1))
plt.xlabel("t")
plt.ylabel("P(t)")
plt.title("c.f. Fritzson figure 15-6 (right)")
plt.draw()
# Compile the next example (conservation of energy in control volume)
comp("BasicVolumeEnergyConservation")
# Allocate a parser and load the xml
ocp = SymbolicOCP()
ocp.parseFMI('BasicVolumeEnergyConservation.xml')
# Make the OCP explicit
ocp.makeExplicit()
# Eliminate the algebraic states
ocp.eliminateAlgebraic()
# Inputs to the integrator
dae_fcn_in = daeIn(
t = ocp.t,
x = vertcat(var(ocp.x)),
p = vertcat(var(ocp.pi)+var(ocp.pf))
)
# Create an integrator
dae = SXFunction(dae_fcn_in,daeOut(ode=ocp.ode))
integrator = CVodesIntegrator(dae)
# Output function
T = ocp.variable("T").var()
output_fcn_out = ocp.substituteDependents([T])
output_fcn_in = daeIn(
t=ocp.t,
x = vertcat(var(ocp.x)),
z = vertcat(var(ocp.z)),
p = vertcat(var(ocp.pi)+var(ocp.pf)+var(ocp.u))
)
output_fcn = SXFunction(output_fcn_in,output_fcn_out)
# Create a simulator
grid = NP.linspace(0,10,100)
simulator = Simulator(integrator,output_fcn,grid)
simulator.init()
# Pass initial conditions
x0 = getStart(ocp.x)
simulator.setInput(x0,"x0")
# Simulate
simulator.evaluate()
integrator.printStats()
# Plot
plt.figure(2)
plt.plot(grid,simulator.output())
plt.xlabel("t")
plt.ylabel("T(t)")
plt.title("c.f. Fritzson figure 15-9")
plt.draw()
# Compile the next example (Heat transfer and work)
comp("BasicVolumeTest")
# Allocate a parser and load the xml
ocp = SymbolicOCP()
ocp.parseFMI('BasicVolumeTest.xml')
# Make explicit
ocp.makeExplicit()
# Eliminate the algebraic states
ocp.eliminateAlgebraic()
# Inputs to the integrator
dae_fcn_in = daeIn(
t = ocp.t,
x = vertcat(var(ocp.x)),
p = vertcat(var(ocp.pi)+var(ocp.pf))
)
# Create an integrator
dae = SXFunction(dae_fcn_in,daeOut(ode=ocp.ode))
integrator = CVodesIntegrator(dae)
# Output function
T = ocp.variable("T").var()
U = ocp.variable("U").var()
V = ocp.variable("V").var()
output_fcn_out = ocp.substituteDependents([T,U,V])
output_fcn_in = daeIn(
t=ocp.t,
x = vertcat(var(ocp.x)),
z = vertcat(var(ocp.z)),
p = vertcat(var(ocp.pi)+var(ocp.pf)+var(ocp.u))
)
output_fcn = SXFunction(output_fcn_in,output_fcn_out)
# Create a simulator
grid = NP.linspace(0,2,100)
simulator = Simulator(integrator,output_fcn,grid)
simulator.init()
# Pass initial conditions
x0 = getStart(ocp.x)
simulator.setInput(x0,"x0")
# Simulate
simulator.evaluate()
integrator.printStats()
# Plot
plt.figure(3)
p1, = plt.plot(grid,simulator.output(0))
p2, = plt.plot(grid,simulator.output(1))
plt.xlabel("t")
plt.ylabel("T(t)")
plt.legend([p2, p1], ["T", "U"])
plt.title("c.f. Fritzson figure 15-14")
plt.figure(4)
plt.plot(grid,simulator.output(2))
plt.xlabel("t")
plt.ylabel("V(t)")
plt.title("Approximation of V")
plt.draw()
# Compile the next example (conservation of energy in control volume)
comp("CtrlFlowSystem")
# Allocate a parser and load the xml
ocp = SymbolicOCP()
ocp.parseFMI('CtrlFlowSystem.xml')
# Make the OCP explicit
ocp.makeExplicit()
# Print the ocp
print ocp
# The problem has no differential states, so instead of integrating, we just solve for mdot...
plt.show()
| lgpl-3.0 |
sysid/kg | fish/ShowMeTheFish.py | 1 | 23609 | """
credit: https://www.kaggle.com/xingyang/the-nature-conservancy-fisheries-monitoring/show-me-the-fishes-object-localization-with-cnn
code comments: tw
In order to run this script, you need to download the annotation files from https://www.kaggle.com/c/the-nature-conservancy-fisheries-monitoring/discussion/25902 and modify the DATASET_FOLDER_PATH variable. The script has been tested on Python 3.6 with latest packages. You might need to modify the script because of the possible compatibility issues.
The localization algorithm implemented here could achieve satisfactory results on the testing dataset. To further improve the performance, you may find the following links useful.
https://deepsense.io/deep-learning-right-whale-recognition-kaggle/
http://felixlaumon.github.io/2015/01/08/kaggle-right-whale.html
https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
It's a regression task and I want to predict 4 numbers, namely,
(row_start_index / row_size, (row_end_index - row_start_index) / row_size, column_start_index / column_size,
(column_end_index - column_start_index) / column_size). These 4 numbers are in the range of [0, 1].
Using "sigmoid" activation is appropriate although we are not working on probabilities.
Another choice is to use a clipping function in the end. I presume that there won't be huge differences between these two options.
"""
#assert False
import matplotlib
matplotlib.use("Agg")
import os
import glob
import shutil
import json
import pylab
import numpy as np
import keras
import tensorflow as tf
from keras.applications.vgg16 import VGG16
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Dropout, Flatten, Input
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.visualize_util import plot
from scipy.misc import imread, imsave, imresize
from sklearn.cluster import DBSCAN
from sklearn.model_selection import GroupShuffleSplit
# Dataset
#DATASET_FOLDER_PATH = os.path.join(os.path.expanduser("~"), "Documents/Dataset/The Nature Conservancy Fisheries Monitoring")
DATASET_FOLDER_PATH = os.path.join(os.getcwd(), "data/original")
TRAIN_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "train")
TEST_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "test_stg1")
LOCALIZATION_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "localization")
ANNOTATION_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "annotations")
CLUSTERING_RESULT_FILE_PATH = os.path.join(DATASET_FOLDER_PATH, "clustering_result.npy")
# Workspace
WORKSPACE_FOLDER_PATH = os.path.join("/tmp", os.path.basename(DATASET_FOLDER_PATH))
CLUSTERING_FOLDER_PATH = os.path.join(WORKSPACE_FOLDER_PATH, "clustering")
ACTUAL_DATASET_FOLDER_PATH = os.path.join(WORKSPACE_FOLDER_PATH, "actual_dataset")
ACTUAL_TRAIN_ORIGINAL_FOLDER_PATH = os.path.join(ACTUAL_DATASET_FOLDER_PATH, "train_original")
ACTUAL_VALID_ORIGINAL_FOLDER_PATH = os.path.join(ACTUAL_DATASET_FOLDER_PATH, "valid_original")
ACTUAL_TRAIN_LOCALIZATION_FOLDER_PATH = os.path.join(ACTUAL_DATASET_FOLDER_PATH, "train_localization")
ACTUAL_VALID_LOCALIZATION_FOLDER_PATH = os.path.join(ACTUAL_DATASET_FOLDER_PATH, "valid_localization")
# Output
OUTPUT_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "{}_output".format(os.path.basename(__file__).split(".")[0]))
VISUALIZATION_FOLDER_PATH = os.path.join(OUTPUT_FOLDER_PATH, "Visualization")
OPTIMAL_WEIGHTS_FOLDER_PATH = os.path.join(OUTPUT_FOLDER_PATH, "Optimal_Weights")
OPTIMAL_WEIGHTS_FILE_RULE = os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "epoch_{epoch:03d}-loss_{loss:.5f}-val_loss_{val_loss:.5f}.h5")
# Image processing
IMAGE_ROW_SIZE = 256
IMAGE_COLUMN_SIZE = 256
# Training and Testing procedure
MAXIMUM_EPOCH_NUM = 100 # 1000
PATIENCE = 100
BATCH_SIZE = 8
INSPECT_SIZE = 4
def reformat_testing_dataset():
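    # Keras' flow_from_directory expects images nested inside class subfolders,
    # so the flat test images are moved into a single "dummy" class folder.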
# Create a dummy folder
dummy_test_folder_path = os.path.join(TEST_FOLDER_PATH, "dummy")
os.makedirs(dummy_test_folder_path, exist_ok=True)
# Move files to the dummy folder if needed
file_path_list = glob.glob(os.path.join(TEST_FOLDER_PATH, "*"))
for file_path in file_path_list:
if os.path.isfile(file_path):
shutil.move(file_path, os.path.join(dummy_test_folder_path, os.path.basename(file_path)))
def load_annotation():
annotation_dict = {}
annotation_file_path_list = glob.glob(os.path.join(ANNOTATION_FOLDER_PATH, "*.json"))
for annotation_file_path in annotation_file_path_list:
with open(annotation_file_path) as annotation_file:
annotation_file_content = json.load(annotation_file)
for item in annotation_file_content:
key = os.path.basename(item["filename"])
if key in annotation_dict:
assert False, "Found existing key {}!!!".format(key)
value = []
for annotation in item["annotations"]:
value.append(np.clip((annotation["x"], annotation["width"], annotation["y"], annotation["height"]), 0, np.inf).astype(np.int))
annotation_dict[key] = value
return annotation_dict
def reformat_localization():
print("Creating the localization folder ...")
os.makedirs(LOCALIZATION_FOLDER_PATH, exist_ok=True)
print("Loading annotation ...")
annotation_dict = load_annotation()
original_image_path_list = glob.glob(os.path.join(TRAIN_FOLDER_PATH, "*/*"))
for original_image_path in original_image_path_list:
localization_image_path = LOCALIZATION_FOLDER_PATH + original_image_path[len(TRAIN_FOLDER_PATH):]
if os.path.isfile(localization_image_path):
continue
localization_image_content = np.zeros(imread(original_image_path).shape[:2], dtype=np.uint8)
for annotation_x, annotation_width, annotation_y, annotation_height in annotation_dict.get(os.path.basename(original_image_path), []):
localization_image_content[annotation_y:annotation_y + annotation_height, annotation_x:annotation_x + annotation_width] = 255
os.makedirs(os.path.abspath(os.path.join(localization_image_path, os.pardir)), exist_ok=True)
imsave(localization_image_path, localization_image_content)
def perform_CV(image_path_list, resized_image_row_size=64, resized_image_column_size=64):
    '''
    Cluster the boat images at a reduced 64x64 resolution, then build the
    train/validation split from these clusters so that each cluster ends up
    entirely in either the training set or the validation set, never in both.
    '''
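    # Toy illustration of the group-aware split performed at the end of this
    # function: with groups = [0, 0, 1, 1, 2, 2], GroupShuffleSplit keeps every
    # group intact, e.g. train indices [0, 1, 4, 5] (groups 0 and 2) and valid
    # indices [2, 3] (group 1); one boat cluster never appears on both sides.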
if os.path.isfile(CLUSTERING_RESULT_FILE_PATH):
print("Loading clustering result ...")
image_name_to_cluster_ID_array = np.load(CLUSTERING_RESULT_FILE_PATH)
image_name_to_cluster_ID_dict = dict(image_name_to_cluster_ID_array)
cluster_ID_array = np.array([image_name_to_cluster_ID_dict[os.path.basename(image_path)] for image_path in image_path_list], dtype=np.int)
else:
print("Reading image content ...")
# (3777, 64, 64, 3)
image_content_array = np.array([imresize(imread(image_path), (resized_image_row_size, resized_image_column_size)) for image_path in image_path_list])
# (3777, 12288)
image_content_array = np.reshape(image_content_array, (len(image_content_array), -1))
# normalize picture-wise
image_content_array = np.array([(image_content - image_content.mean()) / image_content.std() for image_content in image_content_array], dtype=np.float32)
        # tw: eps sets the L1 distance threshold between normalized pixel vectors
        # used to group images of the same boat; a label of -1 marks noise (unclustered images)
        print("Applying boat/image clustering ...")
cluster_ID_array = DBSCAN(eps=1.5 * resized_image_row_size * resized_image_column_size, min_samples=20, metric="l1", n_jobs=-1).fit_predict(image_content_array)
print("Saving clustering result ...")
image_name_to_cluster_ID_array = np.transpose(np.vstack(([os.path.basename(image_path) for image_path in image_path_list], cluster_ID_array)))
np.save(CLUSTERING_RESULT_FILE_PATH, image_name_to_cluster_ID_array)
print("The ID value and count are as follows:")
cluster_ID_values, cluster_ID_counts = np.unique(cluster_ID_array, return_counts=True)
for cluster_ID_value, cluster_ID_count in zip(cluster_ID_values, cluster_ID_counts):
print("{}\t{}".format(cluster_ID_value, cluster_ID_count))
print("Visualizing clustering result ...")
shutil.rmtree(CLUSTERING_FOLDER_PATH, ignore_errors=True)
for image_path, cluster_ID in zip(image_path_list, cluster_ID_array):
sub_clustering_folder_path = os.path.join(CLUSTERING_FOLDER_PATH, str(cluster_ID))
if not os.path.isdir(sub_clustering_folder_path):
os.makedirs(sub_clustering_folder_path)
os.symlink(image_path, os.path.join(sub_clustering_folder_path, os.path.basename(image_path)))
# tw: exclude groups completely from training for validation, to make sure the model has not seen the group at all
cv_object = GroupShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
# return array of indices, where clusters are either in or out completely, no mix
for cv_index, (train_index_array, valid_index_array) in enumerate(cv_object.split(X=np.zeros((len(cluster_ID_array), 1)), groups=cluster_ID_array), start=1):
print("Checking cv {} ...".format(cv_index))
# get the proportion of validation samples (must not be too high or low)
valid_sample_ratio = len(valid_index_array) / (len(train_index_array) + len(valid_index_array))
if -1 in np.unique(cluster_ID_array[train_index_array]) and valid_sample_ratio > 0.15 and valid_sample_ratio < 0.25:
# get number of each class in train and validation set [train_index_array]
train_unique_label, train_unique_counts = np.unique([image_path.split("/")[-2] for image_path in np.array(image_path_list)[train_index_array]], return_counts=True)
valid_unique_label, valid_unique_counts = np.unique([image_path.split("/")[-2] for image_path in np.array(image_path_list)[valid_index_array]], return_counts=True)
# make sure that all classes in train set also are in validation set
if np.array_equal(train_unique_label, valid_unique_label):
                # calculate the class proportions within the train and valid sets to check that their distributions are similar
train_unique_ratio = train_unique_counts / np.sum(train_unique_counts)
valid_unique_ratio = valid_unique_counts / np.sum(valid_unique_counts)
print("Using {:.2f}% original training samples as validation samples ...".format(valid_sample_ratio * 100))
print("For training samples: {}".format(train_unique_ratio))
print("For validation samples: {}".format(valid_unique_ratio))
return train_index_array, valid_index_array
assert False
def reorganize_dataset():
# Get list of files
original_image_path_list = sorted(glob.glob(os.path.join(TRAIN_FOLDER_PATH, "*/*")))
localization_image_path_list = sorted(glob.glob(os.path.join(LOCALIZATION_FOLDER_PATH, "*/*")))
# Sanity check
original_image_name_list = [os.path.basename(image_path) for image_path in original_image_path_list]
localization_image_name_list = [os.path.basename(image_path) for image_path in localization_image_path_list]
assert np.array_equal(original_image_name_list, localization_image_name_list)
# Perform Cross Validation
train_index_array, valid_index_array = perform_CV(original_image_path_list)
# Create symbolic links
shutil.rmtree(ACTUAL_DATASET_FOLDER_PATH, ignore_errors=True)
for (actual_original_folder_path, actual_localization_folder_path), index_array in zip(
((ACTUAL_TRAIN_ORIGINAL_FOLDER_PATH, ACTUAL_TRAIN_LOCALIZATION_FOLDER_PATH),
(ACTUAL_VALID_ORIGINAL_FOLDER_PATH, ACTUAL_VALID_LOCALIZATION_FOLDER_PATH)),
(train_index_array, valid_index_array)):
for index_value in index_array:
original_image_path = original_image_path_list[index_value]
localization_image_path = localization_image_path_list[index_value]
path_suffix = original_image_path[len(TRAIN_FOLDER_PATH):]
assert path_suffix == localization_image_path[len(LOCALIZATION_FOLDER_PATH):]
if path_suffix[1:].startswith("NoF"):
continue
actual_original_image_path = actual_original_folder_path + path_suffix
actual_localization_image_path = actual_localization_folder_path + path_suffix
os.makedirs(os.path.abspath(os.path.join(actual_original_image_path, os.pardir)), exist_ok=True)
os.makedirs(os.path.abspath(os.path.join(actual_localization_image_path, os.pardir)), exist_ok=True)
os.symlink(original_image_path, actual_original_image_path)
os.symlink(localization_image_path, actual_localization_image_path)
return len(glob.glob(os.path.join(ACTUAL_TRAIN_ORIGINAL_FOLDER_PATH, "*/*"))), len(glob.glob(os.path.join(ACTUAL_VALID_ORIGINAL_FOLDER_PATH, "*/*")))
def init_model(target_num=4, FC_block_num=2, FC_feature_dim=512, dropout_ratio=0.5, learning_rate=0.0001):
# Get the input tensor
#input_tensor = Input(shape=(3, IMAGE_ROW_SIZE, IMAGE_COLUMN_SIZE))
input_tensor = Input(shape=(IMAGE_ROW_SIZE, IMAGE_COLUMN_SIZE, 3)) # tw: TF
# Convolutional blocks
pretrained_model = VGG16(include_top=False, weights="imagenet")
for layer in pretrained_model.layers:
layer.trainable = False
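    # The frozen VGG16 convolutional base acts as a fixed feature extractor;
    # only the fully connected head defined below is trained.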
output_tensor = pretrained_model(input_tensor)
# FullyConnected blocks
output_tensor = Flatten()(output_tensor)
for _ in range(FC_block_num):
output_tensor = Dense(FC_feature_dim, activation="relu")(output_tensor)
output_tensor = BatchNormalization()(output_tensor)
output_tensor = Dropout(dropout_ratio)(output_tensor)
    # Yes. It's a regression task and I want to predict 4 numbers, namely,
    # (row_start_index / row_size, (row_end_index - row_start_index) / row_size,
    # column_start_index / column_size, (column_end_index - column_start_index) / column_size).
    # These 4 numbers are in the range of [0, 1]. Using "sigmoid" activation is
    # appropriate although we are not working on probabilities. Another choice is
    # to use a clipping function in the end. I presume that there won't be huge
    # differences between these two options.
output_tensor = Dense(target_num, activation="sigmoid")(output_tensor)
# Define and compile the model
model = Model(input_tensor, output_tensor)
model.compile(optimizer=Adam(lr=learning_rate), loss="mse")
# http://stackoverflow.com/questions/41818654/keras-batchnormalization-uninitialized-value
keras.backend.get_session().run(tf.global_variables_initializer())
#plot(model, to_file=os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "model.png"), show_shapes=True, show_layer_names=True)
model.summary()
return model
def convert_localization_to_annotation(localization_array, row_size=IMAGE_ROW_SIZE, column_size=IMAGE_COLUMN_SIZE):
annotation_list = []
for localization in localization_array:
localization = localization[0]
mask_along_row = np.max(localization, axis=1) > 0.5
row_start_index = np.argmax(mask_along_row)
row_end_index = len(mask_along_row) - np.argmax(np.flipud(mask_along_row)) - 1
mask_along_column = np.max(localization, axis=0) > 0.5
column_start_index = np.argmax(mask_along_column)
column_end_index = len(mask_along_column) - np.argmax(np.flipud(mask_along_column)) - 1
annotation = (row_start_index / row_size, (row_end_index - row_start_index) / row_size, column_start_index / column_size, (column_end_index - column_start_index) / column_size)
annotation_list.append(annotation)
return np.array(annotation_list).astype(np.float32)
def convert_annotation_to_localization(annotation_array, row_size=IMAGE_ROW_SIZE, column_size=IMAGE_COLUMN_SIZE):
localization_list = []
for annotation in annotation_array:
localization = np.zeros((row_size, column_size))
row_start_index = np.max((0, int(annotation[0] * row_size)))
row_end_index = np.min((row_start_index + int(annotation[1] * row_size), row_size - 1))
column_start_index = np.max((0, int(annotation[2] * column_size)))
column_end_index = np.min((column_start_index + int(annotation[3] * column_size), column_size - 1))
print("bbx: ", column_start_index, column_end_index, row_start_index, row_end_index)
localization[row_start_index:row_end_index + 1, column_start_index:column_end_index + 1] = 1
#localization_list.append(np.expand_dims(localization, axis=0)) # TH dim
#localization_list.append(localization[:, :, np.newaxis])
localization_list.append(localization)
return np.array(localization_list).astype(np.float32)
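# Minimal round-trip sketch (illustrative only, never called): the two converters
# above are intended to be approximate inverses of each other, up to index
# rounding and the 0.5 mask threshold. The annotation values here are hypothetical.
def _example_annotation_roundtrip():
    # box starting at row 64 / column 32, spanning a quarter of a 256x256 mask in each direction
    annotation_array = np.array([[0.25, 0.25, 0.125, 0.25]], dtype=np.float32)
    localization_array = convert_annotation_to_localization(annotation_array)
    # add a leading axis per sample, as expected by convert_localization_to_annotation
    recovered = convert_localization_to_annotation(localization_array[:, np.newaxis])
    return annotation_array, recovered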
def load_dataset(folder_path_list, color_mode_list, batch_size, classes=None, class_mode=None, shuffle=True, seed=None, apply_conversion=False):
# Get the generator of the dataset
data_generator_list = []
for folder_path, color_mode in zip(folder_path_list, color_mode_list):
data_generator_object = ImageDataGenerator(
rotation_range=0, # 10
width_shift_range=0, #0.05,
height_shift_range=0, #0.05,
shear_range=0, #0.05,
zoom_range=0, #0.2,
horizontal_flip=False, #True,
rescale=1.0 / 255)
data_generator = data_generator_object.flow_from_directory(
directory=folder_path,
target_size=(IMAGE_ROW_SIZE, IMAGE_COLUMN_SIZE),
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
batch_size=batch_size,
shuffle=shuffle,
seed=seed)
data_generator_list.append(data_generator)
# Sanity check
filenames_list = [data_generator.filenames for data_generator in data_generator_list]
assert all(filenames == filenames_list[0] for filenames in filenames_list)
if apply_conversion:
assert len(data_generator_list) == 2
for X_array, Y_array in zip(*data_generator_list):
yield (X_array, convert_localization_to_annotation(Y_array))
else:
for array_tuple in zip(*data_generator_list):
yield array_tuple
class InspectPrediction(Callback):
def __init__(self, data_generator_list):
super(InspectPrediction, self).__init__()
self.data_generator_list = data_generator_list
def on_epoch_end(self, epoch, logs=None):
for data_generator_index, data_generator in enumerate(self.data_generator_list, start=1):
X_array, GT_Y_array = next(data_generator)
P_Y_array = convert_annotation_to_localization(self.model.predict_on_batch(X_array))
for sample_index, (X, GT_Y, P_Y) in enumerate(zip(X_array, GT_Y_array, P_Y_array), start=1):
pylab.figure()
pylab.subplot(1, 3, 1)
#pylab.imshow(np.rollaxis(X, 0, 3))
pylab.imshow(X)
pylab.title("X")
pylab.axis("off")
pylab.subplot(1, 3, 2)
#pylab.imshow(GT_Y[0], cmap="gray")
pylab.imshow(np.squeeze(GT_Y), cmap="gray")
pylab.title("GT_Y")
pylab.axis("off")
pylab.subplot(1, 3, 3)
#pylab.imshow(P_Y[0], cmap="gray")
pylab.imshow(P_Y, cmap="gray")
pylab.title("P_Y")
pylab.axis("off")
pylab.savefig(os.path.join(VISUALIZATION_FOLDER_PATH, "Epoch_{}_Split_{}_Sample_{}.png".format(epoch + 1, data_generator_index, sample_index)))
pylab.close()
class InspectLoss(Callback):
def __init__(self):
super(InspectLoss, self).__init__()
self.train_loss_list = []
self.valid_loss_list = []
def on_epoch_end(self, epoch, logs=None):
train_loss = logs.get("loss")
valid_loss = logs.get("val_loss")
self.train_loss_list.append(train_loss)
self.valid_loss_list.append(valid_loss)
epoch_index_array = np.arange(len(self.train_loss_list)) + 1
pylab.figure()
pylab.plot(epoch_index_array, self.train_loss_list, "yellowgreen", label="train_loss")
pylab.plot(epoch_index_array, self.valid_loss_list, "lightskyblue", label="valid_loss")
pylab.grid()
pylab.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=2, ncol=2, mode="expand", borderaxespad=0.)
pylab.savefig(os.path.join(OUTPUT_FOLDER_PATH, "Loss_Curve.png"))
pylab.close()
def run():
print("Creating folders ...")
os.makedirs(VISUALIZATION_FOLDER_PATH, exist_ok=True)
os.makedirs(OPTIMAL_WEIGHTS_FOLDER_PATH, exist_ok=True)
print("Reformatting testing dataset ...")
reformat_testing_dataset()
print("Reformatting localization ...")
reformat_localization()
print("Reorganizing dataset ...")
train_sample_num, valid_sample_num = reorganize_dataset()
print("Initializing model ...")
model = init_model()
weights_file_path_list = sorted(glob.glob(os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "*.h5")))
wname = 'epoch_017-loss_0.00547-val_loss_0.00831.h5'
if len(weights_file_path_list) > 0:
print("Loading weights: ", wname)
model.load_weights(os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, wname))
print("Performing the training procedure ...")
train_generator = load_dataset(folder_path_list=[ACTUAL_TRAIN_ORIGINAL_FOLDER_PATH, ACTUAL_TRAIN_LOCALIZATION_FOLDER_PATH], color_mode_list=["rgb", "grayscale"], batch_size=BATCH_SIZE, seed=0, apply_conversion=True)
valid_generator = load_dataset(folder_path_list=[ACTUAL_VALID_ORIGINAL_FOLDER_PATH, ACTUAL_VALID_LOCALIZATION_FOLDER_PATH], color_mode_list=["rgb", "grayscale"], batch_size=BATCH_SIZE, seed=0, apply_conversion=True)
train_generator_for_inspection = load_dataset(folder_path_list=[ACTUAL_TRAIN_ORIGINAL_FOLDER_PATH, ACTUAL_TRAIN_LOCALIZATION_FOLDER_PATH], color_mode_list=["rgb", "grayscale"], batch_size=INSPECT_SIZE, seed=1)
valid_generator_for_inspection = load_dataset(folder_path_list=[ACTUAL_VALID_ORIGINAL_FOLDER_PATH, ACTUAL_VALID_LOCALIZATION_FOLDER_PATH], color_mode_list=["rgb", "grayscale"], batch_size=INSPECT_SIZE, seed=1)
earlystopping_callback = EarlyStopping(monitor="val_loss", patience=PATIENCE)
modelcheckpoint_callback = ModelCheckpoint(OPTIMAL_WEIGHTS_FILE_RULE, monitor="val_loss", save_best_only=True, save_weights_only=True)
inspectprediction_callback = InspectPrediction([train_generator_for_inspection, valid_generator_for_inspection])
inspectloss_callback = InspectLoss()
model.fit_generator(generator=train_generator,
samples_per_epoch=train_sample_num,
validation_data=valid_generator,
nb_val_samples=valid_sample_num,
callbacks=[earlystopping_callback, modelcheckpoint_callback, inspectprediction_callback, inspectloss_callback],
nb_epoch=MAXIMUM_EPOCH_NUM, verbose=1)
weights_file_path_list = sorted(glob.glob(os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "*.h5")))
print("All done!")
if __name__ == "__main__":
run()
| mit |
Windy-Ground/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 59 | 35368 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
import scipy
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
sp_version = tuple([int(s) for s in scipy.__version__.split('.')])
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42)]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
        # all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear', 'sag']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use pre-defined folds, as internally generated folds would differ for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-6
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight dict is provided for a
    # multiclass problem. However, it can handle binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
class_weight='balanced', max_iter=2000)
clf_sag.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that liblinear fails when sample weights are provided
clf_lib = LR(solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y,
sample_weight=np.ones(y.shape[0]))
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
clf_sw_none = LR(solver='lbfgs', fit_intercept=False)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver='lbfgs', fit_intercept=False)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
clf_sw_lbfgs.fit(X, y, sample_weight=y + 1)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
clf_sw_n.fit(X, y, sample_weight=y + 1)
clf_sw_sag = LR(solver='sag', fit_intercept=False,
max_iter=2000, tol=1e-7)
clf_sw_sag.fit(X, y, sample_weight=y + 1)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
        # Test that passing class_weight={0: 1, 1: 2} is the same as passing
        # class_weight={0: 1, 1: 1} but setting sample_weight to 2 for all
        # instances of class 1
clf_cw_12 = LR(solver='lbfgs', fit_intercept=False,
class_weight={0: 1, 1: 2})
clf_cw_12.fit(X, y)
sample_weight = np.ones(y.shape[0])
sample_weight[y == 1] = 2
clf_sw_12 = LR(solver='lbfgs', fit_intercept=False)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in this
    # case we take the average of the coefs after fitting across all the
    # folds, they need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
n_classes=3, n_informative=10)
    # Predicted probabilities using the true-entropy loss should give a
# smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
    # Predicted probabilities using the soft-max function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
# Test that the maximum number of iteration is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
solvers = ['newton-cg', 'liblinear', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for max_iter in range(1, 5):
for solver in solvers:
lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
random_state=0, solver=solver)
lr.fit(X, y_bin)
assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
# OvR case
n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
clf = LogisticRegression(tol=1e-2, multi_class='ovr',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
n_classes = np.unique(y).shape[0]
clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
# multinomial case
n_classes = 1
if solver in ('liblinear', 'sag'):
break
clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
# A 1-iteration second fit on same data should give almost same result
# with warm starting, and quite different result without warm starting.
# Warm starting does not work with liblinear solver.
X, y = iris.data, iris.target
solvers = ['newton-cg', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for warm_start in [True, False]:
for fit_intercept in [True, False]:
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
if solver == 'sag' and multi_class == 'multinomial':
break
clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42, max_iter=100,
fit_intercept=fit_intercept)
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
with ignore_warnings():
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = ("Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept),
str(warm_start)))
if warm_start:
assert_greater(2.0, cum_diff, msg)
else:
assert_greater(cum_diff, 2.0, msg)
| bsd-3-clause |
zaxtax/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
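        # Note that the comparison below is strict: a feature whose variance
        # equals the threshold exactly is removed, so the default threshold of
        # 0.0 drops constant features.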
return self.variances_ > self.threshold
| bsd-3-clause |
arongdari/digbeta | tour/src/poi_photo_assign.py | 3 | 3255 | import math
import sys
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib import cm
def main():
"""Main Procedure"""
pois = [(10, 0), (20, 0), (30, 0)]
photos1 = [(10, 1), (20, 1), (30, 1)]
photos2 = [(10, 1), (11, 1), (12, 1)]
photos3 = [(20, 1), (21, 1), (22, 1)]
photos4 = [(30, 1), (31, 1), (32, 1)]
photos5 = [(5, 6), (18, 8), (28, 3)]
assignment = assign(photos1, pois)
draw(photos1, pois, assignment)
assignment = assign(photos2, pois)
draw(photos2, pois, assignment)
assignment = assign(photos3, pois)
draw(photos3, pois, assignment)
assignment = assign(photos4, pois)
draw(photos4, pois, assignment)
assignment = assign(photos5, pois)
draw(photos5, pois, assignment)
a = input('Press any key to continue ...')
def assign(photos, pois):
"""Assign photos to POIs with minimum cost"""
#REF: en.wikipedia.org/wiki/Minimum-cost_flow_problem
#assert(len(photos) == len(pois))
dists = np.zeros((len(photos), len(pois)), dtype=np.float64)
for i, d in enumerate(photos):
for j, p in enumerate(pois):
dists[i, j] = round(math.sqrt( (d[0] - p[0])**2 + (d[1] - p[1])**2 ))
#print(dists)
G = nx.DiGraph()
# complete bipartite graph: photo -> POI
# infinity capacity
for i in range(len(photos)):
for j in range(len(pois)):
u = 'd' + str(i)
v = 'p' + str(j)
G.add_edge(u, v, weight=dists[i, j])
# source -> photo
# capacity = 1
for i in range(len(photos)):
u = 's'
v = 'd' + str(i)
G.add_edge(u, v, capacity=1, weight=0)
# POI -> sink
# infinity capacity
for j in range(len(pois)):
u = 'p' + str(j)
v = 't'
G.add_edge(u, v, weight=0)
# demand for source and sink
G.add_node('s', demand=-len(photos))
G.add_node('t', demand=len(photos))
#print(G.nodes())
#print(G.edges())
flowDict = nx.min_cost_flow(G)
assignment = dict()
for e in G.edges():
u = e[0]
v = e[1]
if u != 's' and v != 't' and flowDict[u][v] > 0:
#print(e, flowDict[u][v])
assignment[u] = v
return assignment
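# The returned mapping uses the synthetic node names built above: keys are
# photo nodes 'd<i>' and values are POI nodes 'p<j>', e.g. for three photos
# assigned to three POIs it looks like {'d0': 'p0', 'd1': 'p1', 'd2': 'p2'}.
# draw() below relies on this naming convention to recover the POI index.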
def draw(photos, pois, assignment):
"""visualize the photo-POI assignment"""
#assert(len(photos) == len(pois) == len(assignment.keys()))
assert(len(photos) == len(assignment.keys()))
fig = plt.figure()
plt.axis('equal')
colors_poi = np.linspace(0, 1, len(pois))
X_poi = [t[0] for t in pois]
Y_poi = [t[1] for t in pois]
#plt.scatter(X_poi, Y_poi, s=50, marker='o', c=colors_poi, cmap=cm.Greens)
plt.scatter(X_poi, Y_poi, s=50, marker='s', c=cm.Greens(colors_poi), label='POI')
colors_photo = np.zeros(len(photos), dtype=np.float64)
for i in range(len(photos)):
k = 'd' + str(i)
v = assignment[k] # e.g. 'p1'
        idx = int(v[1:])  # parse the POI index from labels like 'p1' or 'p12'
colors_photo[i] = colors_poi[idx]
X_photo = [t[0] for t in photos]
Y_photo = [t[1] for t in photos]
plt.scatter(X_photo, Y_photo, s=30, marker='o', c=cm.Greens(colors_photo), label='Photo')
plt.legend()
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
janscience/thunderfish | tests/test_bestwindow.py | 3 | 2649 | from nose.tools import assert_true, assert_equal, assert_almost_equal
import os
import numpy as np
import matplotlib.pyplot as plt
import thunderfish.bestwindow as bw
def test_best_window():
# generate data:
rate = 100000.0
clip = 1.3
time = np.arange(0.0, 1.0, 1.0 / rate)
snippets = []
f = 600.0
amf = 20.0
for ampl in [0.2, 0.5, 0.8]:
for am_ampl in [0.0, 0.3, 0.9]:
data = ampl * np.sin(2.0 * np.pi * f * time) * (1.0 + am_ampl * np.sin(2.0 * np.pi * amf * time))
data[data > clip] = clip
data[data < -clip] = -clip
snippets.extend(data)
data = np.asarray(snippets)
# compute best window:
print("call bestwindow() function...")
idx0, idx1, clipped = bw.best_window_indices(data, rate, expand=False,
win_size=1.0, win_shift=0.1,
min_clip=-clip, max_clip=clip,
w_cv_ampl=10.0, tolerance=0.5)
assert_equal(idx0, 6 * len(time), 'bestwindow() did not correctly detect start of best window')
assert_equal(idx1, 7 * len(time), 'bestwindow() did not correctly detect end of best window')
    assert_almost_equal(clipped, 0.0,
                        msg='bestwindow() did not correctly detect clipped fraction')
# clipping:
clip_win_size = 0.5
min_clip, max_clip = bw.clip_amplitudes(data, int(clip_win_size * rate),
min_ampl=-1.3, max_ampl=1.3,
min_fac=2.0, nbins=40)
assert_true(min_clip <= -0.8 * clip and min_clip >= -clip,
'clip_amplitudes() failed to detect minimum clip amplitude')
assert_true(max_clip >= 0.8 * clip and max_clip <= clip,
'clip_amplitudes() failed to detect maximum clip amplitude')
# plotting 1:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
bw.plot_best_data(ax, data, rate, 'a.u.', idx0, idx1, clipped)
fig.savefig('bestwindow.png')
assert_true(os.path.exists('bestwindow.png'), 'plotting failed')
os.remove('bestwindow.png')
# plotting 2:
fig, ax = plt.subplots(5, sharex=True)
bw.best_window_indices(data, rate, expand=False,
win_size=1.0, win_shift=0.1,
min_clip=-clip, max_clip=clip,
w_cv_ampl=10.0, tolerance=0.5,
plot_data_func=bw.plot_best_window, ax=ax)
fig.savefig('bestdata.png')
assert_true(os.path.exists('bestdata.png'), 'plotting failed')
os.remove('bestdata.png')
| gpl-3.0 |
leal26/pyXFOIL | aeropy/geometry/frame.py | 2 | 3366 | from aeropy.geometry.parametric import poly
import numpy as np
import math
class frame():
def __init__(self, curve=poly(), frame='Frenet-Serret',
z1=np.linspace(0, 1, 11)):
        self.curve = curve
self.frame = frame
self.z1 = z1
self.z2 = self.curve.z2(z1)
def z1_default(self, z1):
if z1 is None:
return(self.z1)
else:
return(z1)
def bishop(self, z1=None):
z1 = self.z1_default(z1)
T = self.T(z1)
alpha = self.alpha(z1)
M1 = self.M1(z1=z1, alpha=alpha)
M2 = self.M2(z1=z1, alpha=alpha)
return(T, M1, M2)
def frenet_serret(self, z1=None):
z1 = self.z1_default(z1)
T = self.T(z1)
N = self.N(z1=z1)
B = self.B(z1=z1)
return(T, N, B)
def T(self, z1=None):
z1 = self.z1_default(z1)
return(self.curve.rx1(z1))
def N(self, z1=None):
z1 = self.z1_default(z1)
return(self.curve.rx11(z1))
def M1(self, z1=None, alpha=None):
z1 = self.z1_default(z1)
if alpha is None:
alpha = self.alpha(z1)
if self.frame == 'Bishop':
            return(np.cos(alpha)*self.N(z1) -
                   np.sin(alpha)*self.B(z1))
def M2(self, z1=None, alpha=None):
z1 = self.z1_default(z1)
if alpha is None:
alpha = self.alpha(z1)
if self.frame == 'Bishop':
return(np.sin(alpha)*self.N(z1) +
np.cos(alpha)*self.B(z1))
def B(self, z1=None):
z1 = self.z1_default(z1)
try:
return(np.ones(len(z1)))
except(ValueError):
return(1)
def curvature(self, z1=None):
z1 = self.z1_default(z1)
return(self.curve.rx11(z1))
def torsion(self, z1=None):
z1 = self.z1_default(z1)
try:
return(np.zeros(len(z1)))
except(ValueError):
return(0)
def alpha(self, z1=None, alpha0=0.0):
z1 = self.z1_default(z1)
alpha_list = []
inflections = self.curve.inflection_points()
j = 0
alpha_j = alpha0
for i in range(len(z1)):
if j != len(inflections):
if z1[i] >= inflections[j]:
alpha_j += math.pi
j += 1
alpha_list.append(alpha_j)
return(np.array(alpha_list))
def B(x, k, i, t):
if k == 0:
return 1.0 if t[i] <= x < t[i+1] else 0.0
if t[i+k] == t[i]:
c1 = 0.0
else:
c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t)
if t[i+k+1] == t[i+1]:
c2 = 0.0
else:
c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t)
return c1 + c2
def bspline(x, t, c, k):
n = len(t) - k - 1
assert (n >= k+1) and (len(c) >= n)
return sum(c[i] * B(x, k, i, t) for i in range(n))
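# B() above is the Cox-de Boor recursion for B-spline basis functions, and
# bspline() evaluates the curve as the weighted sum of c[i] * B_{i,k}(x).
# The __main__ block below evaluates a quadratic (k=2) spline on the knot
# vector t with coefficients c for x in [1.5, 4.5].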
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Bezier Curve
k = 2
t = [0, 1, 2, 3, 4, 5, 6]
c = [-1, 2, 0, -1]
x = np.linspace(1.5, 4.5, 50)
y_bezier = []
for x_i in x:
y_bezier.append(bspline(x_i, t, c, k))
# Hicks-Henne
plt.plot(x, y_bezier, label='Bezier')
plt.xlabel('x')
plt.ylabel('z')
plt.legend()
plt.show()
| mit |
TissueMAPS/TmLibrary | tmlib/workflow/jterator/module.py | 1 | 17816 | # TmLibrary - TissueMAPS library for distibuted image analysis routines.
# Copyright (C) 2016 Markus D. Herrmann, University of Zurich and Robin Hafen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import re
import logging
import imp
import collections
import importlib
import traceback
import numpy as np
import pandas as pd
from cStringIO import StringIO
from tmlib.workflow.jterator.utils import determine_language
from tmlib.workflow.jterator import handles as hdls
from tmlib.errors import PipelineRunError
logger = logging.getLogger(__name__)
class CaptureOutput(dict):
'''Class for capturing standard output and error and storing the strings
in dictionary.
Examples
--------
with CaptureOutput() as output:
foo()
Warning
-------
Using this approach screws up debugger break points.
'''
def __enter__(self):
self._stdout = sys.stdout
self._stderr = sys.stderr
sys.stdout = self._stringio_out = StringIO()
sys.stderr = self._stringio_err = StringIO()
return self
def __exit__(self, *args):
sys.stdout = self._stdout
sys.stderr = self._stderr
output = self._stringio_out.getvalue()
error = self._stringio_err.getvalue()
self.update({'stdout': output, 'stderr': error})
class ImageAnalysisModule(object):
'''Class for a Jterator module, the building block of an image analysis
pipeline.
'''
def __init__(self, name, source_file, handles):
'''
Parameters
----------
name: str
name of the module
source_file: str
name or path to program file that should be executed
handles: tmlib.workflow.jterator.description.HandleDescriptions
description of module input/output as provided
'''
self.name = name
self.source_file = source_file
self.handles = handles
self.outputs = dict()
self.persistent_store = dict()
def build_figure_filename(self, figures_dir, job_id):
'''Builds name of figure file into which module will write figure
output of the current job.
Parameters
----------
figures_dir: str
path to directory for figure output
job_id: int
one-based job index
Returns
-------
str
absolute path to the figure file
'''
return os.path.join(figures_dir, '%s_%.5d.json' % (self.name, job_id))
@property
def keyword_arguments(self):
'''dict: name and value of each input handle as key-value pairs'''
kwargs = collections.OrderedDict()
for handle in self.handles.input:
kwargs[handle.name] = handle.value
return kwargs
@property
def language(self):
'''str: language of the module (e.g. "Python")'''
return determine_language(self.source_file)
def _exec_m_module(self, engine):
module_name = os.path.splitext(os.path.basename(self.source_file))[0]
logger.debug(
'import module "%s" from source file: %s',
module_name, self.source_file
)
# FIXME: this inserts the wrong directory if the MATLAB module
# has the form `+directory` or `@directory` -- let's allow
# this for the moment since the module discovery code only
# deals with single-file modules, but it needs to be revisited
source_dir = os.path.dirname(self.source_file)
logger.debug(
'adding module source directory `%s` to MATLAB path ...',
source_dir
)
engine.eval(
"addpath('{0}');".format(source_dir)
)
engine.eval('version = {0}.VERSION'.format(module_name))
function_call_format_string = '[{outputs}] = {name}.main({inputs});'
        # NOTE: Matlab doesn't add imported classes to the workspace. To access
        # the "VERSION" property, we need to assign it to a variable first.
version = engine.get('version')
if version != self.handles.version:
raise PipelineRunError(
'Version of source and handles is not the same.'
)
kwargs = self.keyword_arguments
logger.debug(
'evaluate main() function with INPUTS: "%s"',
'", "'.join(kwargs.keys())
)
output_names = [handle.name for handle in self.handles.output]
func_call_string = function_call_format_string.format(
outputs=', '.join(output_names),
name=module_name,
inputs=', '.join(kwargs.keys())
)
# Add arguments as variable in Matlab session
for name, value in kwargs.iteritems():
engine.put(name, value)
# Evaluate the function call
# NOTE: Unfortunately, the matlab_wrapper engine doesn't return
# standard output and error (exceptions are caught, though).
# TODO: log to file
engine.eval(func_call_string)
for handle in self.handles.output:
val = engine.get('%s' % handle.name)
if isinstance(val, np.ndarray):
# Matlab returns arrays in Fortran order
handle.value = val.copy(order='C')
else:
handle.value = val
return self.handles.output
def _exec_py_module(self):
module_name = os.path.splitext(os.path.basename(self.source_file))[0]
logger.debug(
'import module "%s" from source file: %s',
module_name, self.source_file
)
module = imp.load_source(module_name, self.source_file)
if module.VERSION != self.handles.version:
raise PipelineRunError(
'Version of source and handles is not the same.'
)
func = getattr(module, 'main', None)
if func is None:
raise PipelineRunError(
'Module source file "%s" must contain a "main" function.'
% module_name
)
kwargs = self.keyword_arguments
logger.debug(
'evaluate main() function with INPUTS: "%s"',
'", "'.join(kwargs.keys())
)
py_out = func(**kwargs)
# TODO: We could import the output class and check for its type.
if not isinstance(py_out, tuple):
raise PipelineRunError(
'Module "%s" must return an object of type tuple.' % self.name
)
# Modules return a namedtuple.
for handle in self.handles.output:
if not hasattr(py_out, handle.name):
raise PipelineRunError(
'Module "%s" didn\'t return output argument "%s".'
% (self.name, handle.name)
)
handle.value = getattr(py_out, handle.name)
return self.handles.output
def _exec_r_module(self):
try:
import rpy2.robjects
from rpy2.robjects import numpy2ri
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
except ImportError:
raise ImportError(
'R module cannot be run, because '
'"rpy2" package is not installed.'
)
module_name = os.path.splitext(os.path.basename(self.source_file))[0]
        logger.debug(
            'import module "%s" from source file: %s',
            module_name, self.source_file
        )
logger.debug('source module: "%s"', self.source_file)
rpy2.robjects.r('source("{0}")'.format(self.source_file))
module = rpy2.robjects.r[module_name]
version = module.get('VERSION')[0]
if version != self.handles.version:
raise PipelineRunError(
'Version of source and handles is not the same.'
)
func = module.get('main')
numpy2ri.activate() # enables use of numpy arrays
pandas2ri.activate() # enable use of pandas data frames
kwargs = self.keyword_arguments
logger.debug(
'evaluate main() function with INPUTS: "%s"',
'", "'.join(kwargs.keys())
)
# R doesn't have unsigned integer types
for k, v in kwargs.iteritems():
if isinstance(v, np.ndarray):
if v.dtype == np.uint16 or v.dtype == np.uint8:
logging.debug(
'module "%s" input argument "%s": '
'convert unsigned integer data type to integer',
self.name, k
)
kwargs[k] = v.astype(int)
elif isinstance(v, pd.DataFrame):
# TODO: We may have to translate pandas data frames explicitly
# into the R equivalent.
# pandas2ri.py2ri(v)
kwargs[k] = v
args = rpy2.robjects.ListVector({k: v for k, v in kwargs.iteritems()})
base = importr('base')
r_out = base.do_call(func, args)
for handle in self.handles.output:
# NOTE: R functions are supposed to return a list. Therefore
# we can extract the output argument using rx2().
# The R equivalent would be indexing the list with "[[]]".
if isinstance(r_out.rx2(handle.name), rpy2.robjects.vectors.DataFrame):
handle.value = pandas2ri.ri2py(r_out.rx2(handle.name))
# handle.value = pd.DataFrame(r_out.rx2(handle.name))
else:
# NOTE: R doesn't have an unsigned integer data type.
# So we cast to uint16.
handle.value = numpy2ri.ri2py(r_out.rx2(handle.name)).astype(
np.uint16
)
# handle.value = np.array(r_out.rx2(handle.name), np.uint16)
return self.handles.output
def update_handles(self, store, headless=True):
'''Updates values of handles that define the arguments of the
module function.
Parameters
----------
store: dict
in-memory key-value store
headless: bool, optional
whether plotting should be disabled (default: ``True``)
Returns
-------
List[tmlib.jterator.handles.Handle]
handles for input keyword arguments
Note
----
This method must be called BEFORE calling
        :meth:`tmlib.jterator.module.Module.run`.
'''
logger.debug('update handles')
for handle in self.handles.input:
if isinstance(handle, hdls.PipeHandle):
try:
handle.value = store['pipe'][handle.key]
except KeyError:
raise PipelineRunError(
'Value for argument "%s" was not created upstream '
'in the pipeline: %s' % (self.name, handle.key)
)
except Exception:
raise
elif isinstance(handle, hdls.Plot) and headless:
# Overwrite to enforce headless mode if required.
handle.value = False
return self.handles.input
def _get_objects_name(self, handle):
'''Determines the name of the segmented objects that are referenced by
a `Features` handle.
Parameters
----------
handle: tmlib.workflow.jterator.handle.Features
output handle with a `objects` attribute, which provides a
reference to an input handle
Returns
-------
str
name of the referenced segmented objects
'''
objects_names = [
h.key for h in self.handles.input + self.handles.output
if h.name == handle.objects and
isinstance(h, hdls.SegmentedObjects)
]
if len(objects_names) == 0:
raise PipelineRunError(
'Invalid object for "%s" in module "%s": %s'
% (handle.name, self.name, handle.objects)
)
return objects_names[0]
def _get_reference_objects_name(self, handle):
'''Determines the name of the segmented objects that are referenced by
a `Features` handle.
Parameters
----------
handle: tmlib.workflow.jterator.handle.Features
output handle with a `objects_ref` attribute, which provides a
reference to an input handle
Returns
-------
str
name of the referenced segmented objects
'''
objects_names = [
h.key for h in self.handles.input
if h.name == handle.objects_ref and
isinstance(h, hdls.SegmentedObjects)
]
if len(objects_names) == 0:
raise PipelineRunError(
'Invalid object reference for "%s" in module "%s": %s'
% (handle.name, self.name, handle.objects)
)
return objects_names[0]
def _get_reference_channel_name(self, handle):
'''Determines the name of the channel that is referenced by a
`Features` handle.
Parameters
----------
handle: tmlib.workflow.jterator.handle.Features
output handle with a `channel_ref` attribute, which provides a
reference to an input handle
Returns
-------
str
name of the referenced channel
'''
if handle.channel_ref is None:
return None
channel_names = [
h.key for h in self.handles.input
if h.name == handle.channel_ref and
isinstance(h, hdls.IntensityImage)
]
if len(channel_names) == 0:
raise PipelineRunError(
'Invalid channel reference for "%s" in module "%s": %s'
% (handle.name, self.name, handle.channel_ref)
)
return channel_names[0]
def update_store(self, store):
'''Updates `store` with key-value pairs that were returned by the
module function.
Parameters
----------
store: dict
in-memory key-value store
Returns
-------
store: dict
updated in-memory key-value store
Note
----
This method must be called AFTER calling
        :meth:`tmlib.jterator.module.Module.run`.
'''
logger.debug('update store')
for i, handle in enumerate(self.handles.output):
if isinstance(handle, hdls.Figure):
logger.debug('add value of Figure handle to store')
store['current_figure'] = handle.value
elif isinstance(handle, hdls.SegmentedObjects):
logger.debug('add value of SegmentedObjects handle to store')
# Measurements need to be reset.
handle.measurements = []
store['objects'][handle.key] = handle
store['pipe'][handle.key] = handle.value
elif isinstance(handle, hdls.Measurement):
logger.debug('add value of Measurement handle to store')
ref_objects_name = self._get_reference_objects_name(handle)
objects_name = self._get_objects_name(handle)
ref_channel_name = self._get_reference_channel_name(handle)
new_names = list()
for name in handle.value[0].columns:
if ref_objects_name != objects_name:
new_name = '%s_%s' % (ref_objects_name, name)
else:
new_name = str(name) # copy
if ref_channel_name is not None:
new_name += '_%s' % ref_channel_name
new_names.append(new_name)
for t in range(len(handle.value)):
handle.value[t].columns = new_names
store['objects'][objects_name].add_measurement(handle)
else:
store['pipe'][handle.key] = handle.value
return store
def run(self, engine=None):
'''Executes a module, i.e. evaluate the corresponding function with
the keyword arguments provided by
:class:`tmlib.workflow.jterator.handles`.
Parameters
----------
engine: matlab_wrapper.matlab_session.MatlabSession, optional
engine for non-Python languages, such as Matlab (default: ``None``)
Note
----
        Call :meth:`tmlib.jterator.module.Module.update_handles` before
        calling this method and
        :meth:`tmlib.jterator.module.Module.update_store` afterwards.
'''
if self.language == 'Python':
return self._exec_py_module()
elif self.language == 'Matlab':
return self._exec_m_module(engine)
elif self.language == 'R':
return self._exec_r_module()
else:
raise PipelineRunError('Language not supported.')
def __str__(self):
return (
'<%s(name=%r, source=%r)>'
% (self.__class__.__name__, self.name, self.source_file)
)
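# Typical call sequence for an ImageAnalysisModule inside a pipeline run
# (illustrative sketch only; `store` and `engine` are assumed to be provided
# by the surrounding jterator pipeline, as described in the docstrings above):
#
#     module.update_handles(store, headless=True)
#     module.run(engine=engine)
#     store = module.update_store(store)
#
# update_handles() binds pipeline inputs before run(), and update_store()
# publishes the module outputs afterwards.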
| agpl-3.0 |
CIFASIS/pylearn2 | pylearn2/sandbox/cuda_convnet/bench.py | 44 | 3589 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
output = FilterActs()(images, filters)
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01v = base_image_value.transpose(3,0,1,2)
filters_bc01v = base_filters_value.transpose(3,0,1,2)
filters_bc01v = filters_bc01v[:,:,::-1,::-1]
images_bc01 = shared(images_bc01v)
filters_bc01 = shared(filters_bc01v)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid', image_shape = images_bc01v.shape,
filter_shape = filters_bc01v.shape)
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 64,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
"""
| bsd-3-clause |
pathomx/pathomx | pathomx/plugins/spectra/spectra_norm.py | 2 | 1270 | import numpy as np
import pandas as pd
# Abs the data (so account for negative peaks also)
data_a = np.abs(input_data.values)
# Sum each spectra (TSA)
data_as = np.sum(data_a, axis=1)
# Identify median
median_s = np.median(data_as)
# Scale others to match (*(median/row))
scaling = median_s / data_as
# Scale the spectra
tsa_data = input_data.T * scaling
tsa_data = tsa_data.T
if config['algorithm'] == 'TSA':
output_data = tsa_data
elif config['algorithm'] == 'PQN':
# Take result of TSA normalization
# Calculate median spectrum (median of each variable)
median_s = np.median(tsa_data, axis=0)
# For each variable of each spectrum, calculate ratio between median spectrum variable and that of the considered spectrum
spectra_r = median_s / np.abs(input_data)
# Take the median of these scaling factors
scaling = np.median(spectra_r, axis=1)
#Apply to the entire considered spectrum
output_data = input_data.T * scaling
output_data = output_data.T
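# Worked illustration of the PQN step above (numbers are only indicative):
# if the median spectrum is [1, 2, 4] and a sample reads [2, 4, 8], the
# per-variable ratios are [0.5, 0.5, 0.5], their median is 0.5, and the
# sample is scaled by 0.5 back onto the reference intensity level.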
data = None
# Clear so not exported
data_a = None
data_as = None
median_s = None
scaling = None
spectra_r = None
tsa_data = None
# Generate simple result figure (using pathomx libs)
from pathomx.figures import spectra
View = spectra(output_data, styles=styles)
View
| gpl-3.0 |
karllessard/tensorflow | tensorflow/python/keras/engine/data_adapter_test.py | 2 | 43498 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataAdapter tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class DummyArrayLike(object):
"""Dummy array-like object."""
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
def fail_on_convert(x, **kwargs):
_ = x
_ = kwargs
raise TypeError('Cannot convert DummyArrayLike to a tensor')
ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)
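# Registering a conversion function that always raises ensures that any code
# path which tries to turn a DummyArrayLike into a tensor fails loudly, so the
# adapters under test must handle array-likes without tensor conversion.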
class DataAdapterTestBase(keras_parameterized.TestCase):
def setUp(self):
super(DataAdapterTestBase, self).setUp()
self.batch_size = 5
self.numpy_input = np.zeros((50, 10))
self.numpy_target = np.ones(50)
self.tensor_input = constant_op.constant(2.0, shape=(50, 10))
self.tensor_target = array_ops.ones((50,))
self.arraylike_input = DummyArrayLike(self.numpy_input)
self.arraylike_target = DummyArrayLike(self.numpy_target)
self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(
(self.numpy_input, self.numpy_target)).shuffle(50).batch(
self.batch_size)
def generator():
while True:
yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))
self.generator_input = generator()
self.iterator_input = data_utils.threadsafe_generator(generator)()
self.sequence_input = TestSequence(batch_size=self.batch_size,
feature_shape=10)
self.text_input = [['abc']]
self.bytes_input = [[b'abc']]
self.model = keras.models.Sequential(
[keras.layers.Dense(8, input_shape=(10,), activation='softmax')])
class TestSequence(data_utils.Sequence):
def __init__(self, batch_size, feature_shape):
self.batch_size = batch_size
self.feature_shape = feature_shape
def __getitem__(self, item):
return (np.zeros((self.batch_size, self.feature_shape)),
np.ones((self.batch_size,)))
def __len__(self):
return 10
class TensorLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(TensorLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.TensorLikeDataAdapter
def test_can_handle_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input, self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
self.assertFalse(self.adapter_cls.can_handle(self.text_input))
self.assertFalse(self.adapter_cls.can_handle(self.bytes_input))
def test_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_batch_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.batch_size(), 5)
def test_partial_batch_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=4)
self.assertEqual(adapter.get_size(), 13) # 50/4
self.assertTrue(adapter.has_partial_batch())
self.assertEqual(adapter.partial_batch_size(), 2)
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.numpy_input, self.numpy_target, batch_size=5)
def test_can_handle_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)))
self.assertTrue(
self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0]))
self.assertTrue(
self.adapter_cls.can_handle(
pd.DataFrame(self.numpy_input),
pd.DataFrame(self.numpy_input)[0]))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
input_a = keras.Input(shape=(3,), name='input_a')
input_b = keras.Input(shape=(3,), name='input_b')
    input_c = keras.Input(shape=(1,), name='input_c')
x = keras.layers.Dense(4, name='dense_1')(input_a)
y = keras.layers.Dense(3, name='dense_2')(input_b)
z = keras.layers.Dense(1, name='dense_3')(input_c)
model_1 = keras.Model(inputs=input_a, outputs=x)
model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y])
model_3 = keras.Model(inputs=input_c, outputs=z)
model_1.compile(optimizer='rmsprop', loss='mse')
model_2.compile(optimizer='rmsprop', loss='mse')
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
input_a_df = pd.DataFrame(input_a_np)
input_b_df = pd.DataFrame(input_b_np)
output_a_df = pd.DataFrame(np.random.random((10, 4)))
output_b_df = pd.DataFrame(np.random.random((10, 3)))
model_1.fit(input_a_df,
output_a_df)
model_2.fit([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.fit([input_a_df],
[output_a_df])
model_1.fit({'input_a': input_a_df},
output_a_df)
model_2.fit({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.evaluate(input_a_df,
output_a_df)
model_2.evaluate([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.evaluate([input_a_df],
[output_a_df])
model_1.evaluate({'input_a': input_a_df},
output_a_df)
model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
# Verify predicting on pandas vs numpy returns the same result
predict_1_pandas = model_1.predict(input_a_df)
predict_2_pandas = model_2.predict([input_a_df, input_b_df])
predict_3_pandas = model_3.predict(input_a_df[0])
predict_1_numpy = model_1.predict(input_a_np)
predict_2_numpy = model_2.predict([input_a_np, input_b_np])
predict_3_numpy = model_3.predict(np.asarray(input_a_df[0]))
self.assertAllClose(predict_1_numpy, predict_1_pandas)
self.assertAllClose(predict_2_numpy, predict_2_pandas)
self.assertAllClose(predict_3_numpy, predict_3_pandas)
# Extra ways to pass in dataframes
model_1.predict([input_a_df])
model_1.predict({'input_a': input_a_df})
model_2.predict({'input_a': input_a_df, 'input_b': input_b_df})
def test_can_handle(self):
self.assertTrue(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
self.assertFalse(self.adapter_cls.can_handle(self.arraylike_input))
self.assertFalse(
self.adapter_cls.can_handle(self.arraylike_input,
self.arraylike_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
self.assertFalse(self.adapter_cls.can_handle(self.text_input))
self.assertFalse(self.adapter_cls.can_handle(self.bytes_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.tensor_input, self.tensor_target, batch_size=5)
def test_size(self):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_shuffle_correctness(self):
num_samples = 100
batch_size = 32
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter).numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each element appears exactly once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each element appears exactly once.
self.assertAllClose(x, np.sort(second_epoch_data))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_batch_shuffle_correctness(self):
num_samples = 100
batch_size = 6
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, and that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])
# Assert that the data within each batch remains contiguous
for batch in epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred across the epoch as a whole.
self.assertNotAllClose(x, epoch_data)
# Check that each element appears exactly once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each element appears exactly once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
('batch_size_5', 5, None, 5),
('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence
('steps_1', None, 1, 50),
('steps_4', None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
('batch_size_5', 5, None, 10, 0),
('batch_size_4', 4, None, 13, 2),
('steps_1', None, 1, 1, 0),
('steps_5', None, 5, 5, 0),
('steps_4', None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.get_size(), size) # 50/steps
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class GenericArrayLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(GenericArrayLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.GenericArrayLikeDataAdapter
def test_can_handle_some_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(
self.arraylike_input))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.arraylike_target))
# Because adapters are mutually exclusive, this adapter doesn't handle
# cases where all of the data is numpy or an EagerTensor
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(
self.adapter_cls.can_handle(self.numpy_input,
self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
# But do handle mixes that include generic arraylike data
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input,
self.arraylike_target))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.numpy_target))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.tensor_target))
self.assertTrue(
self.adapter_cls.can_handle(self.tensor_input,
self.arraylike_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
self.assertFalse(self.adapter_cls.can_handle(self.text_input))
self.assertFalse(self.adapter_cls.can_handle(self.bytes_input))
def test_size(self):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.arraylike_input,
self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
# First verify that DummyArrayLike can't be converted to a Tensor
with self.assertRaises(TypeError):
ops.convert_to_tensor_v2_with_dispatch(self.arraylike_input)
# Then train on the array like.
# It should not be converted to a tensor directly (which would force it
# into memory); only the sliced data should be converted.
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.arraylike_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.arraylike_target,
shuffle=True, batch_size=5)
self.model.fit(self.arraylike_input,
self.arraylike_target,
shuffle='batch', batch_size=5)
self.model.evaluate(self.arraylike_input,
self.arraylike_target, batch_size=5)
self.model.predict(self.arraylike_input, batch_size=5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy_target(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.numpy_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.numpy_target, shuffle=True,
batch_size=5)
self.model.fit(self.arraylike_input,
self.numpy_target, shuffle='batch',
batch_size=5)
self.model.evaluate(self.arraylike_input,
self.numpy_target, batch_size=5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_tensor_target(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.tensor_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.tensor_target, shuffle=True,
batch_size=5)
self.model.fit(self.arraylike_input,
self.tensor_target, shuffle='batch',
batch_size=5)
self.model.evaluate(self.arraylike_input,
self.tensor_target, batch_size=5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_shuffle_correctness(self):
num_samples = 100
batch_size = 32
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter).numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each element appears exactly once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each element appears exactly once.
self.assertAllClose(x, np.sort(second_epoch_data))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_batch_shuffle_correctness(self):
num_samples = 100
batch_size = 6
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, but that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])
# Assert that the data within each batch is contiguous but shuffled in order
for batch in epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred across the epoch as a whole.
self.assertNotAllClose(x, epoch_data)
# Check that each element appears exactly once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each element appears exactly once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
('batch_size_5', 5, None, 5),
('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence
('steps_1', None, 1, 50),
('steps_4', None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
('batch_size_5', 5, None, 10, 0),
('batch_size_4', 4, None, 13, 2),
('steps_1', None, 1, 1, 0),
('steps_5', None, 5, 5, 0),
('steps_4', None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size):
adapter = self.adapter_cls(
self.arraylike_input, self.arraylike_target,
batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.get_size(), size) # 50/steps
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class DatasetAdapterTest(DataAdapterTestBase):
def setUp(self):
super(DatasetAdapterTest, self).setUp()
self.adapter_cls = data_adapter.DatasetAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
dataset = self.adapter_cls(self.dataset_input).get_dataset()
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(dataset)
def test_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.batch_size())
def test_partial_batch(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegex(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.dataset_input, y=self.dataset_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegex(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(self.dataset_input, sample_weights=self.dataset_input)
class GeneratorDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(GeneratorDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.GeneratorDataAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertTrue(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
self.assertFalse(self.adapter_cls.can_handle(self.text_input))
self.assertFalse(self.adapter_cls.can_handle(self.bytes_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.generator_input, steps_per_epoch=10)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@testing_utils.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
# Fit twice to ensure there isn't any duplication that prevents the worker
# from starting.
self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
def test_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.generator_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegex(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.generator_input, y=self.generator_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegex(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(
self.generator_input, sample_weights=self.generator_input)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_not_shuffled(self):
def generator():
for i in range(10):
yield np.ones((1, 1)) * i
adapter = self.adapter_cls(generator(), shuffle=True)
for i, data in enumerate(adapter.get_dataset()):
self.assertEqual(i, data[0].numpy().flatten())
class KerasSequenceAdapterTest(DataAdapterTestBase):
def setUp(self):
super(KerasSequenceAdapterTest, self).setUp()
self.adapter_cls = data_adapter.KerasSequenceAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertTrue(self.adapter_cls.can_handle(self.sequence_input))
self.assertFalse(self.adapter_cls.can_handle(self.text_input))
self.assertFalse(self.adapter_cls.can_handle(self.bytes_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.sequence_input)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@testing_utils.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
# Fit twice to ensure there isn't any duplication that prevents the worker
# from starting.
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
def test_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.get_size(), 10)
def test_batch_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegex(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.sequence_input, y=self.sequence_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegex(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(self.sequence_input, sample_weights=self.sequence_input)
class DataHandlerTest(keras_parameterized.TestCase):
def test_finite_dataset_with_steps_per_epoch(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1)
# User can choose to only partially consume `Dataset`.
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=2)
self.assertEqual(data_handler.inferred_steps, 2)
self.assertFalse(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1], [2, 3]])
def test_finite_dataset_without_steps_per_epoch(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2]).batch(1)
data_handler = data_adapter.DataHandler(data, initial_epoch=0, epochs=2)
self.assertEqual(data_handler.inferred_steps, 3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]])
def test_finite_dataset_with_steps_per_epoch_exact_size(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1)
# If user specifies exact size of `Dataset` as `steps_per_epoch`,
# create a new iterator each epoch.
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=4)
self.assertTrue(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]])
def test_infinite_dataset_with_steps_per_epoch(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2]).batch(1).repeat()
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]])
def test_unknown_cardinality_dataset_with_steps_per_epoch(self):
ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2, 3, 4, 5, 6])
filtered_ds = ds.filter(lambda x: x < 4)
self.assertEqual(
cardinality.cardinality(filtered_ds).numpy(), cardinality.UNKNOWN)
# User can choose to only partially consume `Dataset`.
data_handler = data_adapter.DataHandler(
filtered_ds, initial_epoch=0, epochs=2, steps_per_epoch=2)
self.assertFalse(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[0, 1], [2, 3]])
self.assertEqual(data_handler.inferred_steps, 2)
def test_unknown_cardinality_dataset_without_steps_per_epoch(self):
ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2, 3, 4, 5, 6])
filtered_ds = ds.filter(lambda x: x < 4)
self.assertEqual(
cardinality.cardinality(filtered_ds).numpy(), cardinality.UNKNOWN)
data_handler = data_adapter.DataHandler(
filtered_ds, initial_epoch=0, epochs=2)
self.assertEqual(data_handler.inferred_steps, None)
self.assertTrue(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
with data_handler.catch_stop_iteration():
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]])
self.assertEqual(data_handler.inferred_steps, 4)
def test_insufficient_data(self):
ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1])
ds = ds.filter(lambda *args, **kwargs: True)
data_handler = data_adapter.DataHandler(
ds, initial_epoch=0, epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
with data_handler.catch_stop_iteration():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertTrue(data_handler._insufficient_data)
self.assertEqual(returned_data, [[0, 1]])
def test_numpy(self):
x = np.array([0, 1, 2])
y = np.array([0, 2, 4])
sw = np.array([0, 4, 8])
data_handler = data_adapter.DataHandler(
x=x, y=y, sample_weight=sw, batch_size=1, epochs=2)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data,
[[(0, 0, 0), (1, 2, 4),
(2, 4, 8)], [(0, 0, 0), (1, 2, 4), (2, 4, 8)]])
def test_generator(self):
def generator():
for _ in range(2):
for step in range(3):
yield (ops.convert_to_tensor_v2_with_dispatch([step]),)
data_handler = data_adapter.DataHandler(
generator(), epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[([0],), ([1],),
([2],)], [([0],), ([1],), ([2],)]])
def test_composite_tensor(self):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0], [2, 0]], values=[0, 1, 2], dense_shape=[3, 1])
data_handler = data_adapter.DataHandler(st, epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(
nest.map_structure(sparse_ops.sparse_tensor_to_dense, returned_data))
self.assertEqual(returned_data, [[([0],), ([1],),
([2],)], [([0],), ([1],), ([2],)]])
def test_list_of_scalars(self):
data_handler = data_adapter.DataHandler([[0], [1], [2]],
epochs=2,
steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[([0],), ([1],),
([2],)], [([0],), ([1],), ([2],)]])
def test_class_weight_user_errors(self):
with self.assertRaisesRegex(ValueError, 'to be a dict with keys'):
data_adapter.DataHandler(
x=[[0], [1], [2]],
y=[[2], [1], [0]],
batch_size=1,
sample_weight=[[1.], [2.], [4.]],
class_weight={
0: 0.5,
1: 1.,
3: 1.5 # Skips class `2`.
})
with self.assertRaisesRegex(ValueError, 'with a single output'):
data_adapter.DataHandler(
x=np.ones((10, 1)),
y=[np.ones((10, 1)), np.zeros((10, 1))],
batch_size=2,
class_weight={
0: 0.5,
1: 1.,
2: 1.5
})
@parameterized.named_parameters(('numpy', True), ('dataset', False))
def test_single_x_input_no_tuple_wrapping(self, use_numpy):
x = np.ones((10, 1))
if use_numpy:
batch_size = 2
else:
x = dataset_ops.Dataset.from_tensor_slices(x).batch(2)
batch_size = None
data_handler = data_adapter.DataHandler(x, batch_size=batch_size)
for _, iterator in data_handler.enumerate_epochs():
for _ in data_handler.steps():
# Check that single x input is not wrapped in a tuple.
self.assertIsInstance(next(iterator), ops.Tensor)
class TestValidationSplit(keras_parameterized.TestCase):
@parameterized.named_parameters(('numpy_arrays', True), ('tensors', False))
def test_validation_split_unshuffled(self, use_numpy):
if use_numpy:
x = np.array([0, 1, 2, 3, 4])
y = np.array([0, 2, 4, 6, 8])
sw = np.array([0, 4, 8, 12, 16])
else:
x = ops.convert_to_tensor_v2_with_dispatch([0, 1, 2, 3, 4])
y = ops.convert_to_tensor_v2_with_dispatch([0, 2, 4, 6, 8])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 4, 8, 12, 16])
(train_x, train_y, train_sw), (val_x, val_y, val_sw) = (
data_adapter.train_validation_split((x, y, sw), validation_split=0.2))
if use_numpy:
train_x = ops.convert_to_tensor_v2_with_dispatch(train_x)
train_y = ops.convert_to_tensor_v2_with_dispatch(train_y)
train_sw = ops.convert_to_tensor_v2_with_dispatch(train_sw)
val_x = ops.convert_to_tensor_v2_with_dispatch(val_x)
val_y = ops.convert_to_tensor_v2_with_dispatch(val_y)
val_sw = ops.convert_to_tensor_v2_with_dispatch(val_sw)
self.assertEqual(train_x.numpy().tolist(), [0, 1, 2, 3])
self.assertEqual(train_y.numpy().tolist(), [0, 2, 4, 6])
self.assertEqual(train_sw.numpy().tolist(), [0, 4, 8, 12])
self.assertEqual(val_x.numpy().tolist(), [4])
self.assertEqual(val_y.numpy().tolist(), [8])
self.assertEqual(val_sw.numpy().tolist(), [16])
def test_validation_split_user_error(self):
with self.assertRaisesRegex(ValueError, 'is only supported for Tensors'):
data_adapter.train_validation_split(
lambda: np.ones((10, 1)), validation_split=0.2)
def test_validation_split_examples_too_few(self):
with self.assertRaisesRegex(ValueError, 'not sufficient to split it'):
data_adapter.train_validation_split(
np.ones((1, 10)), validation_split=0.2)
def test_validation_split_none(self):
train_sw, val_sw = data_adapter.train_validation_split(
None, validation_split=0.2)
self.assertIsNone(train_sw)
self.assertIsNone(val_sw)
(_, train_sw), (_, val_sw) = data_adapter.train_validation_split(
(np.ones((10, 1)), None), validation_split=0.2)
self.assertIsNone(train_sw)
self.assertIsNone(val_sw)
class ListsOfScalarsDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(ListsOfScalarsDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.ListsOfScalarsDataAdapter
def test_can_list_inputs(self):
self.assertTrue(self.adapter_cls.can_handle(self.text_input))
self.assertTrue(self.adapter_cls.can_handle(self.bytes_input))
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
class TestUtils(keras_parameterized.TestCase):
def test_expand_1d_sparse_tensors_untouched(self):
st = sparse_tensor.SparseTensor(
indices=[[0], [10]], values=[1, 2], dense_shape=[10])
st = data_adapter.expand_1d(st)
self.assertEqual(st.shape.rank, 1)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| apache-2.0 |
vivekmishra1991/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
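Examples
--------
A minimal illustrative doctest (assuming the usual NFKD decomposition of
Latin accented characters):
>>> print(strip_accents_unicode(u'\xe9l\xe8ve'))
eleve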
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
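# For illustration: with ngram_range=(2, 2), the word "cat" is padded to
# " cat " and yields the bigrams [' c', 'ca', 'at', 't ']; n-grams never
# straddle two words.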
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can be
of type string or bytes, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
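Examples
--------
A minimal usage sketch; only the output shape is shown, since the column
a given token hashes to depends on the hash function and ``n_features``:
>>> from sklearn.feature_extraction.text import HashingVectorizer
>>> corpus = ['This is the first document.', 'This is the second one.']
>>> vectorizer = HashingVectorizer(n_features=8)
>>> X = vectorizer.transform(corpus)
>>> X.shape
(2, 8)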
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can be
of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
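Examples
--------
A minimal usage sketch; the learned vocabulary is sorted alphabetically
(here: cat, mat, on, sat, the) and each row holds per-document counts:
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> corpus = ['the cat sat', 'the cat sat on the mat']
>>> vectorizer = CountVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> X.toarray().tolist()
[[1, 0, 0, 1, 1], [1, 1, 1, 1, 2]]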
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more documents than high or in
fewer documents than low, modifying the vocabulary, and restricting
it to at most the `limit` most frequent terms.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
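# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of the fit_transform / transform / inverse_transform round trip
# documented in the methods above, assuming the CountVectorizer defined in this module
# behaves like the scikit-learn estimator of the same name. The corpus is made up.
def _example_count_vectorizer_usage():
    corpus = ["the cat sat on the mat",
              "the dog sat on the log"]
    vectorizer = CountVectorizer()
    # Learn the vocabulary and build the document-term matrix in one pass.
    X = vectorizer.fit_transform(corpus)          # sparse matrix, shape (2, n_features)
    print(vectorizer.get_feature_names())         # e.g. ['cat', 'dog', 'log', ...]
    # Re-use the fitted vocabulary on unseen documents; unknown tokens are ignored.
    X_new = vectorizer.transform(["the cat and the dog"])
    # Map nonzero columns back to terms, one array per document.
    print(vectorizer.inverse_transform(X_new))
    return X, X_new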
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
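# --- Illustrative worked example (added for clarity; not part of the original module) ---
# A minimal sketch of the idf computation described in the TfidfTransformer docstring,
# assuming the class defined above. With smooth_idf=True the fitted weights should match
# log((n_samples + 1) / (df + 1)) + 1 computed by hand; the count matrix is made up.
def _example_tfidf_transformer():
    import numpy as np
    counts = np.array([[3, 0, 1],
                       [2, 0, 0],
                       [3, 0, 0],
                       [4, 0, 0],
                       [3, 2, 0],
                       [3, 0, 2]], dtype=np.float64)
    transformer = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = transformer.fit_transform(counts)
    # Hand-computed idf, mirroring fit(): df is the number of documents containing each term.
    df = (counts > 0).sum(axis=0)
    n_samples = counts.shape[0]
    idf_by_hand = np.log((n_samples + 1.0) / (df + 1.0)) + 1.0
    assert np.allclose(transformer.idf_, idf_by_hand)
    # Each row of the result is the l2-normalized vector of tf * idf, with the "+1"
    # already folded into idf_ as described in the class docstring.
    return tfidf.toarray()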
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items of type
        string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents, count the occurrences of each token, and return
        them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
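# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of the fit_transform / transform workflow with the TfidfVectorizer
# defined above, which is equivalent to chaining CountVectorizer and TfidfTransformer.
# The documents are made up.
def _example_tfidf_vectorizer():
    train_docs = ["the quick brown fox",
                  "the lazy brown dog",
                  "the quick red fox"]
    vectorizer = TfidfVectorizer(min_df=1, norm='l2')
    X_train = vectorizer.fit_transform(train_docs)   # learns vocabulary and idf
    X_test = vectorizer.transform(["a quick dog"])   # re-uses both on unseen text
    return X_train, X_test, vectorizer.get_feature_names()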
| bsd-3-clause |
vkscool/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_agg.py | 69 | 11729 | """
An agg http://antigrain.com/ backend
Features that are implemented
* capstyles and join styles
* dashes
* linewidth
* lines, rectangles, ellipses
* clipping to a rectangle
* output to RGBA and PNG
* alpha blending
* DPI scaling properly - everything scales properly (dashes, linewidths, etc)
* draw polygon
* freetype2 w/ ft2font
TODO:
* allow save to file handle
* integrate screen dpi w/ ppi and text
"""
from __future__ import division
import numpy as npy
from matplotlib import verbose, rcParams
from matplotlib.backend_bases import RendererBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, maxdict
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont
from matplotlib.ft2font import FT2Font, LOAD_FORCE_AUTOHINT
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from _backend_agg import RendererAgg as _RendererAgg
from matplotlib import _png
backend_version = 'v2.2'
class RendererAgg(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles
"""
debug=1
texd = maxdict(50) # a cache of tex image rasters
_fontd = maxdict(50)
def __init__(self, width, height, dpi):
if __debug__: verbose.report('RendererAgg.__init__', 'debug-annoying')
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
if __debug__: verbose.report('RendererAgg.__init__ width=%s, height=%s'%(width, height), 'debug-annoying')
self._renderer = _RendererAgg(int(width), int(height), dpi, debug=False)
if __debug__: verbose.report('RendererAgg.__init__ _RendererAgg done',
'debug-annoying')
#self.draw_path = self._renderer.draw_path # see below
self.draw_markers = self._renderer.draw_markers
self.draw_path_collection = self._renderer.draw_path_collection
self.draw_quad_mesh = self._renderer.draw_quad_mesh
self.draw_image = self._renderer.draw_image
self.copy_from_bbox = self._renderer.copy_from_bbox
self.restore_region = self._renderer.restore_region
self.tostring_rgba_minimized = self._renderer.tostring_rgba_minimized
self.mathtext_parser = MathTextParser('Agg')
self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
if __debug__: verbose.report('RendererAgg.__init__ done',
'debug-annoying')
def draw_path(self, gc, path, transform, rgbFace=None):
nmax = rcParams['agg.path.chunksize'] # here at least for testing
npts = path.vertices.shape[0]
if nmax > 100 and npts > nmax and path.should_simplify and rgbFace is None:
nch = npy.ceil(npts/float(nmax))
chsize = int(npy.ceil(npts/nch))
i0 = npy.arange(0, npts, chsize)
i1 = npy.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1,:]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
self._renderer.draw_path(gc, p, transform, rgbFace)
else:
self._renderer.draw_path(gc, path, transform, rgbFace)
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if __debug__: verbose.report('RendererAgg.draw_mathtext',
'debug-annoying')
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
x = int(x) + ox
y = int(y) - oy
self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
Render the text
"""
if __debug__: verbose.report('RendererAgg.draw_text', 'debug-annoying')
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
font = self._get_agg_font(prop)
if font is None: return None
if len(s) == 1 and ord(s) > 127:
font.load_char(ord(s), flags=LOAD_FORCE_AUTOHINT)
else:
# We pass '0' for angle here, since it will be rotated (in raster
            # space) in the following call to draw_text_image.
font.set_text(s, 0, flags=LOAD_FORCE_AUTOHINT)
font.draw_glyphs_to_bitmap()
#print x, y, int(x), int(y)
self._renderer.draw_text_image(font.get_image(), int(x), int(y) + 1, angle, gc)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperty prop
        # passing rgb is a little hack to make caching in the
# texmanager more efficient. It is not meant to be used
# outside the backend
"""
if ismath=='TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: descent of TeX text (I am imitating backend_ps here -JKS)
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
font = self._get_agg_font(prop)
font.set_text(s, 0.0, flags=LOAD_FORCE_AUTOHINT) # the width and height of unrotated string
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def draw_tex(self, gc, x, y, s, prop, angle):
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key)
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = npy.array(Z * 255.0, npy.uint8)
self._renderer.draw_text_image(Z, x, y, angle, gc)
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def _get_agg_font(self, prop):
"""
        Get the font for text instance t, caching for efficiency
"""
if __debug__: verbose.report('RendererAgg._get_agg_font',
'debug-annoying')
key = hash(prop)
font = self._fontd.get(key)
if font is None:
fname = findfont(prop)
font = self._fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self._fontd[fname] = font
self._fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def points_to_pixels(self, points):
"""
        convert point measures to pixels using dpi and the pixels per
inch of the display
"""
if __debug__: verbose.report('RendererAgg.points_to_pixels',
'debug-annoying')
return points*self.dpi/72.0
def tostring_rgb(self):
if __debug__: verbose.report('RendererAgg.tostring_rgb',
'debug-annoying')
return self._renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('RendererAgg.tostring_argb',
'debug-annoying')
return self._renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('RendererAgg.buffer_rgba',
'debug-annoying')
return self._renderer.buffer_rgba(x,y)
def clear(self):
self._renderer.clear()
def option_image_nocomposite(self):
# It is generally faster to composite each image directly to
# the Figure, and there's no file size benefit to compositing
# with the Agg backend
return True
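# --- Illustrative sketch (added for clarity; not part of the original module) ---
# RendererAgg.draw_path above splits very long paths into chunks before handing them to
# the extension renderer. The helper below reproduces just the chunk-index arithmetic on
# a hypothetical vertex count, so the slicing in draw_path is easier to follow; the
# renderer itself does not call it.
def _example_path_chunk_indices(npts=10000, nmax=3000):
    import numpy as npy
    nch = npy.ceil(npts / float(nmax))       # number of chunks
    chsize = int(npy.ceil(npts / nch))       # vertices per chunk
    i0 = npy.arange(0, npts, chsize)         # start index of each slice
    i1 = npy.zeros_like(i0)
    i1[:-1] = i0[1:] - 1                     # exclusive end index of each slice
    i1[-1] = npts                            # the last chunk runs to the final vertex
    return list(zip(i0, i1))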
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if __debug__: verbose.report('backend_agg.new_figure_manager',
'debug-annoying')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasAgg(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasAgg(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def copy_from_bbox(self, bbox):
renderer = self.get_renderer()
return renderer.copy_from_bbox(bbox)
def restore_region(self, region):
renderer = self.get_renderer()
return renderer.restore_region(region)
def draw(self):
"""
Draw the figure using the renderer
"""
if __debug__: verbose.report('FigureCanvasAgg.draw', 'debug-annoying')
self.renderer = self.get_renderer()
self.figure.draw(self.renderer)
def get_renderer(self):
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try: self._lastKey, self.renderer
except AttributeError: need_new_renderer = True
else: need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self.renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
return self.renderer
def tostring_rgb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_rgb',
'debug-annoying')
return self.renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_argb',
'debug-annoying')
return self.renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('FigureCanvasAgg.buffer_rgba',
'debug-annoying')
return self.renderer.buffer_rgba(x,y)
def get_default_filetype(self):
return 'png'
def print_raw(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
renderer._renderer.write_rgba(filename_or_obj)
renderer.dpi = original_dpi
print_rgba = print_raw
def print_png(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
_png.write_png(renderer._renderer.buffer_rgba(0, 0),
renderer.width, renderer.height,
filename_or_obj, self.figure.dpi)
renderer.dpi = original_dpi
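# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of driving the Agg backend directly with the Figure class imported
# above: rasterize a figure off screen, write it out as a PNG file and grab the raw RGB
# buffer. The filename is just a placeholder.
def _example_agg_canvas(filename='example.png'):
    fig = Figure(figsize=(4, 3), dpi=100)
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [0, 1, 4])
    canvas = FigureCanvasAgg(fig)
    canvas.draw()                      # render the figure into the Agg buffer
    canvas.print_png(filename)         # write the buffer out as a PNG file
    return canvas.tostring_rgb()       # raw RGB bytes of the rendered canvas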
| gpl-3.0 |
jswoboda/ISRSpectrum | Examples/singleparticleacf.py | 1 | 4451 | #!/usr/bin/env python
"""
Created on Fri Apr 22 14:42:49 2016
@author: John Swoboda
"""
from ISRSpectrum import Path
import numpy as np
import scipy.fftpack as fftsy
import scipy.special
import scipy.constants as spconst
import matplotlib.pylab as plt
from matplotlib import rc
#
from ISRSpectrum.ISRSpectrum import magacf, collacf, magncollacf
from ISRSpectrum import chirpz, sommerfeldchirpz, sommerfelderf, sommerfelderfrep
if __name__== '__main__':
# directory stuff
curdir = Path(__file__).parent
ISRdir = curdir.parent
#%% Sim set up
centerFrequency = 440.2*1e6
nspec=129
sampfreq=50e3
bMag = 0.4e-4
Ts = 1e3
Partdict = {0:('Electron',spconst.m_e,spconst.e),1:('H+ Ion',spconst.m_p,spconst.e),2:('O+ Ion',16*spconst.m_p,spconst.e)}
particle = Partdict[0]
pname = particle[0]
ms = particle[1]
q_ch = particle[2]
K = 2.0*np.pi*2*centerFrequency/spconst.c
f = np.arange(-np.ceil((nspec-1.0)/2.0),np.floor((nspec-1.0)/2.0+1))*(sampfreq/(2*np.ceil((nspec-1.0)/2.0)))
C = np.sqrt(spconst.k*Ts/ms)
Om = q_ch*bMag/ms
omeg_s = 2.0*np.pi*f
theta = omeg_s/(K*C*np.sqrt(2.0))
dtau = 2e-2*2.0/(K*C*np.sqrt(2.0))
N = 2**12
tau = np.arange(N)*dtau
d2r = np.pi/180.0
#%% No collisions or magnetic field
gordnn = np.exp(-np.power(C*K*tau,2.0)/2.0)
plt.figure()
plt.plot(tau,gordnn,linewidth=3)
plt.title(r'Single Particle ACF for ' +pname)
plt.grid(True)
plt.savefig('ACF'+pname.replace(" ", "")+'.png')
#%% With collisions
nuvec = np.logspace(-2.0,2.0,10)*K*C
taumat = np.tile(tau[np.newaxis,:],(len(nuvec),1))
numat = np.tile(nuvec[:,np.newaxis],(1,len(tau)))
gordnun = collacf(taumat,K,C,numat)
#np.exp(-np.power(K*C/numat,2.0)*(numat*taumat-1+np.exp(-numat*taumat)))
plt.figure()
plt.plot(tau,gordnn,linestyle='--',color='b',linewidth=4,label=r'No Collisions')
for inun, inu in enumerate(nuvec):
numult = inu/(K*C)
plt.plot(tau,gordnun[inun].real,linewidth=3,label=r'$\nu = {:.2f} KC$'.format(numult))
plt.grid(True)
plt.title(r'Single Particle ACF W/ Collisions for '+pname)
plt.legend()
plt.savefig('ACFwcolls'+pname.replace(" ", "")+'.png')
#%% With magnetic field
alpha = np.linspace(19,1,10)
taumat = np.tile(tau[np.newaxis,:],(len(alpha),1))
almat = np.tile(alpha[:,np.newaxis],(1,len(tau)))
Kpar = np.sin(d2r*almat)*K
Kperp = np.cos(d2r*almat)*K
# gordmag = np.exp(-np.power(C*Kpar*taumat,2.0)/2.0-2.0*np.power(Kperp*C*np.sin(Om*taumat/2.0)/Om,2.0))
gordmag = magacf(taumat,K,C,d2r*almat,Om)
plt.figure()
plt.plot(tau,gordnn,linestyle='--',color='b',linewidth=4,label='No B-field')
for ialn, ial in enumerate(alpha):
plt.plot(tau,gordmag[ialn].real,linewidth=3,label=r'$\alpha = {:.0f}^\circ$'.format(ial))
plt.grid(True)
plt.title('Single Particle ACF W/ Mag for ' +pname)
plt.legend()
plt.savefig('ACFwmag'+pname.replace(" ", "")+'.png')
#%% Error surface with both
almat3d = np.tile(alpha[:,np.newaxis,np.newaxis],(1,len(nuvec),len(tau)))
numat3d = np.tile(nuvec[np.newaxis,:,np.newaxis],(len(alpha),1,len(tau)))
taumat3d = np.tile(tau[np.newaxis,np.newaxis,:],(len(alpha),len(nuvec),1))
Kpar3d = np.sin(d2r*almat3d)*K
Kperp3d = np.cos(d2r*almat3d)*K
gam = np.arctan(numat3d/Om)
# deltl = np.exp(-np.power(Kpar3d*C/numat3d,2.0)*(numat3d*taumat3d-1+np.exp(-numat3d*taumat3d)))
# deltp = np.exp(-np.power(C*Kperp3d,2.0)/(Om*Om+numat3d*numat3d)*(np.cos(2*gam)+numat3d*taumat3d-np.exp(-numat3d*taumat3d)*(np.cos(Om*taumat3d-2.0*gam))))
# gordall = deltl*deltp
gordall = magncollacf(taumat3d,K,C,d2r*almat3d,Om,numat3d)
gordnnmat = np.tile(gordnn[np.newaxis,np.newaxis,:],(len(alpha),len(nuvec),1))
gorddiff = np.abs(gordall-gordnnmat)**2
err = np.sqrt(gorddiff.mean(2))/np.sqrt(np.power(gordnn,2.0).sum())
extent = [np.log10(nuvec[0]/(K*C)),np.log10(nuvec[-1]/(K*C)),alpha[0],alpha[-1]]
plt.figure()
myim = plt.imshow(err*100,extent = extent,origin='lower',aspect='auto')
myim.set_clim(0.0,5.)
plt.xlabel(r'$\log_{10}(\nu /KC)$')
plt.ylabel(r'$^\circ\alpha$')
cbar = plt.colorbar()
cbar.set_label('% Error', rotation=270)
cbar.ax.get_yaxis().labelpad = 15
plt.title('Error between ideal ACF and with Collisions and B-field for '+pname)
plt.savefig('ACFerr'+pname.replace(" ", "")+'.png')
| mit |
ScienceStacks/jupyter_scisheets_widget | jupyter_scisheets_widget/scisheets_widget.py | 1 | 2851 | import ast
import json
import StringIO
import ipywidgets as widgets
import numpy as np
import pandas as pd
from IPython.display import display
from traitlets import Unicode
from traitlets import default
from traitlets import List
class SciSheetTable(widgets.DOMWidget):
# Name of the view in JS
_view_name = Unicode('SciSheetTableView').tag(sync=True)
# Name of the model in JS
_model_name = Unicode('SciSheetTableModel').tag(sync=True)
# Namespace for the view (name of JS package)
_view_module = Unicode('jupyter_scisheets_widget').tag(sync=True)
# Namespace for the module (name of JS package)
_model_module = Unicode('jupyter_scisheets_widget').tag(sync=True)
# Defines the data (contents of cells)
_model_data = Unicode().tag(sync=True)
# Defines the header information
_model_header = Unicode().tag(sync=True)
# Defines the index information
_model_index = Unicode().tag(sync=True)
# Set the default layout
@default('layout')
def _default_layout(self):
return widgets.Layout(height='400px', align_self='stretch')
class HandsonDataFrame(object):
def __init__(self, df):
self._df = df
self._widget = SciSheetTable()
self._on_displayed(self)
self._widget.observe(self._on_data_changed, '_model_data')
self._widget.unobserve(self._on_displayed)
def _on_displayed(self, e):
"""
Converts DataFrame to json and defines self._widget values
"""
if type(self._df) == pd.core.frame.DataFrame:
model_data = self._df.to_json(orient='split')
model_data = ast.literal_eval(model_data)
self._widget._model_data = json.dumps(model_data['data'])
self._widget._model_header = json.dumps(model_data['columns'])
self._widget._model_index = json.dumps(model_data['index'])
else:
print('Please enter a pandas dataframe')
def _on_data_changed(self, e):
"""
Pulls data from the handsontable whenever the user changes a value
in the table
"""
print('data is being changed')
data_dict = ast.literal_eval(self._widget._model_data)
col_dict = ast.literal_eval(self._widget._model_header)
index_dict = ast.literal_eval(self._widget._model_index)
updated_df = pd.DataFrame(data=data_dict, index=index_dict,
columns=col_dict)
# Note this will have to be more robust if new rows and/or
# columns are added to the widget
self._df.update(updated_df)
def to_dataframe(self):
"""
Update the original DataFrame
"""
return self._df
def show(self):
"""
Display the widget
"""
display(self._widget)
| bsd-3-clause |
gpfreitas/bokeh | bokeh/_legacy_charts/builder/boxplot_builder.py | 41 | 11882 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the BoxPlot class, which lets you build your BoxPlot plots just by passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
import pandas as pd
from ..utils import make_scatter, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, FactorRange, GlyphRenderer, Range1d
from ...models.glyphs import Rect, Segment
from ...properties import Bool, String
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def BoxPlot(values, marker="circle", outliers=True, xscale="categorical", yscale="linear",
xgrid=False, ygrid=True, **kw):
""" Create a BoxPlot chart using :class:`BoxPlotBuilder <bokeh.charts.builder.boxplot_builder.BoxPlotBuilder>`
to render the geometry from values, marker and outliers arguments.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
marker (int or string, optional): if outliers=True, the marker type to use
e.g., `circle`.
outliers (bool, optional): Whether or not to plot outliers.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import BoxPlot, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames of arrays are valid inputs)
medals = dict([
('bronze', np.array([7.0, 10.0, 8.0, 7.0, 4.0, 4.0, 1.0, 5.0, 2.0, 1.0,
4.0, 2.0, 1.0, 2.0, 4.0, 1.0, 0.0, 1.0, 1.0, 2.0,
0.0, 1.0, 0.0, 0.0, 1.0, 1.0])),
('silver', np.array([8., 4., 6., 4., 8., 3., 3., 2., 5., 6.,
1., 4., 2., 3., 2., 0., 0., 1., 2., 1.,
3., 0., 0., 1., 0., 0.])),
('gold', np.array([6., 6., 6., 8., 4., 8., 6., 3., 2., 2., 2., 1.,
3., 1., 0., 5., 4., 2., 0., 0., 0., 1., 1., 0., 0.,
0.]))
])
boxplot = BoxPlot(medals, marker="circle", outliers=True, title="boxplot",
xlabel="medal type", ylabel="medal count")
output_file('boxplot.html')
show(boxplot)
"""
return create_and_build(
BoxPlotBuilder, values, marker=marker, outliers=outliers,
xscale=xscale, yscale=yscale, xgrid=xgrid, ygrid=ygrid, **kw
)
class BoxPlotBuilder(Builder):
"""This is the BoxPlot class and it is in charge of plotting
scatter plots in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (rects, lines and markers)
taking the references from the source.
"""
# TODO: (bev) should be an enumeration
marker = String(help="""
The marker type to use (e.g., ``circle``) if outliers=True.
""")
outliers = Bool(help="""
Whether to display markers for any outliers.
""")
def _process_data(self):
"""Take the BoxPlot data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the quad, segments and markers glyphs inside the ``_yield_renderers`` method.
Args:
cat (list): categories as a list of strings.
marker (int or string, optional): if outliers=True, the marker type to use
e.g., ``circle``.
outliers (bool, optional): Whether to plot outliers.
values (dict or pd obj): the values to be plotted as bars.
"""
self._data_segment = dict()
self._attr_segment = []
self._data_rect = dict()
self._attr_rect = []
self._data_scatter = dict()
self._attr_scatter = []
self._data_legend = dict()
if isinstance(self._values, pd.DataFrame):
self._groups = self._values.columns
else:
self._groups = list(self._values.keys())
# add group to the self._data_segment dict
self._data_segment["groups"] = self._groups
# add group and witdh to the self._data_rect dict
self._data_rect["groups"] = self._groups
self._data_rect["width"] = [0.8] * len(self._groups)
# self._data_scatter does not need references to groups now,
# they will be added later.
# add group to the self._data_legend dict
self._data_legend["groups"] = self._groups
# all the list we are going to use to save calculated values
q0_points = []
q2_points = []
iqr_centers = []
iqr_lengths = []
lower_points = []
upper_points = []
upper_center_boxes = []
upper_height_boxes = []
lower_center_boxes = []
lower_height_boxes = []
out_x, out_y, out_color = ([], [], [])
colors = cycle_colors(self._groups, self.palette)
for i, (level, values) in enumerate(self._values.items()):
# Compute quantiles, center points, heights, IQR, etc.
# quantiles
q = np.percentile(values, [25, 50, 75])
q0_points.append(q[0])
q2_points.append(q[2])
# IQR related stuff...
iqr_centers.append((q[2] + q[0]) / 2)
iqr = q[2] - q[0]
iqr_lengths.append(iqr)
lower = q[0] - 1.5 * iqr
upper = q[2] + 1.5 * iqr
lower_points.append(lower)
upper_points.append(upper)
# rect center points and heights
upper_center_boxes.append((q[2] + q[1]) / 2)
upper_height_boxes.append(q[2] - q[1])
lower_center_boxes.append((q[1] + q[0]) / 2)
lower_height_boxes.append(q[1] - q[0])
# Store indices of outliers as list
outliers = np.where(
(values > upper) | (values < lower)
)[0]
for out in outliers:
o = values[out]
out_x.append(level)
out_y.append(o)
out_color.append(colors[i])
# Store
self.set_and_get(self._data_scatter, self._attr_scatter, "out_x", out_x)
self.set_and_get(self._data_scatter, self._attr_scatter, "out_y", out_y)
self.set_and_get(self._data_scatter, self._attr_scatter, "colors", out_color)
self.set_and_get(self._data_segment, self._attr_segment, "q0", q0_points)
self.set_and_get(self._data_segment, self._attr_segment, "lower", lower_points)
self.set_and_get(self._data_segment, self._attr_segment, "q2", q2_points)
self.set_and_get(self._data_segment, self._attr_segment, "upper", upper_points)
self.set_and_get(self._data_rect, self._attr_rect, "iqr_centers", iqr_centers)
self.set_and_get(self._data_rect, self._attr_rect, "iqr_lengths", iqr_lengths)
self.set_and_get(self._data_rect, self._attr_rect, "upper_center_boxes", upper_center_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "upper_height_boxes", upper_height_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "lower_center_boxes", lower_center_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "lower_height_boxes", lower_height_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "colors", colors)
def _set_sources(self):
"Push the BoxPlot data into the ColumnDataSource and calculate the proper ranges."
self._source_segment = ColumnDataSource(self._data_segment)
self._source_scatter = ColumnDataSource(self._data_scatter)
self._source_rect = ColumnDataSource(self._data_rect)
self._source_legend = ColumnDataSource(self._data_legend)
self.x_range = FactorRange(factors=self._source_segment.data["groups"])
start_y = min(self._data_segment[self._attr_segment[1]])
end_y = max(self._data_segment[self._attr_segment[3]])
## Expand min/max to encompass outliers
if self.outliers and self._data_scatter[self._attr_scatter[1]]:
start_out_y = min(self._data_scatter[self._attr_scatter[1]])
end_out_y = max(self._data_scatter[self._attr_scatter[1]])
# it could be no outliers in some sides...
start_y = min(start_y, start_out_y)
end_y = max(end_y, end_out_y)
self.y_range = Range1d(start=start_y - 0.1 * (end_y - start_y),
end=end_y + 0.1 * (end_y - start_y))
def _yield_renderers(self):
"""Use the several glyphs to display the Boxplot.
It uses the selected marker glyph to display the points, segments to
display the iqr and rects to display the boxes, taking as reference
points the data loaded at the ColumnDataSurce.
"""
ats = self._attr_segment
glyph = Segment(
x0="groups", y0=ats[1], x1="groups", y1=ats[0],
line_color="black", line_width=2
)
yield GlyphRenderer(data_source=self._source_segment, glyph=glyph)
glyph = Segment(
x0="groups", y0=ats[2], x1="groups", y1=ats[3],
line_color="black", line_width=2
)
yield GlyphRenderer(data_source=self._source_segment, glyph=glyph)
atr = self._attr_rect
glyph = Rect(
x="groups", y=atr[0], width="width", height=atr[1],
line_color="black", line_width=2, fill_color=None,
)
yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
glyph = Rect(
x="groups", y=atr[2], width="width", height=atr[3],
line_color="black", fill_color=atr[6],
)
yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
glyph = Rect(
x="groups", y=atr[4], width="width", height=atr[5],
line_color="black", fill_color=atr[6],
)
yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
if self.outliers:
yield make_scatter(self._source_scatter, self._attr_scatter[0],
self._attr_scatter[1], self.marker,
self._attr_scatter[2])
# Some helper methods
def set_and_get(self, data, attr, val, content):
"""Set a new attr and then get it to fill the self._data dict.
Keep track of the attributes created.
Args:
data (dict): where to store the new attribute content
attr (list): where to store the new attribute names
val (string): name of the new attribute
content (obj): content of the new attribute
"""
self._set_and_get(data, "", attr, val, content)
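# --- Illustrative worked example (added for clarity; not part of the original builder) ---
# A standalone sketch of the per-group statistics computed in _process_data above,
# assuming a plain numpy array as input: quartiles, IQR, the 1.5*IQR whisker limits and
# the resulting outliers.
def _example_box_stats(values):
    import numpy as np
    values = np.asarray(values)
    q0, q1, q2 = np.percentile(values, [25, 50, 75])   # lower quartile, median, upper quartile
    iqr = q2 - q0
    lower, upper = q0 - 1.5 * iqr, q2 + 1.5 * iqr      # whisker limits
    outliers = values[(values > upper) | (values < lower)]
    return {'quartiles': (q0, q1, q2), 'iqr': iqr,
            'whiskers': (lower, upper), 'outliers': outliers}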
| bsd-3-clause |
WaveBlocks/libwaveblocks | scripts/plot_propagator_convergence.py | 1 | 1347 | #!/usr/bin/env python
import sys
import numpy as np
import csv
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
#######################################
# Convergence Analysis
#######################################
prop_list = sys.argv[1:]
if not len(prop_list):
    print('TOO FEW ARGUMENTS: Please pass the error files as command line parameters')
fig = plt.figure()
ax = fig.gca()
for p in prop_list:
with open(p, 'rb') as f:
data = csv.reader(row for row in f if (not row.startswith('#') and row.strip()));
meta = data.next();
name, coefs, T = meta;
print; print(meta);
conv = []
for row in data:
conv.append(row);
        Dt = np.array(conv)[:,0].astype(float)
        err = np.array(conv)[:,1].astype(float)
ax.loglog(Dt, err, '-o', label=name+' (' + coefs + ' splitting)')
lgd = ax.legend(bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.)
# ax.set_xlim([5e1,1e5])
# ax.set_ylim(view[2:])
# ax.ticklabel_format(style="sci", scilimits=(0, 0), axis="y")
ax.set_title(r"$L_2$ error vs. number of step size (T=" + T + ")");
ax.grid(True);
ax.set_xlabel(r"step size $\Delta t$");
ax.set_ylabel(r"$L_2$ error $\frac{\| u (x) - u_* (x) \|}{\| u_* (x) \|}$")
fig.savefig("error_analysis.png",bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
plt.close(fig)
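# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The parsing loop above implies the following file layout: '#' comment lines and blank
# lines are skipped, the first remaining row holds the metadata (propagator name,
# splitting coefficients, final time T), and every following row is a (step size, error)
# pair. The helper below writes such a file; the propagator name, coefficient label and
# numbers are made up purely to illustrate the expected format.
def _write_example_error_file(path='example_errors.csv'):
    rows = [('ExamplePropagator', 'Y4', '10.0')]
    rows += [(str(0.1 / 2**k), str(1e-3 / 16**k)) for k in range(4)]
    with open(path, 'w') as f:
        f.write('# propagator convergence data (illustrative)\n')
        for row in rows:
            f.write(','.join(row) + '\n')
    return path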
| gpl-2.0 |
InContextSolutions/PandaSurvey | PandaSurvey/__init__.py | 1 | 4342 | """PandaSurvey includes two unique datasets for testing purpuses: `People` and a sample study. The `People` file is from the 2010 US Census. The sample study is from a small survey performed at InContext Solutions in 2014 (specific survey details withheld)"""
import os
import pandas
def _path(name):
root, _ = os.path.split(__file__)
return os.path.join(root, 'data/' + name)
def load_people():
"""Returns the `People` dataset as a DataFrame. The data consists of 9999 individuals with age, disability status, marital status, race, and gender demographic information. Columns and their codes are described below:
- Age
- Non-negative integer
- May include zeros
- Disability
- 1: Disabled
- 2: Not disabled
- MarritalStatus
- 1: Married
- 2: Widowed
- 3: Divorced
- 4: Separated
- 5: Never married or under 15 years old
- Race
- 1: White alone
- 2: Black or African American alone
- 3: American Indian alone
- 4: Alaska Native alone
- 5: American Indian and Alaska Native tribes specified; or American Indian or Alaska native, not specified and no other races
- 6: Asian alone
- 7: Native Hawaiian and Other Pacific Islander alone
- 8: Some other race alone
- 9: Two or more major race groups
- Gender
- 1: Male
- 2: Female
"""
return pandas.read_csv(_path("People.csv"))
def load_sample_study():
"""Returns a sample dataset describing demographics in coded format from 2092 respondents. The study consists of 7 cells and demographics considered include age, gender, income, hispanic, and race."""
df = pandas.read_csv(_path("SampleStudy.csv"))
del df['Weight']
return df
def load_sample_weights():
"""Returns individual weights from the sample survey calculated via a raking method previously implemented in R."""
df = pandas.read_csv(_path("SampleStudy.csv"))
return df['Weight']
def load_sample_proportions():
"""Returns the target sample proportions that correspond to the sample survey.
+-------------+-------------+-------------------+
| Demographic | Coded Value | Target Proportion |
+=============+=============+===================+
| Age | 1 | 0.07 |
+-------------+-------------+-------------------+
| Age | 2 | 0.22 |
+-------------+-------------+-------------------+
| Age | 3 | 0.2 |
+-------------+-------------+-------------------+
| Age | 4 | 0.2 |
+-------------+-------------+-------------------+
| Age | 5 | 0.21 |
+-------------+-------------+-------------------+
| Gender | 1 | 0.5 |
+-------------+-------------+-------------------+
| Gender | 2 | 0.5 |
+-------------+-------------+-------------------+
| Income | 1 | 0.17 |
+-------------+-------------+-------------------+
| Income | 2 | 0.21 |
+-------------+-------------+-------------------+
| Income | 3 | 0.25 |
+-------------+-------------+-------------------+
| Income | 4 | 0.16 |
+-------------+-------------+-------------------+
| Income | 5 | 0.11 |
+-------------+-------------+-------------------+
| Hispanic | 1 | 0.09 |
+-------------+-------------+-------------------+
| Hispanic | 2 | 0.91 |
+-------------+-------------+-------------------+
| Race | 0 | 0.15 |
+-------------+-------------+-------------------+
| Race | 1 | 0.85 |
+-------------+-------------+-------------------+
"""
weights = {}
with open(_path("SampleWeights.csv")) as csv_in:
for line in csv_in:
demo, category, proportion = line.split(',')
if demo not in weights:
weights[demo] = {}
weights[demo][int(category)] = float(proportion)
return weights
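# --- Illustrative usage sketch (added for clarity; not part of the original package) ---
# A minimal sketch of comparing the sample study against its target proportions with the
# loaders above. The 'Age' column name and dictionary key are assumptions based on the
# coded demographics described in the docstrings; adjust them to the actual CSV headers.
def _example_compare_proportions():
    study = load_sample_study()
    targets = load_sample_proportions()
    weights = load_sample_weights()
    # Observed (unweighted) proportions for one demographic vs. its targets ('Age' assumed).
    observed_age = study['Age'].value_counts(normalize=True).sort_index()
    return observed_age, targets.get('Age'), weights.describe()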
| mit |
sunil07t/e-mission-server | emission/analysis/plotting/geojson/geojson_feature_converter.py | 1 | 14256 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from builtins import *
import logging
import geojson as gj
import copy
import attrdict as ad
import pandas as pd
import emission.storage.timeseries.abstract_timeseries as esta
import emission.net.usercache.abstract_usercache as enua
import emission.storage.timeseries.timequery as estt
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.storage.decorations.timeline as esdtl
import emission.core.wrapper.location as ecwl
import emission.core.wrapper.cleanedsection as ecwcs
import emission.core.wrapper.entry as ecwe
import emission.core.common as ecc
# TODO: Move this to the section_features class instead
import emission.analysis.intake.cleaning.location_smoothing as eaicl
def _del_non_derializable(prop_dict, extra_keys):
for key in extra_keys:
if key in prop_dict:
del prop_dict[key]
def _stringify_foreign_key(prop_dict, key_names):
for key_name in key_names:
if hasattr(prop_dict, key_name):
setattr(prop_dict, key_name, str(getattr(prop_dict,key_name)))
def location_to_geojson(location):
"""
Converts a location wrapper object into geojson format.
This is pretty easy - it is a point.
Since we have other properties that we care about, we make it a feature.
Then, all the other stuff goes directly into the properties since the wrapper is a dict too!
:param location: the location object
:return: a geojson version of the location. the object is of type "Feature".
"""
try:
ret_feature = gj.Feature()
ret_feature.id = str(location.get_id())
ret_feature.geometry = location.data.loc
ret_feature.properties = copy.copy(location.data)
ret_feature.properties["feature_type"] = "location"
_del_non_derializable(ret_feature.properties, ["loc"])
return ret_feature
except Exception as e:
logging.exception(("Error while converting object %s" % location))
raise e
def place_to_geojson(place):
"""
Converts a place wrapper object into geojson format.
This is also pretty easy - it is just a point.
Since we have other properties that we care about, we make it a feature.
Then, all the other stuff goes directly into the properties since the wrapper is a dict too!
:param place: the place object
:return: a geojson version of the place. the object is of type "Feature".
"""
ret_feature = gj.Feature()
ret_feature.id = str(place.get_id())
ret_feature.geometry = place.data.location
ret_feature.properties = copy.copy(place.data)
ret_feature.properties["feature_type"] = "place"
# _stringify_foreign_key(ret_feature.properties, ["ending_trip", "starting_trip"])
_del_non_derializable(ret_feature.properties, ["location"])
return ret_feature
def stop_to_geojson(stop):
"""
Converts a stop wrapper object into geojson format.
This is also pretty easy - it is just a point.
Since we have other properties that we care about, we make it a feature.
Then, all the other stuff goes directly into the properties since the wrapper is a dict too!
:param stop: the stop object
:return: a geojson version of the stop. the object is of type "Feature".
"""
ret_feature = gj.Feature()
ret_feature.id = str(stop.get_id())
ret_feature.geometry = gj.LineString()
ret_feature.geometry.coordinates = [stop.data.enter_loc.coordinates, stop.data.exit_loc.coordinates]
ret_feature.properties = copy.copy(stop.data)
ret_feature.properties["feature_type"] = "stop"
# _stringify_foreign_key(ret_feature.properties, ["ending_section", "starting_section", "trip_id"])
_del_non_derializable(ret_feature.properties, ["location"])
return ret_feature
def section_to_geojson(section, tl):
"""
This is the trickiest part of the visualization.
The section is basically a collection of points with a line through them.
    So the representation is a feature collection in which one feature is the line and one feature collection is the set of point features.
:param section: the section to be converted
:return: a feature collection which is the geojson version of the section
"""
ts = esta.TimeSeries.get_time_series(section.user_id)
entry_it = ts.find_entries(["analysis/recreated_location"],
esda.get_time_query_for_trip_like(
"analysis/cleaned_section",
section.get_id()))
    # TODO: Decide whether we want to rewrite this to use dataframes throughout instead of python arrays.
# dataframes insert nans. We could use fillna to fill with default values, but if we are not actually
# using dataframe features here, it is unclear how much that would help.
feature_array = []
section_location_entries = [ecwe.Entry(entry) for entry in entry_it]
if len(section_location_entries) != 0:
logging.debug("first element in section_location_array = %s" % section_location_entries[0])
if not ecc.compare_rounded_arrays(section.data.end_loc.coordinates,
section_location_entries[-1].data.loc.coordinates,
digits=4):
logging.info("section_location_array[-1].data.loc %s != section.data.end_loc %s even after df.ts fix, filling gap" % \
(section_location_entries[-1].data.loc, section.data.end_loc))
assert(False)
last_loc_doc = ts.get_entry_at_ts("background/filtered_location", "data.ts", section.data.end_ts)
if last_loc_doc is None:
logging.warning("can't find entry to patch gap, leaving gap")
else:
last_loc_entry = ecwe.Entry(last_loc_doc)
logging.debug("Adding new entry %s to fill the end point gap between %s and %s"
% (last_loc_entry.data.loc, section_location_entries[-1].data.loc,
section.data.end_loc))
section_location_entries.append(last_loc_entry)
points_line_feature = point_array_to_line(section_location_entries)
points_line_feature.id = str(section.get_id())
points_line_feature.properties.update(copy.copy(section.data))
# Update works on dicts, convert back to a section object to make the modes
# work properly
points_line_feature.properties = ecwcs.Cleanedsection(points_line_feature.properties)
points_line_feature.properties["feature_type"] = "section"
points_line_feature.properties["sensed_mode"] = str(points_line_feature.properties.sensed_mode)
_del_non_derializable(points_line_feature.properties, ["start_loc", "end_loc"])
# feature_array.append(gj.FeatureCollection(points_feature_array))
feature_array.append(points_line_feature)
return gj.FeatureCollection(feature_array)
def incident_to_geojson(incident):
ret_feature = gj.Feature()
ret_feature.id = str(incident.get_id())
ret_feature.geometry = gj.Point()
ret_feature.geometry.coordinates = incident.data.loc.coordinates
ret_feature.properties = copy.copy(incident.data)
ret_feature.properties["feature_type"] = "incident"
# _stringify_foreign_key(ret_feature.properties, ["ending_section", "starting_section", "trip_id"])
_del_non_derializable(ret_feature.properties, ["loc"])
return ret_feature
def geojson_incidents_in_range(user_id, start_ts, end_ts):
MANUAL_INCIDENT_KEY = "manual/incident"
ts = esta.TimeSeries.get_time_series(user_id)
uc = enua.UserCache.getUserCache(user_id)
tq = estt.TimeQuery("data.ts", start_ts, end_ts)
incident_entry_docs = list(ts.find_entries([MANUAL_INCIDENT_KEY], time_query=tq)) \
+ list(uc.getMessage([MANUAL_INCIDENT_KEY], tq))
incidents = [ecwe.Entry(doc) for doc in incident_entry_docs]
return list(map(incident_to_geojson, incidents))
def point_array_to_line(point_array):
points_line_string = gj.LineString()
# points_line_string.coordinates = [l.loc.coordinates for l in filtered_section_location_array]
points_line_string.coordinates = []
points_times = []
for l in point_array:
# logging.debug("About to add %s to line_string " % l)
points_line_string.coordinates.append(l.data.loc.coordinates)
points_times.append(l.data.ts)
points_line_feature = gj.Feature()
points_line_feature.geometry = points_line_string
points_line_feature.properties = {}
points_line_feature.properties["times"] = points_times
return points_line_feature
def trip_to_geojson(trip, tl):
"""
Trips are the main focus of our current visualization, so they are most complex.
Each trip is represented as a feature collection with the following features:
- two features for the start and end places
- features for each stop in the trip
- features for each section in the trip
:param trip: the trip object to be converted
:param tl: the timeline used to retrieve related objects
:return: the geojson version of the trip
"""
feature_array = []
curr_start_place = tl.get_object(trip.data.start_place)
curr_end_place = tl.get_object(trip.data.end_place)
start_place_geojson = place_to_geojson(curr_start_place)
start_place_geojson["properties"]["feature_type"] = "start_place"
feature_array.append(start_place_geojson)
end_place_geojson = place_to_geojson(curr_end_place)
end_place_geojson["properties"]["feature_type"] = "end_place"
feature_array.append(end_place_geojson)
trip_tl = esdt.get_cleaned_timeline_for_trip(trip.user_id, trip.get_id())
stops = trip_tl.places
for stop in stops:
feature_array.append(stop_to_geojson(stop))
for i, section in enumerate(trip_tl.trips):
section_gj = section_to_geojson(section, tl)
feature_array.append(section_gj)
trip_geojson = gj.FeatureCollection(features=feature_array, properties=trip.data)
trip_geojson.id = str(trip.get_id())
feature_array.extend(geojson_incidents_in_range(trip.user_id,
curr_start_place.data.exit_ts,
curr_end_place.data.enter_ts))
if trip.metadata.key == esda.CLEANED_UNTRACKED_KEY:
# trip_geojson.properties["feature_type"] = "untracked"
# Since the "untracked" type is not correctly handled on the phone, we just
# skip these trips until
# https://github.com/e-mission/e-mission-phone/issues/118
# is fixed
# TODO: Once it is fixed, re-introduce the first line in this block
# and remove the None check in get_geojson_for_timeline
return None
else:
trip_geojson.properties["feature_type"] = "trip"
return trip_geojson
def get_geojson_for_ts(user_id, start_ts, end_ts):
tl = esdtl.get_cleaned_timeline(user_id, start_ts, end_ts)
tl.fill_start_end_places()
return get_geojson_for_timeline(user_id, tl)
def get_geojson_for_dt(user_id, start_local_dt, end_local_dt):
logging.debug("Getting geojson for %s -> %s" % (start_local_dt, end_local_dt))
tl = esdtl.get_cleaned_timeline_from_dt(user_id, start_local_dt, end_local_dt)
tl.fill_start_end_places()
return get_geojson_for_timeline(user_id, tl)
def get_geojson_for_timeline(user_id, tl):
"""
tl represents the "timeline" object that is queried for the trips and locations
"""
geojson_list = []
for trip in tl.trips:
try:
trip_geojson = trip_to_geojson(trip, tl)
if trip_geojson is not None:
geojson_list.append(trip_geojson)
except Exception as e:
logging.exception("Found error %s while processing trip %s" % (e, trip))
raise e
logging.debug("trip count = %d, geojson count = %d" %
(len(tl.trips), len(geojson_list)))
return geojson_list
def get_all_points_for_range(user_id, key, start_ts, end_ts):
import emission.storage.timeseries.timequery as estt
# import emission.core.wrapper.location as ecwl
tq = estt.TimeQuery("metadata.write_ts", start_ts, end_ts)
ts = esta.TimeSeries.get_time_series(user_id)
entry_it = ts.find_entries([key], tq)
points_array = [ecwe.Entry(entry) for entry in entry_it]
return get_feature_list_for_point_array(points_array)
def get_feature_list_for_point_array(points_array):
points_feature_array = [location_to_geojson(le) for le in points_array]
print ("Found %d features from %d points" %
(len(points_feature_array), len(points_array)))
feature_array = []
feature_array.append(gj.FeatureCollection(points_feature_array))
feature_array.append(point_array_to_line(points_array))
feature_coll = gj.FeatureCollection(feature_array)
return feature_coll
def get_feature_list_from_df(loc_time_df, ts="ts", latitude="latitude", longitude="longitude", fmt_time="fmt_time"):
"""
Input DF should have columns called "ts", "latitude" and "longitude", or the corresponding
columns can be passed in using the ts, latitude and longitude parameters
"""
points_array = get_location_entry_list_from_df(loc_time_df, ts, latitude, longitude, fmt_time)
return get_feature_list_for_point_array(points_array)
def get_location_entry_list_from_df(loc_time_df, ts="ts", latitude="latitude", longitude="longitude", fmt_time="fmt_time"):
location_entry_list = []
for idx, row in loc_time_df.iterrows():
retVal = {"latitude": row[latitude], "longitude": row[longitude], "ts": row[ts],
"_id": str(idx), "fmt_time": row[fmt_time], "loc": gj.Point(coordinates=[row[longitude], row[latitude]])}
location_entry_list.append(ecwe.Entry.create_entry(
"dummy_user", "background/location", ecwl.Location(retVal)))
return location_entry_list
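# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of turning a plain location DataFrame into geojson with the helpers
# defined above. The column names ("ts", "latitude", "longitude", "fmt_time") are the
# defaults expected by get_location_entry_list_from_df; the coordinates are made up.
# get_feature_list_from_df(loc_df) is the one-call wrapper that also returns the point
# features alongside the line.
def _example_df_to_line_feature():
    loc_df = pd.DataFrame({
        "ts": [1.0, 2.0, 3.0],
        "latitude": [37.39, 37.40, 37.41],
        "longitude": [-122.08, -122.07, -122.06],
        "fmt_time": ["t1", "t2", "t3"],
    })
    # Wrap each row as a location entry, then join them into a single LineString
    # feature whose "times" property parallels the coordinates.
    entries = get_location_entry_list_from_df(loc_df)
    return point_array_to_line(entries)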
| bsd-3-clause |