repo_name: stringlengths 7-79 | path: stringlengths 4-179 | copies: stringlengths 1-3 | size: stringlengths 4-6 | content: stringlengths 959-798k | license: stringclasses (15 values)
---|---|---|---|---|---|
tacaswell/scikit-xray
|
doc/sphinxext/plot_generator.py
|
8
|
10150
|
"""
Sphinx plugin to run example scripts and create a gallery page.
Taken from the seaborn project, which in turn was lightly
modified from the mpld3 project.
"""
from __future__ import division
import os
import os.path as op
import re
import glob
import token
import tokenize
import shutil
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import image
RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
.. image:: {img_file}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
position: relative;
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure:hover img {{
-webkit-filter: blur(3px);
-moz-filter: blur(3px);
-o-filter: blur(3px);
-ms-filter: blur(3px);
filter: blur(3px);
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure span {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
background: #000;
color: #fff;
visibility: hidden;
opacity: 0;
z-index: 100;
}}
.figure p {{
position: absolute;
top: 45%;
width: 170px;
font-size: 110%;
}}
.figure:hover span {{
visibility: visible;
opacity: .4;
}}
.caption {{
        position: absolute;
width: 180px;
top: 170px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
def create_thumbnail(infile, thumbfile,
width=300, height=300,
cx=0.5, cy=0.5, border=4):
baseout, extout = op.splitext(thumbfile)
im = image.imread(infile)
rows, cols = im.shape[:2]
x0 = int(cx * cols - .5 * width)
y0 = int(cy * rows - .5 * height)
xslice = slice(x0, x0 + width)
yslice = slice(y0, y0 + height)
thumb = im[yslice, xslice]
thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
dpi = 100
fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
frameon=False, xticks=[], yticks=[])
ax.imshow(thumb, aspect='auto', resample=True,
interpolation='bilinear')
fig.savefig(thumbfile, dpi=dpi)
return fig
def indent(s, N=4):
"""indent a string"""
return s.replace('\n', '\n' + N * ' ')
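# Illustrative sketch, not part of the original extension: one way the helper
# above might be exercised on a standalone image. The file names here are
# hypothetical placeholders, and the input image is assumed to exist on disk.
def _demo_create_thumbnail(infile='example.png', thumbfile='example_thumb.png'):
    """Create a bordered 300x300 thumbnail centred on the middle of the image."""
    fig = create_thumbnail(infile, thumbfile, width=300, height=300, cx=0.5, cy=0.5)
    plt.close(fig)
    return thumbfile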
class ExampleGenerator(object):
"""Tools for generating an example page from a file"""
def __init__(self, filename, target_dir):
self.filename = filename
self.target_dir = target_dir
self.thumbloc = .5, .5
self.extract_docstring()
with open(filename, "r") as fid:
self.filetext = fid.read()
outfilename = op.join(target_dir, self.rstfilename)
# Only actually run it if the output RST file doesn't
# exist or it was modified less recently than the example
if (not op.exists(outfilename)
or (op.getmtime(outfilename) < op.getmtime(filename))):
self.exec_file()
else:
print("skipping {0}".format(self.filename))
@property
def dirname(self):
return op.split(self.filename)[0]
@property
def fname(self):
return op.split(self.filename)[1]
@property
def modulename(self):
return op.splitext(self.fname)[0]
@property
def pyfilename(self):
return self.modulename + '.py'
@property
def rstfilename(self):
return self.modulename + ".rst"
@property
def htmlfilename(self):
return self.modulename + '.html'
@property
def pngfilename(self):
pngfile = self.modulename + '.png'
return "_images/" + pngfile
@property
def thumbfilename(self):
pngfile = self.modulename + '_thumb.png'
return pngfile
@property
def sphinxtag(self):
return self.modulename
@property
def pagetitle(self):
return self.docstring.strip().split('\n')[0].strip()
@property
def plotfunc(self):
match = re.search(r"sns\.(.+plot)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+map)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+Grid)\(", self.filetext)
if match:
return match.group(1)
return ""
def extract_docstring(self):
""" Extract a module-level docstring
"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(lines.__iter__().next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs,
# extract the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')
).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
thumbloc = None
for i, line in enumerate(docstring.split("\n")):
m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
if m:
thumbloc = float(m.group(1)), float(m.group(2))
break
if thumbloc is not None:
self.thumbloc = thumbloc
docstring = "\n".join([l for l in docstring.split("\n")
if not l.startswith("_thumb")])
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row
def exec_file(self):
print("running {0}".format(self.filename))
plt.close('all')
my_globals = {'pl': plt,
'plt': plt}
execfile(self.filename, my_globals)
fig = plt.gcf()
fig.canvas.draw()
pngfile = op.join(self.target_dir, self.pngfilename)
thumbfile = op.join("example_thumbs", self.thumbfilename)
self.html = "<img src=../%s>" % self.pngfilename
fig.savefig(pngfile, dpi=75, bbox_inches="tight")
cx, cy = self.thumbloc
create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)
def toctree_entry(self):
return " ./%s\n\n" % op.splitext(self.htmlfilename)[0]
def contents_entry(self):
return (".. raw:: html\n\n"
" <div class='figure align-center'>\n"
" <a href=./{0}>\n"
" <img src=../_static/{1}>\n"
" <span class='figure-label'>\n"
" <p>{2}</p>\n"
" </span>\n"
" </a>\n"
" </div>\n\n"
"\n\n"
"".format(self.htmlfilename,
self.thumbfilename,
self.plotfunc))
def main(app):
static_dir = op.join(app.builder.srcdir, '_static')
target_dir = op.join(app.builder.srcdir, 'examples')
image_dir = op.join(app.builder.srcdir, 'examples/_images')
thumb_dir = op.join(app.builder.srcdir, "example_thumbs")
source_dir = op.abspath(op.join(app.builder.srcdir,
'..', 'examples'))
if not op.exists(static_dir):
os.makedirs(static_dir)
if not op.exists(target_dir):
os.makedirs(target_dir)
if not op.exists(image_dir):
os.makedirs(image_dir)
if not op.exists(thumb_dir):
os.makedirs(thumb_dir)
if not op.exists(source_dir):
os.makedirs(source_dir)
banner_data = []
toctree = ("\n\n"
".. toctree::\n"
" :hidden:\n\n")
contents = "\n\n"
# Write individual example files
for filename in glob.glob(op.join(source_dir, "*.py")):
ex = ExampleGenerator(filename, target_dir)
banner_data.append({"title": ex.pagetitle,
"url": op.join('examples', ex.htmlfilename),
"thumb": op.join(ex.thumbfilename)})
shutil.copyfile(filename, op.join(target_dir, ex.pyfilename))
output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
docstring=ex.docstring,
end_line=ex.end_line,
fname=ex.pyfilename,
img_file=ex.pngfilename)
with open(op.join(target_dir, ex.rstfilename), 'w') as f:
f.write(output)
toctree += ex.toctree_entry()
contents += ex.contents_entry()
if len(banner_data) < 10:
banner_data = (4 * banner_data)[:10]
# write index file
index_file = op.join(target_dir, 'index.rst')
with open(index_file, 'w') as index:
index.write(INDEX_TEMPLATE.format(sphinx_tag="example_gallery",
toctree=toctree,
contents=contents))
def setup(app):
app.connect('builder-inited', main)
return {'parallel_read_safe': True, 'parallel_write_safe': True}
|
bsd-3-clause
|
annoviko/pyclustering
|
pyclustering/tests/tests_runner.py
|
1
|
2851
|
"""!
@brief Test runner for unit and integration tests in the project.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import enum
import sys
import unittest
import warnings
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.tests.suite_holder import suite_holder
from pyclustering import __PYCLUSTERING_ROOT_DIRECTORY__
class pyclustering_integration_tests(suite_holder):
def __init__(self):
super().__init__()
pyclustering_integration_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(integration_suite):
integration_suite.addTests(unittest.TestLoader().discover(__PYCLUSTERING_ROOT_DIRECTORY__, "it_*.py"))
class pyclustering_unit_tests(suite_holder):
def __init__(self):
super().__init__()
pyclustering_unit_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(unit_suite):
unit_suite.addTests(unittest.TestLoader().discover(__PYCLUSTERING_ROOT_DIRECTORY__, "ut_*.py"))
class pyclustering_tests(suite_holder):
def __init__(self):
super().__init__()
pyclustering_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(pyclustering_suite):
pyclustering_integration_tests.fill_suite(pyclustering_suite)
pyclustering_unit_tests.fill_suite(pyclustering_suite)
class exit_code(enum.IntEnum):
success = 0,
error_unknown_type_test = -1,
error_too_many_arguments = -2,
error_failure = -3
class tests_runner:
@staticmethod
def run():
result = None
return_code = exit_code.success
warnings.filterwarnings('ignore', 'Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.')
if len(sys.argv) == 1:
result = pyclustering_tests().run()
elif len(sys.argv) == 2:
if sys.argv[1] == "--integration":
result = pyclustering_integration_tests().run()
elif sys.argv[1] == "--unit":
result = pyclustering_unit_tests().run()
elif sys.argv[1] == "test":
result = pyclustering_tests().run()
else:
print("Unknown type of test is specified '" + str(sys.argv[1]) + "'.")
return_code = exit_code.error_unknown_type_test
else:
print("Too many arguments '" + str(len(sys.argv)) + "' is used.")
return_code = exit_code.error_too_many_arguments
# Get execution result
if result is not None:
if result.wasSuccessful() is False:
return_code = exit_code.error_failure
exit(return_code)
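# Illustrative sketch (assumption, not shown in the excerpt above): the runner is
# meant to be executed as a script, e.g. ``python -m pyclustering.tests.tests_runner --unit``,
# so a minimal entry point would simply delegate to ``tests_runner.run``.
if __name__ == '__main__':
    tests_runner.run()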
|
gpl-3.0
|
murali-munna/scikit-learn
|
sklearn/linear_model/randomized_l1.py
|
95
|
23365
|
"""
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
    # XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
    # Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article, which corresponds to ``scaling`` here.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
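# Illustrative sketch, not part of the original module: fitting the estimator on
# synthetic data and reading back the stability scores. The data, ``alpha`` and
# ``n_resampling`` values below are arbitrary choices for demonstration only.
def _demo_randomized_lasso():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 20)
    y = X[:, 0] + 0.5 * rng.randn(100)
    clf = RandomizedLasso(alpha=0.025, n_resampling=50, random_state=0)
    clf.fit(X, y)
    # scores_ holds, for each feature, the fraction of resamplings that selected it
    return clf.scores_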
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
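# Illustrative sketch, not part of the original module: computing a stability
# path on synthetic data. Shapes and parameter values are assumptions made for
# the example only.
def _demo_lasso_stability_path():
    rng = np.random.RandomState(42)
    X = rng.randn(50, 10)
    y = X[:, 0] + 0.1 * rng.randn(50)
    alphas_grid, scores_path = lasso_stability_path(X, y, random_state=42,
                                                    n_resampling=30)
    # scores_path has shape (n_features, len(alphas_grid))
    return alphas_grid, scores_path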
|
bsd-3-clause
|
Nyker510/scikit-learn
|
sklearn/utils/tests/test_sparsefuncs.py
|
57
|
13752
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((rows.shape[0], X.shape[1]), dtype=np.float64)
assign_rows_csr(X, rows,
np.arange(out.shape[0], dtype=np.intp)[::-1], out)
assert_array_equal(out, X[rows].toarray()[::-1])
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
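# Illustrative sketch, not part of the original test module: the main helpers
# exercised above, called once on a tiny CSR matrix. Values are arbitrary.
def _demo_sparsefuncs():
    X = sp.csr_matrix(np.array([[0., 1.], [2., 0.]]))
    means, variances = mean_variance_axis(X, axis=0)
    mins, maxs = min_max_axis(X, axis=0)
    return means, variances, mins, maxs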
|
bsd-3-clause
|
rwightman/tensorflow-litterbox
|
litterbox/sdc_export_graph.py
|
1
|
8142
|
# Copyright (C) 2016 Ross Wightman. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# ==============================================================================
"""
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import pandas as pd
import os
from copy import deepcopy
from fabric import util
from models import ModelSdc
from processors import ProcessorSdc
from collections import defaultdict
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'root_network', 'resnet_v1_50',
"""Either resnet_v1_50, resnet_v1_101, resnet_v1_152, inception_resnet_v2, nvidia_sdc""")
tf.app.flags.DEFINE_integer(
'top_version', 5,
"""Top level network version, specifies output layer variations. See model code.""")
tf.app.flags.DEFINE_boolean(
'bayesian', False, """Activate dropout layers for inference.""")
tf.app.flags.DEFINE_integer(
'samples', 0, """Activate dropout layers for inference.""")
tf.app.flags.DEFINE_string(
'checkpoint_path', '', """Checkpoint file for model.""")
tf.app.flags.DEFINE_string(
'ensemble_path', '', """CSV file with ensemble specification. Use as alternative to single model checkpoint.""")
tf.app.flags.DEFINE_string(
'name', 'model', """Name prefix for outputs of exported artifacts.""")
def _weighted_mean(outputs_list, weights_tensor):
assert isinstance(outputs_list[0], tf.Tensor)
print(outputs_list)
outputs_tensor = tf.concat(1, outputs_list)
print('outputs concat', outputs_tensor.get_shape())
if len(outputs_list) > 1:
weighted_outputs = outputs_tensor * weights_tensor
print('weighted outputs ', weighted_outputs.get_shape())
outputs_tensor = tf.reduce_mean(weighted_outputs)
else:
outputs_tensor = tf.squeeze(outputs_tensor)
return outputs_tensor
def _merge_outputs(outputs, weights):
assert outputs
merged = defaultdict(list)
weights_tensor = tf.pack(weights)
print('weights ', weights_tensor.get_shape())
# recombine multiple model outputs by dict key or list position under output name based dict
if isinstance(outputs[0], dict):
for o in outputs:
for name, tensor in o.items():
merged['output_%s' % name].append(tensor)
elif isinstance(outputs[0], list):
for o in outputs:
for index, tensor in enumerate(o):
merged['output_%d' % index].append(tensor)
else:
merged['output'] = outputs
reduced = {name: _weighted_mean(value_list, weights_tensor) for name, value_list in merged.items()}
for k, v in reduced.items():
print(k, v, v.get_shape())
return reduced
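# Illustrative sketch, not part of the original script: merging two hypothetical
# single-output models with a 25/75 weighting, using the same TF 0.x-era API
# (tf.pack / tf.concat(dim, values)) that the code above relies on.
def _demo_merge_outputs():
    out_a = tf.constant([[1.0]])  # stand-in for one model's steering output
    out_b = tf.constant([[3.0]])  # stand-in for a second model's output
    return _merge_outputs([out_a, out_b], weights=[0.25, 0.75])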
def build_export_graph(models, batch_size=1, export_scope=''):
assert models
inputs = tf.placeholder(tf.uint8, [None, None, 3], name='input_placeholder')
print("Graph Inputs: ")
print(inputs.name, inputs.get_shape())
with tf.device('/gpu:0'):
inputs = tf.cast(inputs, tf.float32)
inputs = tf.div(inputs, 255)
input_tensors = [inputs, tf.zeros(shape=()), tf.constant('', dtype=tf.string)]
model_outputs_list = []
weights_list = []
for m in models:
with tf.variable_scope(m['name'], values=input_tensors):
model, processor = m['model'], m['processor']
processed_inputs = processor.process_example(input_tensors, mode='pred')
if batch_size > 1:
processed_inputs = [tf.gather(tf.expand_dims(x, 0), [0] * batch_size) for x in processed_inputs]
processed_inputs = processor.reshape_batch(processed_inputs, batch_size=batch_size)
model_outputs = model.build_tower(
processed_inputs[0], is_training=False, summaries=False)
model_outputs_list += [model.get_predictions(model_outputs, processor)]
weights_list += [m['weight']]
merged_outputs = _merge_outputs(model_outputs_list, weights_list)
print("Graph Outputs: ")
outputs = []
for name, output in merged_outputs.items():
outputs += [tf.identity(output, name)]
[print(x.name, x.get_shape()) for x in outputs]
return inputs, outputs
def main(_):
util.check_tensorflow_version()
assert os.path.isfile(FLAGS.checkpoint_path) or os.path.isfile(FLAGS.ensemble_path)
model_args_list = []
if FLAGS.checkpoint_path:
model_args_list.append(
{
'root_network': FLAGS.root_network,
'top_version': FLAGS.top_version,
'image_norm': FLAGS.image_norm,
'image_size': FLAGS.image_size,
'image_aspect': FLAGS.image_aspect,
'checkpoint_path': FLAGS.checkpoint_path,
'bayesian': FLAGS.bayesian,
'weight': 1.0,
}
)
else:
ensemble_df = pd.DataFrame.from_csv(FLAGS.ensemble_path, index_col=None)
model_args_list += ensemble_df.to_dict('records')
model_params_common = {
'outputs': {
'steer': 1,
# 'xyz': 2,
},
}
model_list = []
for i, args in enumerate(model_args_list):
print(args)
model_name = 'model_%d' % i
model_params = deepcopy(model_params_common)
model_params['network'] = args['root_network']
model_params['version'] = args['top_version']
model_params['bayesian'] = FLAGS.bayesian
model = ModelSdc(params=model_params)
processor_params = {}
processor_params['image_norm'] = args['image_norm']
processor_params['image_size'] = args['image_size']
processor_params['image_aspect'] = args['image_aspect']
processor = ProcessorSdc(params=processor_params)
model_list.append({
'model': model,
'processor': processor,
'weight': args['weight'],
'name': model_name,
'checkpoint_path': args['checkpoint_path']
})
name_prefix = FLAGS.name
with tf.Graph().as_default() as g:
batch_size = 1 if not FLAGS.samples else FLAGS.samples
build_export_graph(models=model_list, batch_size=batch_size)
model_variables = tf.contrib.framework.get_model_variables()
saver = tf.train.Saver(model_variables)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
g_def = g.as_graph_def(add_shapes=True)
tf.train.write_graph(g_def, './', name='%s-graph_def.pb.txt' % name_prefix)
for m in model_list:
checkpoint_variable_set = set()
checkpoint_path, global_step = util.resolve_checkpoint_path(m['checkpoint_path'])
if not checkpoint_path:
print('No checkpoint file found at %s' % m['checkpoint_path'])
return
reader = tf.train.NewCheckpointReader(checkpoint_path)
checkpoint_variable_set.update(reader.get_variable_to_shape_map().keys())
variables_to_restore = m['model'].variables_to_restore(
restore_outputs=True,
checkpoint_variable_set=checkpoint_variable_set,
prefix_scope=m['name'])
saver_local = tf.train.Saver(variables_to_restore)
saver_local.restore(sess, checkpoint_path)
print('Successfully loaded model from %s at step=%d.' % (checkpoint_path, global_step))
saver.export_meta_graph('./%s-meta_graph.pb.txt' % name_prefix, as_text=True)
saver.save(sess, './%s-checkpoint' % name_prefix, write_meta_graph=True)
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
DSLituiev/scikit-learn
|
sklearn/linear_model/stochastic_gradient.py
|
34
|
50761
|
# Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
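# Illustrative sketch, not part of the original module: the string options above
# are resolved to the integer codes expected by the Cython SGD routines
# (``plain_sgd`` / ``average_sgd``).
def _demo_lookup_solver_codes(penalty='elasticnet', learning_rate='invscaling'):
    """Return the (penalty, learning rate) integer codes for the given names."""
    return PENALTY_TYPES[penalty], LEARNING_RATE_TYPES[learning_rate]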
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
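# Illustrative sketch, not part of scikit-learn: when est.average is enabled,
# the average_sgd branch above keeps a running average of the weight iterates
# alongside the usual "standard" weights, and that average is what ends up in
# coef_. A loose, self-contained illustration of such a running mean (the
# update steps are fake and the names are hypothetical):
def _sketch_running_average_of_iterates():
    import numpy as np
    rng = np.random.RandomState(0)
    coef = np.zeros(3)
    average_coef = np.zeros(3)
    for t in range(1, 11):
        coef -= 0.1 * rng.randn(3)                   # pretend SGD update
        average_coef += (coef - average_coef) / t    # running mean of iterates
    return coef, average_coef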
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
        Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
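# Illustrative sketch, not part of scikit-learn: typical out-of-core use of
# the partial_fit method documented above. `classes` must be given on the
# first call because later mini-batches need not contain every label. The
# data, batch sizes and helper name are made up for this example.
def _sketch_partial_fit_out_of_core():
    import numpy as np
    from sklearn.linear_model import SGDClassifier
    rng = np.random.RandomState(0)
    all_classes = np.array([0, 1, 2])
    clf = SGDClassifier()
    for _ in range(5):                       # pretend these are data chunks
        X_batch = rng.randn(20, 4)
        y_batch = rng.randint(0, 3, size=20)
        clf.partial_fit(X_batch, y_batch, classes=all_classes)
    return clf.predict(rng.randn(3, 4))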
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
        case is in Appendix B of:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
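# Illustrative sketch, not part of scikit-learn: the binary probability
# mapping used by predict_proba above when loss="modified_huber",
# (clip(decision_function(X), -1, 1) + 1) / 2, applied to a few made-up
# decision values.
def _sketch_modified_huber_binary_proba():
    import numpy as np
    decision = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
    prob_positive = (np.clip(decision, -1, 1) + 1) / 2
    # prob_positive is array([0.  , 0.25, 0.5 , 0.75, 1.  ])
    return np.column_stack([1 - prob_positive, prob_positive])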
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
        The learning rate schedule:
        constant: eta = eta0
        optimal: eta = 1.0 / (alpha * (t + t0))
        invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
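# Illustrative sketch, not part of scikit-learn: SGD-based estimators work
# best when features have zero mean and unit variance (see the SGDClassifier
# docstring above), so they are commonly paired with a scaler. The synthetic
# data and the pipeline below are only an illustration.
def _sketch_scaled_sgd_regressor():
    import numpy as np
    from sklearn.linear_model import SGDRegressor
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5) * np.array([1.0, 10.0, 100.0, 0.1, 1.0])
    y = X @ np.array([1.0, 0.5, 0.01, 2.0, -1.0]) + 0.1 * rng.randn(100)
    model = make_pipeline(StandardScaler(), SGDRegressor(random_state=0))
    model.fit(X, y)
    return model.predict(X[:3])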
|
bsd-3-clause
|
jreback/pandas
|
pandas/plotting/_matplotlib/tools.py
|
1
|
14337
|
# being a bit too dynamic
from math import ceil
from typing import TYPE_CHECKING, Iterable, List, Sequence, Tuple, Union
import warnings
import matplotlib.table
import matplotlib.ticker as ticker
import numpy as np
from pandas._typing import FrameOrSeriesUnion
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex, ABCSeries
from pandas.plotting._matplotlib import compat
if TYPE_CHECKING:
from matplotlib.axes import Axes
from matplotlib.axis import Axis
from matplotlib.lines import Line2D
from matplotlib.table import Table
def format_date_labels(ax: "Axes", rot):
# mini version of autofmt_xdate
for label in ax.get_xticklabels():
label.set_ha("right")
label.set_rotation(rot)
fig = ax.get_figure()
fig.subplots_adjust(bottom=0.2)
def table(
ax, data: FrameOrSeriesUnion, rowLabels=None, colLabels=None, **kwargs
) -> "Table":
if isinstance(data, ABCSeries):
data = data.to_frame()
elif isinstance(data, ABCDataFrame):
pass
else:
raise ValueError("Input data must be DataFrame or Series")
if rowLabels is None:
rowLabels = data.index
if colLabels is None:
colLabels = data.columns
cellText = data.values
table = matplotlib.table.table(
ax, cellText=cellText, rowLabels=rowLabels, colLabels=colLabels, **kwargs
)
return table
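# Illustrative sketch, not part of pandas: a minimal use of the table()
# helper above, drawing a small DataFrame as a matplotlib table on an axis.
# The DataFrame contents are made up for the example.
def _sketch_table_usage():
    import matplotlib.pyplot as plt
    import pandas as pd
    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["row1", "row2"])
    fig, ax = plt.subplots()
    ax.axis("off")                       # hide the axes, keep only the table
    tbl = table(ax, df, loc="center")    # extra kwargs go to matplotlib.table
    return fig, tbl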
def _get_layout(nplots: int, layout=None, layout_type: str = "box") -> Tuple[int, int]:
if layout is not None:
if not isinstance(layout, (tuple, list)) or len(layout) != 2:
raise ValueError("Layout must be a tuple of (rows, columns)")
nrows, ncols = layout
if nrows == -1 and ncols > 0:
layout = nrows, ncols = (ceil(nplots / ncols), ncols)
elif ncols == -1 and nrows > 0:
layout = nrows, ncols = (nrows, ceil(nplots / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
if nrows * ncols < nplots:
raise ValueError(
f"Layout of {nrows}x{ncols} must be larger than required size {nplots}"
)
return layout
if layout_type == "single":
return (1, 1)
elif layout_type == "horizontal":
return (1, nplots)
elif layout_type == "vertical":
return (nplots, 1)
layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}
try:
return layouts[nplots]
except KeyError:
k = 1
while k ** 2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
return k, k
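# Illustrative sketch, not part of pandas: how the layout resolution in
# _get_layout above behaves for a few representative inputs (it is a private
# helper, called here purely for illustration).
def _sketch_layout_resolution():
    assert _get_layout(5, layout=(-1, 2)) == (3, 2)   # rows filled via ceil
    assert _get_layout(5, layout=(2, -1)) == (2, 3)   # cols filled via ceil
    assert _get_layout(3) == (2, 2)                   # default "box" layout
    assert _get_layout(4, layout_type="horizontal") == (1, 4)
    assert _get_layout(4, layout_type="vertical") == (4, 1)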
# copied from matplotlib/pyplot.py and modified for pandas.plotting
def create_subplots(
naxes: int,
sharex: bool = False,
sharey: bool = False,
squeeze: bool = True,
subplot_kw=None,
ax=None,
layout=None,
layout_type: str = "box",
**fig_kw,
):
"""
Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Parameters
----------
naxes : int
        Number of required axes. Any axes in excess of this number are set
        invisible. Default is nrows * ncols.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharey : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
        - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
          array of Axis objects.
        - for NxM subplots with N>1 and M>1, a 2-d array of Axis objects is
          returned.
If False, no squeezing is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
        subplot.
ax : Matplotlib axis object, optional
layout : tuple
Number of rows and columns of the subplot grid.
If not specified, calculated from naxes and layout_type
layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
Specify how to layout the subplot grid.
fig_kw : Other keyword arguments to be passed to the figure() call.
Note that all keywords not recognized above will be
automatically included here.
Returns
-------
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
Examples
--------
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
if is_list_like(ax):
if squeeze:
ax = flatten_axes(ax)
if layout is not None:
warnings.warn(
"When passing multiple axes, layout keyword is ignored", UserWarning
)
if sharex or sharey:
warnings.warn(
"When passing multiple axes, sharex and sharey "
"are ignored. These settings must be specified when creating axes",
UserWarning,
stacklevel=4,
)
if ax.size == naxes:
fig = ax.flat[0].get_figure()
return fig, ax
else:
raise ValueError(
f"The number of passed axes must be {naxes}, the "
"same as the output plot"
)
fig = ax.get_figure()
# if ax is passed and a number of subplots is 1, return ax as it is
if naxes == 1:
if squeeze:
return fig, ax
else:
return fig, flatten_axes(ax)
else:
warnings.warn(
"To output multiple subplots, the figure containing "
"the passed axes is being cleared",
UserWarning,
stacklevel=4,
)
fig.clear()
nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)
nplots = nrows * ncols
# Create empty object array to hold all axes. It's easiest to make it 1-d
    # so we can just append subplots upon creation, and then reshape it to
    # the final desired dimensions at the end.
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw["sharex"] = ax0
if sharey:
subplot_kw["sharey"] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
kwds = subplot_kw.copy()
        # Set sharex and sharey to None for blank/dummy axes; these can
        # interfere with proper axis limits on the visible axes if
        # they share axes, e.g. issue #7528
if i >= naxes:
kwds["sharex"] = None
kwds["sharey"] = None
ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)
axarr[i] = ax
if naxes != nplots:
for ax in axarr[naxes:]:
ax.set_visible(False)
handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
axes = axarr.reshape(nrows, ncols)
return fig, axes
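# Illustrative sketch, not part of pandas: a minimal call to create_subplots
# above, asking for three axes with the default "box" layout (a 2x2 grid in
# which the unused fourth axis is hidden).
def _sketch_create_subplots_usage():
    fig, axes = create_subplots(naxes=3, sharex=True, layout_type="box")
    assert axes.shape == (2, 2)
    assert sum(ax.get_visible() for ax in axes.ravel()) == 3
    return fig, axes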
def _remove_labels_from_axis(axis: "Axis"):
for t in axis.get_majorticklabels():
t.set_visible(False)
# set_visible will not be effective if
# minor axis has NullLocator and NullFormatter (default)
if isinstance(axis.get_minor_locator(), ticker.NullLocator):
axis.set_minor_locator(ticker.AutoLocator())
if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):
axis.set_minor_formatter(ticker.FormatStrFormatter(""))
for t in axis.get_minorticklabels():
t.set_visible(False)
axis.get_label().set_visible(False)
def _has_externally_shared_axis(ax1: "matplotlib.axes", compare_axis: "str") -> bool:
"""
Return whether an axis is externally shared.
Parameters
----------
ax1 : matplotlib.axes
Axis to query.
compare_axis : str
`"x"` or `"y"` according to whether the X-axis or Y-axis is being
compared.
Returns
-------
bool
`True` if the axis is externally shared. Otherwise `False`.
Notes
-----
If two axes with different positions are sharing an axis, they can be
referred to as *externally* sharing the common axis.
If two axes sharing an axis also have the same position, they can be
    referred to as *internally* sharing the common axis (a.k.a. twinning).
    handle_shared_axes() is only interested in axes externally sharing an
axis, regardless of whether either of the axes is also internally sharing
with a third axis.
"""
if compare_axis == "x":
axes = ax1.get_shared_x_axes()
elif compare_axis == "y":
axes = ax1.get_shared_y_axes()
else:
raise ValueError(
"_has_externally_shared_axis() needs 'x' or 'y' as a second parameter"
)
axes = axes.get_siblings(ax1)
# Retain ax1 and any of its siblings which aren't in the same position as it
ax1_points = ax1.get_position().get_points()
for ax2 in axes:
if not np.array_equal(ax1_points, ax2.get_position().get_points()):
return True
return False
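# Illustrative sketch, not part of pandas: the distinction drawn in the
# docstring above. Axes created with sharex=True sit at different positions
# and therefore share the x-axis "externally"; a twinx() pair occupies the
# same position and shares only "internally".
def _sketch_external_vs_internal_sharing():
    import matplotlib.pyplot as plt
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    assert _has_externally_shared_axis(ax1, "x")   # shared across positions
    fig2, ax3 = plt.subplots()
    ax4 = ax3.twinx()                              # same position as ax3
    assert not _has_externally_shared_axis(ax3, "x")
    return fig, fig2, ax4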
def handle_shared_axes(
axarr: Iterable["Axes"],
nplots: int,
naxes: int,
nrows: int,
ncols: int,
sharex: bool,
sharey: bool,
):
if nplots > 1:
if compat.mpl_ge_3_2_0():
row_num = lambda x: x.get_subplotspec().rowspan.start
col_num = lambda x: x.get_subplotspec().colspan.start
else:
row_num = lambda x: x.rowNum
col_num = lambda x: x.colNum
if nrows > 1:
try:
# first find out the ax layout,
                # so that we can correctly handle 'gaps'
layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool_)
for ax in axarr:
layout[row_num(ax), col_num(ax)] = ax.get_visible()
for ax in axarr:
                    # only the last row of subplots should get x labels -> turn
                    # all others off. The layout array handles the case where a
                    # subplot is effectively the last in its column because
                    # there is no subplot (only a gap) below it.
if not layout[row_num(ax) + 1, col_num(ax)]:
continue
if sharex or _has_externally_shared_axis(ax, "x"):
_remove_labels_from_axis(ax.xaxis)
except IndexError:
                # if gridspec is used, ax.rowNum and ax.colNum may differ
                # from the layout shape. In this case, use the last_row logic.
for ax in axarr:
if ax.is_last_row():
continue
if sharex or _has_externally_shared_axis(ax, "x"):
_remove_labels_from_axis(ax.xaxis)
if ncols > 1:
for ax in axarr:
                # only the first column should get y labels -> set all others
                # to off. Since we only have labels in the first column and we
                # always have a subplot there, we can skip the layout test.
if ax.is_first_col():
continue
if sharey or _has_externally_shared_axis(ax, "y"):
_remove_labels_from_axis(ax.yaxis)
def flatten_axes(axes: Union["Axes", Sequence["Axes"]]) -> np.ndarray:
if not is_list_like(axes):
return np.array([axes])
elif isinstance(axes, (np.ndarray, ABCIndex)):
return np.asarray(axes).ravel()
return np.array(axes)
def set_ticks_props(
axes: Union["Axes", Sequence["Axes"]],
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
):
import matplotlib.pyplot as plt
for ax in flatten_axes(axes):
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
return axes
def get_all_lines(ax: "Axes") -> List["Line2D"]:
lines = ax.get_lines()
if hasattr(ax, "right_ax"):
lines += ax.right_ax.get_lines()
if hasattr(ax, "left_ax"):
lines += ax.left_ax.get_lines()
return lines
def get_xlim(lines: Iterable["Line2D"]) -> Tuple[float, float]:
left, right = np.inf, -np.inf
for line in lines:
x = line.get_xdata(orig=False)
left = min(np.nanmin(x), left)
right = max(np.nanmax(x), right)
return left, right
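# Illustrative sketch, not part of pandas: the small helpers above combined
# on a plain matplotlib figure -- flatten a grid of axes, restyle their tick
# labels, and recover x-limits from the plotted lines. The data is made up.
def _sketch_tick_and_xlim_helpers():
    import matplotlib.pyplot as plt
    import numpy as np
    fig, axes = plt.subplots(2, 2)
    x = np.linspace(0.0, 10.0, 50)
    for ax in flatten_axes(axes):
        ax.plot(x, np.sin(x))
    set_ticks_props(axes, xlabelsize=8, xrot=45)
    left, right = get_xlim(get_all_lines(axes[0, 0]))
    assert (left, right) == (0.0, 10.0)
    return fig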
|
bsd-3-clause
|
jdmcbr/geopandas
|
geopandas/tests/test_sindex.py
|
1
|
27341
|
import sys
from shapely.geometry import (
Point,
Polygon,
MultiPolygon,
box,
GeometryCollection,
LineString,
)
from numpy.testing import assert_array_equal
import geopandas
from geopandas import _compat as compat
from geopandas import GeoDataFrame, GeoSeries, read_file, datasets
import pytest
import numpy as np
@pytest.mark.skipif(sys.platform.startswith("win"), reason="fails on AppVeyor")
@pytest.mark.skip_no_sindex
class TestSeriesSindex:
def test_has_sindex(self):
"""Test the has_sindex method."""
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(0, 0), (1, 1), (0, 1)])
d = GeoDataFrame({"geom": [t1, t2]}, geometry="geom")
assert not d.has_sindex
d.sindex
assert d.has_sindex
d.geometry.values._sindex = None
assert not d.has_sindex
d.sindex
assert d.has_sindex
s = GeoSeries([t1, t2])
assert not s.has_sindex
s.sindex
assert s.has_sindex
s.values._sindex = None
assert not s.has_sindex
s.sindex
assert s.has_sindex
def test_empty_geoseries(self):
"""Tests creating a spatial index from an empty GeoSeries."""
s = GeoSeries(dtype=object)
assert not s.sindex
assert len(s.sindex) == 0
def test_point(self):
s = GeoSeries([Point(0, 0)])
assert s.sindex.size == 1
hits = s.sindex.intersection((-1, -1, 1, 1))
assert len(list(hits)) == 1
hits = s.sindex.intersection((-2, -2, -1, -1))
assert len(list(hits)) == 0
def test_empty_point(self):
"""Tests that a single empty Point results in an empty tree."""
s = GeoSeries([Point()])
assert not s.sindex
assert len(s.sindex) == 0
def test_polygons(self):
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(0, 0), (1, 1), (0, 1)])
sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
s = GeoSeries([t1, t2, sq])
assert s.sindex.size == 3
def test_polygons_append(self):
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(0, 0), (1, 1), (0, 1)])
sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
s = GeoSeries([t1, t2, sq])
t = GeoSeries([t1, t2, sq], [3, 4, 5])
s = s.append(t)
assert len(s) == 6
assert s.sindex.size == 6
def test_lazy_build(self):
s = GeoSeries([Point(0, 0)])
assert s.values._sindex is None
assert s.sindex.size == 1
assert s.values._sindex is not None
def test_rebuild_on_item_change(self):
s = GeoSeries([Point(0, 0)])
original_index = s.sindex
s.iloc[0] = Point(0, 0)
assert s.sindex is not original_index
def test_rebuild_on_slice(self):
s = GeoSeries([Point(0, 0), Point(0, 0)])
original_index = s.sindex
        # Select a subset of rows
sliced = s.iloc[:1]
assert sliced.sindex is not original_index
# Select all rows
sliced = s.iloc[:]
assert sliced.sindex is original_index
# Select all rows and flip
sliced = s.iloc[::-1]
assert sliced.sindex is not original_index
@pytest.mark.skipif(sys.platform.startswith("win"), reason="fails on AppVeyor")
@pytest.mark.skip_no_sindex
class TestFrameSindex:
def setup_method(self):
data = {
"A": range(5),
"B": range(-5, 0),
"geom": [Point(x, y) for x, y in zip(range(5), range(5))],
}
self.df = GeoDataFrame(data, geometry="geom")
def test_sindex(self):
self.df.crs = "epsg:4326"
assert self.df.sindex.size == 5
hits = list(self.df.sindex.intersection((2.5, 2.5, 4, 4)))
assert len(hits) == 2
assert hits[0] == 3
def test_lazy_build(self):
assert self.df.geometry.values._sindex is None
assert self.df.sindex.size == 5
assert self.df.geometry.values._sindex is not None
def test_sindex_rebuild_on_set_geometry(self):
# First build the sindex
assert self.df.sindex is not None
original_index = self.df.sindex
self.df.set_geometry(
[Point(x, y) for x, y in zip(range(5, 10), range(5, 10))], inplace=True
)
assert self.df.sindex is not original_index
def test_rebuild_on_row_slice(self):
# Select a subset of rows rebuilds
original_index = self.df.sindex
sliced = self.df.iloc[:1]
assert sliced.sindex is not original_index
# Slicing all does not rebuild
original_index = self.df.sindex
sliced = self.df.iloc[:]
assert sliced.sindex is original_index
# Re-ordering rebuilds
sliced = self.df.iloc[::-1]
assert sliced.sindex is not original_index
def test_rebuild_on_single_col_selection(self):
"""Selecting a single column should not rebuild the spatial index."""
# Selecting geometry column preserves the index
original_index = self.df.sindex
geometry_col = self.df["geom"]
assert geometry_col.sindex is original_index
geometry_col = self.df.geometry
assert geometry_col.sindex is original_index
@pytest.mark.skipif(
not compat.PANDAS_GE_10, reason="Column selection returns a copy on pd<=1.0.0"
)
def test_rebuild_on_multiple_col_selection(self):
"""Selecting a subset of columns preserves the index."""
original_index = self.df.sindex
# Selecting a subset of columns preserves the index
subset1 = self.df[["geom", "A"]]
assert subset1.sindex is original_index
subset2 = self.df[["A", "geom"]]
assert subset2.sindex is original_index
# Skip to accommodate Shapely geometries being unhashable
@pytest.mark.skip
class TestJoinSindex:
def setup_method(self):
nybb_filename = geopandas.datasets.get_path("nybb")
self.boros = read_file(nybb_filename)
def test_merge_geo(self):
        # First check that we get hits from the boros frame.
tree = self.boros.sindex
hits = tree.intersection((1012821.80, 229228.26))
res = [self.boros.iloc[hit]["BoroName"] for hit in hits]
assert res == ["Bronx", "Queens"]
# Check that we only get the Bronx from this view.
first = self.boros[self.boros["BoroCode"] < 3]
tree = first.sindex
hits = tree.intersection((1012821.80, 229228.26))
res = [first.iloc[hit]["BoroName"] for hit in hits]
assert res == ["Bronx"]
# Check that we only get Queens from this view.
second = self.boros[self.boros["BoroCode"] >= 3]
tree = second.sindex
hits = tree.intersection((1012821.80, 229228.26))
        res = [second.iloc[hit]["BoroName"] for hit in hits]
assert res == ["Queens"]
# Get both the Bronx and Queens again.
merged = first.merge(second, how="outer")
assert len(merged) == 5
assert merged.sindex.size == 5
tree = merged.sindex
hits = tree.intersection((1012821.80, 229228.26))
res = [merged.iloc[hit]["BoroName"] for hit in hits]
assert res == ["Bronx", "Queens"]
@pytest.mark.skip_no_sindex
class TestPygeosInterface:
def setup_method(self):
data = {
"geom": [Point(x, y) for x, y in zip(range(5), range(5))]
+ [box(10, 10, 20, 20)] # include a box geometry
}
self.df = GeoDataFrame(data, geometry="geom")
self.expected_size = len(data["geom"])
# --------------------------- `intersection` tests -------------------------- #
@pytest.mark.parametrize(
"test_geom, expected",
(
((-1, -1, -0.5, -0.5), []),
((-0.5, -0.5, 0.5, 0.5), [0]),
((0, 0, 1, 1), [0, 1]),
((0, 0), [0]),
),
)
def test_intersection_bounds_tuple(self, test_geom, expected):
"""Tests the `intersection` method with valid inputs."""
res = list(self.df.sindex.intersection(test_geom))
assert_array_equal(res, expected)
@pytest.mark.parametrize("test_geom", ((-1, -1, -0.5), -0.5, None, Point(0, 0)))
def test_intersection_invalid_bounds_tuple(self, test_geom):
"""Tests the `intersection` method with invalid inputs."""
if compat.USE_PYGEOS:
with pytest.raises(TypeError):
# we raise a useful TypeError
self.df.sindex.intersection(test_geom)
else:
with pytest.raises((TypeError, Exception)):
# catch a general exception
# rtree raises an RTreeError which we need to catch
self.df.sindex.intersection(test_geom)
# ------------------------------ `query` tests ------------------------------ #
@pytest.mark.parametrize(
"predicate, test_geom, expected",
(
(None, box(-1, -1, -0.5, -0.5), []), # bbox does not intersect
(None, box(-0.5, -0.5, 0.5, 0.5), [0]), # bbox intersects
(None, box(0, 0, 1, 1), [0, 1]), # bbox intersects multiple
(
None,
LineString([(0, 1), (1, 0)]),
[0, 1],
), # bbox intersects but not geometry
("intersects", box(-1, -1, -0.5, -0.5), []), # bbox does not intersect
(
"intersects",
box(-0.5, -0.5, 0.5, 0.5),
[0],
), # bbox and geometry intersect
(
"intersects",
box(0, 0, 1, 1),
[0, 1],
), # bbox and geometry intersect multiple
(
"intersects",
LineString([(0, 1), (1, 0)]),
[],
), # bbox intersects but not geometry
("within", box(0.25, 0.28, 0.75, 0.75), []), # does not intersect
("within", box(0, 0, 10, 10), []), # intersects but is not within
("within", box(11, 11, 12, 12), [5]), # intersects and is within
("within", LineString([(0, 1), (1, 0)]), []), # intersects but not within
("contains", box(0, 0, 1, 1), []), # intersects but does not contain
("contains", box(0, 0, 1.001, 1.001), [1]), # intersects and contains
("contains", box(0.5, 0.5, 1.5, 1.5), [1]), # intersects and contains
("contains", box(-1, -1, 2, 2), [0, 1]), # intersects and contains multiple
(
"contains",
LineString([(0, 1), (1, 0)]),
[],
), # intersects but not contains
("touches", box(-1, -1, 0, 0), [0]), # bbox intersects and touches
(
"touches",
box(-0.5, -0.5, 1.5, 1.5),
[],
), # bbox intersects but geom does not touch
(
"contains",
box(10, 10, 20, 20),
[5],
), # contains but does not contains_properly
(
"covers",
box(-0.5, -0.5, 1, 1),
[0, 1],
), # covers (0, 0) and (1, 1)
(
"covers",
box(0.001, 0.001, 0.99, 0.99),
[],
), # does not cover any
(
"covers",
box(0, 0, 1, 1),
[0, 1],
), # covers but does not contain
(
"contains_properly",
box(0, 0, 1, 1),
[],
), # intersects but does not contain
(
"contains_properly",
box(0, 0, 1.001, 1.001),
[1],
), # intersects 2 and contains 1
(
"contains_properly",
box(0.5, 0.5, 1.001, 1.001),
[1],
), # intersects 1 and contains 1
(
"contains_properly",
box(0.5, 0.5, 1.5, 1.5),
[1],
), # intersects and contains
(
"contains_properly",
box(-1, -1, 2, 2),
[0, 1],
), # intersects and contains multiple
(
"contains_properly",
box(10, 10, 20, 20),
[],
), # contains but does not contains_properly
),
)
def test_query(self, predicate, test_geom, expected):
"""Tests the `query` method with valid inputs and valid predicates."""
res = self.df.sindex.query(test_geom, predicate=predicate)
assert_array_equal(res, expected)
def test_query_invalid_geometry(self):
"""Tests the `query` method with invalid geometry."""
with pytest.raises(TypeError):
self.df.sindex.query("notavalidgeom")
@pytest.mark.parametrize(
"test_geom, expected_value",
[
(None, []),
(GeometryCollection(), []),
(Point(), []),
(MultiPolygon(), []),
(Polygon(), []),
],
)
def test_query_empty_geometry(self, test_geom, expected_value):
"""Tests the `query` method with empty geometry."""
res = self.df.sindex.query(test_geom)
assert_array_equal(res, expected_value)
def test_query_invalid_predicate(self):
"""Tests the `query` method with invalid predicates."""
test_geom = box(-1, -1, -0.5, -0.5)
with pytest.raises(ValueError):
self.df.sindex.query(test_geom, predicate="test")
@pytest.mark.parametrize(
"sort, expected",
(
(True, [[0, 0, 0], [0, 1, 2]]),
# False could be anything, at least we'll know if it changes
(False, [[0, 0, 0], [0, 1, 2]]),
),
)
def test_query_sorting(self, sort, expected):
"""Check that results from `query` don't depend on the
order of geometries.
"""
# these geometries come from a reported issue:
# https://github.com/geopandas/geopandas/issues/1337
# there is no theoretical reason they were chosen
test_polys = GeoSeries([Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])])
tree_polys = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
expected = [0, 1, 2]
# pass through GeoSeries to have GeoPandas
# determine if it should use shapely or pygeos geometry objects
tree_df = geopandas.GeoDataFrame(geometry=tree_polys)
test_df = geopandas.GeoDataFrame(geometry=test_polys)
test_geo = test_df.geometry.values.data[0]
res = tree_df.sindex.query(test_geo, sort=sort)
# asserting the same elements
assert sorted(res) == sorted(expected)
# asserting the exact array can fail if sort=False
try:
assert_array_equal(res, expected)
except AssertionError as e:
if sort is False:
pytest.xfail(
"rtree results are known to be unordered, see "
"https://github.com/geopandas/geopandas/issues/1337\n"
"Expected:\n {}\n".format(expected)
+ "Got:\n {}\n".format(res.tolist())
)
raise e
# ------------------------- `query_bulk` tests -------------------------- #
@pytest.mark.parametrize(
"predicate, test_geom, expected",
(
(None, [(-1, -1, -0.5, -0.5)], [[], []]),
(None, [(-0.5, -0.5, 0.5, 0.5)], [[0], [0]]),
(None, [(0, 0, 1, 1)], [[0, 0], [0, 1]]),
("intersects", [(-1, -1, -0.5, -0.5)], [[], []]),
("intersects", [(-0.5, -0.5, 0.5, 0.5)], [[0], [0]]),
("intersects", [(0, 0, 1, 1)], [[0, 0], [0, 1]]),
# only second geom intersects
("intersects", [(-1, -1, -0.5, -0.5), (-0.5, -0.5, 0.5, 0.5)], [[1], [0]]),
# both geoms intersect
(
"intersects",
[(-1, -1, 1, 1), (-0.5, -0.5, 0.5, 0.5)],
[[0, 0, 1], [0, 1, 0]],
),
("within", [(0.25, 0.28, 0.75, 0.75)], [[], []]), # does not intersect
("within", [(0, 0, 10, 10)], [[], []]), # intersects but is not within
("within", [(11, 11, 12, 12)], [[0], [5]]), # intersects and is within
(
"contains",
[(0, 0, 1, 1)],
[[], []],
), # intersects and covers, but does not contain
(
"contains",
[(0, 0, 1.001, 1.001)],
[[0], [1]],
), # intersects 2 and contains 1
(
"contains",
[(0.5, 0.5, 1.001, 1.001)],
[[0], [1]],
), # intersects 1 and contains 1
("contains", [(0.5, 0.5, 1.5, 1.5)], [[0], [1]]), # intersects and contains
(
"contains",
[(-1, -1, 2, 2)],
[[0, 0], [0, 1]],
), # intersects and contains multiple
(
"contains",
[(10, 10, 20, 20)],
[[0], [5]],
), # contains but does not contains_properly
("touches", [(-1, -1, 0, 0)], [[0], [0]]), # bbox intersects and touches
(
"touches",
[(-0.5, -0.5, 1.5, 1.5)],
[[], []],
), # bbox intersects but geom does not touch
(
"covers",
[(-0.5, -0.5, 1, 1)],
[[0, 0], [0, 1]],
), # covers (0, 0) and (1, 1)
(
"covers",
[(0.001, 0.001, 0.99, 0.99)],
[[], []],
), # does not cover any
(
"covers",
[(0, 0, 1, 1)],
[[0, 0], [0, 1]],
), # covers but does not contain
(
"contains_properly",
[(0, 0, 1, 1)],
[[], []],
), # intersects but does not contain
(
"contains_properly",
[(0, 0, 1.001, 1.001)],
[[0], [1]],
), # intersects 2 and contains 1
(
"contains_properly",
[(0.5, 0.5, 1.001, 1.001)],
[[0], [1]],
), # intersects 1 and contains 1
(
"contains_properly",
[(0.5, 0.5, 1.5, 1.5)],
[[0], [1]],
), # intersects and contains
(
"contains_properly",
[(-1, -1, 2, 2)],
[[0, 0], [0, 1]],
), # intersects and contains multiple
(
"contains_properly",
[(10, 10, 20, 20)],
[[], []],
), # contains but does not contains_properly
),
)
def test_query_bulk(self, predicate, test_geom, expected):
"""Tests the `query_bulk` method with valid
inputs and valid predicates.
"""
# pass through GeoSeries to have GeoPandas
# determine if it should use shapely or pygeos geometry objects
test_geom = geopandas.GeoSeries(
[box(*geom) for geom in test_geom], index=range(len(test_geom))
)
res = self.df.sindex.query_bulk(test_geom, predicate=predicate)
assert_array_equal(res, expected)
@pytest.mark.parametrize(
"test_geoms, expected_value",
[
# single empty geometry
([GeometryCollection()], [[], []]),
# None should be skipped
([GeometryCollection(), None], [[], []]),
([None], [[], []]),
([None, box(-0.5, -0.5, 0.5, 0.5), None], [[1], [0]]),
],
)
def test_query_bulk_empty_geometry(self, test_geoms, expected_value):
"""Tests the `query_bulk` method with an empty geometry."""
# pass through GeoSeries to have GeoPandas
# determine if it should use shapely or pygeos geometry objects
# note: for this test, test_geoms (note plural) is a list already
test_geoms = geopandas.GeoSeries(test_geoms, index=range(len(test_geoms)))
res = self.df.sindex.query_bulk(test_geoms)
assert_array_equal(res, expected_value)
def test_query_bulk_empty_input_array(self):
"""Tests the `query_bulk` method with an empty input array."""
test_array = np.array([], dtype=object)
expected_value = [[], []]
res = self.df.sindex.query_bulk(test_array)
assert_array_equal(res, expected_value)
def test_query_bulk_invalid_input_geometry(self):
"""
Tests the `query_bulk` method with invalid input for the `geometry` parameter.
"""
test_array = "notanarray"
with pytest.raises(TypeError):
self.df.sindex.query_bulk(test_array)
def test_query_bulk_invalid_predicate(self):
"""Tests the `query_bulk` method with invalid predicates."""
test_geom_bounds = (-1, -1, -0.5, -0.5)
test_predicate = "test"
# pass through GeoSeries to have GeoPandas
# determine if it should use shapely or pygeos geometry objects
test_geom = geopandas.GeoSeries([box(*test_geom_bounds)], index=["0"])
with pytest.raises(ValueError):
self.df.sindex.query_bulk(test_geom.geometry, predicate=test_predicate)
@pytest.mark.parametrize(
"predicate, test_geom, expected",
(
(None, (-1, -1, -0.5, -0.5), [[], []]),
("intersects", (-1, -1, -0.5, -0.5), [[], []]),
("contains", (-1, -1, 1, 1), [[0], [0]]),
),
)
def test_query_bulk_input_type(self, predicate, test_geom, expected):
"""Tests that query_bulk can accept a GeoSeries, GeometryArray or
numpy array.
"""
# pass through GeoSeries to have GeoPandas
# determine if it should use shapely or pygeos geometry objects
test_geom = geopandas.GeoSeries([box(*test_geom)], index=["0"])
# test GeoSeries
res = self.df.sindex.query_bulk(test_geom, predicate=predicate)
assert_array_equal(res, expected)
# test GeometryArray
res = self.df.sindex.query_bulk(test_geom.geometry, predicate=predicate)
assert_array_equal(res, expected)
res = self.df.sindex.query_bulk(test_geom.geometry.values, predicate=predicate)
assert_array_equal(res, expected)
# test numpy array
res = self.df.sindex.query_bulk(
test_geom.geometry.values.data, predicate=predicate
)
assert_array_equal(res, expected)
@pytest.mark.parametrize(
"sort, expected",
(
(True, [[0, 0, 0], [0, 1, 2]]),
# False could be anything, at least we'll know if it changes
(False, [[0, 0, 0], [0, 1, 2]]),
),
)
def test_query_bulk_sorting(self, sort, expected):
"""Check that results from `query_bulk` don't depend
on the order of geometries.
"""
# these geometries come from a reported issue:
# https://github.com/geopandas/geopandas/issues/1337
# there is no theoretical reason they were chosen
test_polys = GeoSeries([Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])])
tree_polys = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
# pass through GeoSeries to have GeoPandas
# determine if it should use shapely or pygeos geometry objects
tree_df = geopandas.GeoDataFrame(geometry=tree_polys)
test_df = geopandas.GeoDataFrame(geometry=test_polys)
res = tree_df.sindex.query_bulk(test_df.geometry, sort=sort)
# asserting the same elements
assert sorted(res[0]) == sorted(expected[0])
assert sorted(res[1]) == sorted(expected[1])
# asserting the exact array can fail if sort=False
try:
assert_array_equal(res, expected)
except AssertionError as e:
if sort is False:
pytest.xfail(
"rtree results are known to be unordered, see "
"https://github.com/geopandas/geopandas/issues/1337\n"
"Expected:\n {}\n".format(expected)
+ "Got:\n {}\n".format(res.tolist())
)
raise e
# --------------------------- misc tests ---------------------------- #
def test_empty_tree_geometries(self):
"""Tests building sindex with interleaved empty geometries."""
geoms = [Point(0, 0), None, Point(), Point(1, 1), Point()]
df = geopandas.GeoDataFrame(geometry=geoms)
assert df.sindex.query(Point(1, 1))[0] == 3
def test_size(self):
"""Tests the `size` property."""
assert self.df.sindex.size == self.expected_size
def test_len(self):
"""Tests the `__len__` method of spatial indexes."""
assert len(self.df.sindex) == self.expected_size
def test_is_empty(self):
"""Tests the `is_empty` property."""
# create empty tree
empty = geopandas.GeoSeries([], dtype=object)
assert empty.sindex.is_empty
empty = geopandas.GeoSeries([None])
assert empty.sindex.is_empty
empty = geopandas.GeoSeries([Point()])
assert empty.sindex.is_empty
# create a non-empty tree
non_empty = geopandas.GeoSeries([Point(0, 0)])
assert not non_empty.sindex.is_empty
@pytest.mark.parametrize(
"predicate, expected_shape",
[
(None, (2, 396)),
("intersects", (2, 172)),
("within", (2, 172)),
("contains", (2, 0)),
("overlaps", (2, 0)),
("crosses", (2, 0)),
("touches", (2, 0)),
],
)
def test_integration_natural_earth(self, predicate, expected_shape):
"""Tests output sizes for the naturalearth datasets."""
world = read_file(datasets.get_path("naturalearth_lowres"))
capitals = read_file(datasets.get_path("naturalearth_cities"))
res = world.sindex.query_bulk(capitals.geometry, predicate)
assert res.shape == expected_shape
@pytest.mark.skipif(not compat.HAS_RTREE, reason="no rtree installed")
def test_old_spatial_index_deprecated():
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(0, 0), (1, 1), (0, 1)])
stream = ((i, item.bounds, None) for i, item in enumerate([t1, t2]))
with pytest.warns(FutureWarning):
idx = geopandas.sindex.SpatialIndex(stream)
assert list(idx.intersection((0, 0, 1, 1))) == [0, 1]
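# A minimal illustration of the `query_bulk` pattern exercised by the tests
# above, assuming a geopandas build where `sindex.query_bulk` is available
# (with rtree or pygeos installed). The helper name and the variables
# `tree_df` and `probe` are illustrative only.
def _example_query_bulk_usage():
    import geopandas
    from shapely.geometry import box
    # tree geometries: two disjoint unit squares
    tree_df = geopandas.GeoDataFrame(geometry=[box(0, 0, 1, 1), box(10, 10, 11, 11)])
    # probe geometry: a square that intersects only the first tree geometry
    probe = geopandas.GeoSeries([box(0.5, 0.5, 2, 2)])
    # the result is a 2 x n array: row 0 indexes `probe`, row 1 indexes the tree
    idx = tree_df.sindex.query_bulk(probe, predicate="intersects")
    assert idx.tolist() == [[0], [0]]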
|
bsd-3-clause
|
scott-maddox/obpds
|
src/obpds/examples/degenerate_pn_diode.py
|
1
|
1898
|
#
# Copyright (c) 2015, Scott J Maddox
#
# This file is part of Open Band Parameters Device Simulator (OBPDS).
#
# OBPDS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OBPDS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OBPDS. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# Make sure we import the local obpds version
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from obpds import *
# Layers
p = Layer(1*um, InAs, 1e15/cm3)
n = Layer(1*um, InAs, -2e18/cm3)
# Device
d = TwoTerminalDevice(layers=[p, n])
# Simulate and show the equilibrium band profile
import matplotlib.pyplot as plt
_, ax1 = plt.subplots()
ax1.set_ymargin(0.05)
ax1.set_ylabel('Energy (eV)')
ax1.set_xlabel('Depth (nm)')
solution = d.get_equilibrium(approx='boltzmann')
x = solution.x*1e7 # nm
ax1.plot(x, solution.Ev, 'r:')
ax1.plot(x, solution.Ec, 'b:', lw=2, label='Parabolic Boltzmann')
solution = d.get_equilibrium(approx='parabolic')
x = solution.x*1e7 # nm
ax1.plot(x, solution.Ev, 'r--')
ax1.plot(x, solution.Ec, 'b--', lw=2, label='Parabolic')
solution = d.get_equilibrium(approx='kane')
x = solution.x*1e7 # nm
ax1.plot(x, solution.Ev, 'r-')
ax1.plot(x, solution.Ec, 'b-', label='Non-parabolic Kane')
ax1.plot(x, solution.Ef, 'k--')
ax1.legend(loc='best')
plt.show()
|
agpl-3.0
|
walterreade/scikit-learn
|
sklearn/gaussian_process/tests/test_kernels.py
|
24
|
11602
|
"""Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
from collections import Hashable
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
""" Compare analytic and numeric gradient of kernels. """
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
""" Check that parameter vector theta of kernel is set correctly. """
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s.rstrip("_bounds"),
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i+1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i+1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
""" Auto-correlation and cross-correlation should be consistent. """
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
""" Test that diag method of kernel returns consistent results. """
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
""" Adding kernels and multiplying kernels should be commutative. """
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
""" Anisotropic kernel should be consistent with isotropic kernels."""
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
""" Test stationarity of kernels."""
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def test_kernel_clone():
""" Test that sklearn's clone works correctly on kernels. """
for kernel in kernels:
kernel_cloned = clone(kernel)
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
for attr in kernel.__dict__.keys():
attr_value = getattr(kernel, attr)
attr_value_cloned = getattr(kernel_cloned, attr)
if attr.startswith("hyperparameter_"):
assert_equal(attr_value.name, attr_value_cloned.name)
assert_equal(attr_value.value_type,
attr_value_cloned.value_type)
assert_array_equal(attr_value.bounds,
attr_value_cloned.bounds)
assert_equal(attr_value.n_elements,
attr_value_cloned.n_elements)
elif np.iterable(attr_value):
for i in range(len(attr_value)):
if np.iterable(attr_value[i]):
assert_array_equal(attr_value[i],
attr_value_cloned[i])
else:
assert_equal(attr_value[i], attr_value_cloned[i])
else:
assert_equal(attr_value, attr_value_cloned)
if not isinstance(attr_value, Hashable):
# modifiable attributes must not be identical
assert_not_equal(id(attr_value), id(attr_value_cloned))
def test_matern_kernel():
""" Test consistency of Matern kernel for special values of nu. """
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
    # the Matern kernel for nu==0.5 is equal to the absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
    # test that the special cases of the Matern kernel (nu in [0.5, 1.5, 2.5])
    # give nearly identical results to the general case for nu in
    # [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
"""Check that GP kernels can also be used as pairwise kernels."""
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
"""Check that set_params()/get_params() is consistent with kernel.theta."""
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
            if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
            if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value]*size})
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
[value]*size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
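# A small illustration of the log-parameterisation checked in
# test_kernel_theta above, assuming the same scikit-learn version as the
# imports at the top of this file. The helper name is illustrative only.
def _example_theta_is_log_of_hyperparameters():
    kernel = RBF(length_scale=2.0)
    # theta stores the natural log of the (non-fixed) hyperparameters
    assert_almost_equal(kernel.theta, np.log([2.0]))
    # assigning theta updates the underlying hyperparameter value
    kernel.theta = np.log([0.5])
    assert_almost_equal(kernel.length_scale, 0.5)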
|
bsd-3-clause
|
voxlol/scikit-learn
|
benchmarks/bench_glm.py
|
297
|
1493
|
"""
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
kose-y/pylearn2
|
pylearn2/sandbox/cuda_convnet/specialized_bench.py
|
44
|
3906
|
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
layer_1_detector = FilterActs()(images, filters)
layer_1_pooled_fake = layer_1_detector[:,0:layer_1_detector.shape[0]:2,
0:layer_1_detector.shape[1]:2, :]
base_filters2_value = rng.uniform(-1., 1., (num_filters, filter_rows,
filter_cols, num_filters)).astype('float32')
filters2 = shared(base_filters_value, name='filters')
layer_2_detector = FilterActs()(images, filters2)
output = layer_2_detector
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01 = base_image_value.transpose(3,0,1,2)
filters_bc01 = base_filters_value.transpose(3,0,1,2)
filters_bc01 = filters_bc01[:,:,::-1,::-1]
images_bc01 = shared(images_bc01)
filters_bc01 = shared(filters_bc01)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid')
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
"""
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
|
bsd-3-clause
|
aejax/KerasRL
|
dqn.py
|
1
|
8497
|
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation
from keras.optimizers import sgd, adam
import numpy as np
import matplotlib.pyplot as plt
import gym
import timeit
def epsilon_greedy(action, A, epsilon=0.1):
assert type(A) == gym.spaces.Discrete
rand = np.random.random()
if rand > epsilon:
return action
else:
return np.random.choice(A.n)
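# A quick sketch of how `epsilon_greedy` behaves; the numbers are arbitrary
# and the helper name is illustrative only.
def _example_epsilon_greedy():
    A = gym.spaces.Discrete(4)
    chosen = [epsilon_greedy(2, A, epsilon=0.5) for _ in range(1000)]
    # every result is a valid action index for the space ...
    assert set(int(a) for a in chosen) <= set(range(4))
    # ... and the greedy action (2) is returned most of the time
    assert chosen.count(2) > len(chosen) // 2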
class ReplayMemory(object):
def __init__(self, memory_size, state_size):
self.memory_size = memory_size
self.states = np.zeros((memory_size, state_size))
self.actions = np.zeros((memory_size,), dtype='int32')
self.next_states = np.zeros((memory_size, state_size))
self.rewards = np.zeros((memory_size,))
self.idx = 0 #memory location
self.full = False
def add(self, transition):
state, action, next_state, reward = transition
self.states[self.idx] = state
self.actions[self.idx] = action
self.next_states[self.idx] = next_state
self.rewards[self.idx] = reward
# Update the memory location for the next add
if self.idx == (self.memory_size - 1):
self.idx = 0
self.full = True
else:
self.idx += 1
def get_minibatch(self, batch_size):
if self.full:
idxs = np.random.choice(self.memory_size, size=batch_size, replace=False)
else:
idxs = np.random.choice(self.idx, size=min(batch_size,self.idx), replace=False)
states = self.states[idxs]
actions = self.actions[idxs]
next_states = self.next_states[idxs]
rewards = self.rewards[idxs]
return states, actions, next_states, rewards
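# A minimal sketch of how ReplayMemory cycles through its fixed-size buffer;
# the sizes are arbitrary and the helper name is illustrative only.
def _example_replay_memory():
    memory = ReplayMemory(memory_size=2, state_size=3)
    s = np.zeros(3)
    memory.add((s, 0, s, 1.0))
    memory.add((s, 1, s, 0.0))
    # after memory_size adds the write index wraps around and `full` is set
    assert memory.full and memory.idx == 0
    states, actions, next_states, rewards = memory.get_minibatch(batch_size=2)
    assert states.shape == (2, 3)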
class DQN(object):
def __init__(self, S, A, learning_rate=1e-3, gamma=0.9, policy=epsilon_greedy, batch_size=32, update_freq=1000, memory_size=100):
self.S = S
self.A = A
self.learning_rate = learning_rate
self.gamma = gamma
self.policy = policy
self.update_freq = update_freq
self.batch_size = batch_size
self.input_dim = self.get_space_dim(S)
self.output_dim = self.get_space_dim(A)
################################
# Build Model and Target Model #
################################
model = Sequential()
model.add(Dense(10, input_dim=self.input_dim, name='layer1'))
model.add(Activation('relu'))
model.add(Dense(self.output_dim, name='layer2'))
model.compile(loss='mse', optimizer=sgd(lr=self.learning_rate))
self.model = model
self.target_model = Sequential.from_config(self.model.get_config())
self.target_model.set_weights(self.model.get_weights())
# Build Replay Memory #
self.replay_memory = ReplayMemory(memory_size, self.input_dim)
self.state = np.zeros(self.input_dim)
self.action = 0
self.time = 0
self.episode = 0
def observe(self, s_next, r, done):
s_next = s_next[np.newaxis,:] # turn vector into matrix
self.done = done
self.time_management(done)
transition = (self.state, self.action, s_next, r)
self.replay_memory.add(transition)
minibatch = self.replay_memory.get_minibatch(self.batch_size)
loss = self.batch_updates(minibatch)
self.state = s_next
return loss
def act(self, **kwargs):
action = self.model.predict(self.state).argmax()
self.action = self.policy(action, self.A, **kwargs)
return self.action
    def batch_updates(self, minibatch):
        states, actions, next_states, rewards = minibatch
        # start from the current Q estimates and only overwrite the entries
        # for the actions that were actually taken
        targets = self.model.predict(states)
        rows = np.arange(states.shape[0])
        if self.done:
            targets[rows, actions] = rewards
        else:
            Qs = self.target_model.predict(next_states)
            targets[rows, actions] = rewards + self.gamma * Qs.max(axis=1)
        loss = self.model.train_on_batch(states, targets)
        return loss
def time_management(self, done):
self.time += 1
if done:
self.episode += 1
if self.time % self.update_freq == 0:
self.target_model.set_weights(self.model.get_weights())
@staticmethod
def get_space_dim(Space):
# get the dimensions of the spaces
if type(Space) == gym.spaces.Box:
s_dim = 1
for n in Space.shape:
s_dim *= n
elif type(Space) == gym.spaces.Discrete:
s_dim = Space.n
else:
print 'Wrong type for A: must be either Box or Discrete'
return s_dim
def run(env, agent, n_episode, tMax, plot=True):
returns = []
losses = []
start_time = timeit.default_timer()
for episode in xrange(n_episode):
observation = env.reset()
l_sum = agent.observe(observation, 0, False )
r_sum = 0
for t in xrange(tMax):
env.render(mode='human')
action = agent.act()
observation, reward, done, info = env.step(action)
l_sum += agent.observe(observation, reward, done)
r_sum += reward
if done:
break
if (episode+1)%10 == 0:
print 'Episode {} finished with return of {}.'.format(episode+1,r_sum)
returns.append(r_sum)
losses.append(l_sum)
end_time = timeit.default_timer()
ave_r = reduce(lambda x, y: x+y, returns) / n_episode
print 'Learning Rate: {:.2}'.format(agent.learning_rate)
print "Training time: {}".format(end_time - start_time)
print 'Average Reward: {}'.format(ave_r)
def movingaverage(values, window):
weights = np.repeat(1.0, window)/window
sma = np.convolve(values, weights, 'valid')
return sma
window_size = 100
rMVA = movingaverage(returns,window_size)
lMVA = movingaverage(losses,window_size)
if plot:
plt.figure()
plt.subplot(121)
plt.plot(rMVA)
plt.title('Rewards')
plt.xlabel('Average rewards per {} episodes'.format(window_size))
plt.subplot(122)
plt.plot(lMVA)
plt.title('Loss')
plt.xlabel('Average losses per {} episodes'.format(window_size))
plt.savefig('dqn_lr{:.2}.png'.format(agent.learning_rate), format='png')
plt.close('all')
return ave_r
def test_dqn():
import os
filename = 'dqn.h5'
single_run = True
load = False
save = False
n_episode = 100
tMax = 200
env = gym.make('CartPole-v0')
S = env.observation_space
A = env.action_space
learning_rate=5e-3
gamma=0.99
policy=epsilon_greedy
batch_size=32
update_freq=1000
memory_size=10000
agent = DQN(S, A, learning_rate=learning_rate, gamma=gamma, policy=policy,
batch_size=batch_size, update_freq=update_freq, memory_size=memory_size)
initial_weights = agent.model.get_weights()
if single_run:
agent = DQN(S, A, learning_rate=learning_rate, gamma=gamma, policy=policy,
batch_size=batch_size, update_freq=update_freq, memory_size=memory_size)
if load:
if os.path.isfile(filename):
agent.model = load_model(filename)
agent.target_model = load_model(filename)
ave_r = run(env, agent, n_episode, tMax, plot=False)
if save:
agent.model.save(filename)
else:
# Sample learning rates #
lrs = 10**np.random.uniform(-4.0, -2, size=10)
learning_rates = [lrs[n] for n in xrange(lrs.shape[0]) ]
ave_returns = []
for lr in learning_rates:
agent = DQN(S, A, learning_rate=lr, gamma=gamma, policy=policy,
batch_size=batch_size, update_freq=update_freq, memory_size=memory_size)
agent.model.set_weights(initial_weights)
agent.target_model.set_weights(initial_weights)
ave_r = run(env, agent, n_episode, tMax, plot=True)
ave_returns.append(ave_r)
plt.figure()
plt.semilogx(learning_rates, ave_returns, 'o')
        plt.savefig('lr_returns.png', format='png')
plt.xlabel('learning rate')
plt.ylabel('average returns')
if __name__ == '__main__':
test_dqn()
|
gpl-3.0
|
drewokane/xray
|
xarray/core/coordinates.py
|
1
|
7707
|
from collections import Mapping
from contextlib import contextmanager
import pandas as pd
from . import formatting
from .merge import merge_dataarray_coords
from .pycompat import iteritems, basestring, OrderedDict
def _coord_merge_finalize(target, other, target_conflicts, other_conflicts,
promote_dims=None):
if promote_dims is None:
promote_dims = {}
for k in target_conflicts:
del target[k]
for k, v in iteritems(other):
if k not in other_conflicts:
var = v.variable
if k in promote_dims:
var = var.expand_dims(promote_dims[k])
target[k] = var
def _common_shape(*args):
dims = OrderedDict()
for arg in args:
for dim in arg.dims:
size = arg.shape[arg.get_axis_num(dim)]
if dim in dims and size != dims[dim]:
# sometimes we may not have checked the index first
raise ValueError('index %r not aligned' % dim)
dims[dim] = size
return dims
def _dim_shape(var):
return [(dim, size) for dim, size in zip(var.dims, var.shape)]
class AbstractCoordinates(Mapping):
def __getitem__(self, key):
if (key in self._names or
(isinstance(key, basestring) and
key.split('.')[0] in self._names)):
# allow indexing current coordinates or components
return self._data[key]
else:
raise KeyError(key)
def __setitem__(self, key, value):
self.update({key: value})
def __iter__(self):
# needs to be in the same order as the dataset variables
for k in self._variables:
if k in self._names:
yield k
def __len__(self):
return len(self._names)
def __contains__(self, key):
return key in self._names
def __repr__(self):
return formatting.coords_repr(self)
@property
def dims(self):
return self._data.dims
def to_index(self, ordered_dims=None):
"""Convert all index coordinates into a :py:class:`pandas.MultiIndex`
"""
if ordered_dims is None:
ordered_dims = self.dims
indexes = [self._variables[k].to_index() for k in ordered_dims]
return pd.MultiIndex.from_product(indexes, names=list(ordered_dims))
def _merge_validate(self, other):
"""Determine conflicting variables to be dropped from either self or
other (or unresolvable conflicts that should just raise)
"""
self_conflicts = set()
other_conflicts = set()
promote_dims = {}
for k in self:
if k in other:
self_var = self._variables[k]
other_var = other[k].variable
if not self_var.broadcast_equals(other_var):
if k in self.dims and k in other.dims:
raise ValueError('index %r not aligned' % k)
if k not in self.dims:
self_conflicts.add(k)
if k not in other.dims:
other_conflicts.add(k)
elif _dim_shape(self_var) != _dim_shape(other_var):
promote_dims[k] = _common_shape(self_var, other_var)
self_conflicts.add(k)
return self_conflicts, other_conflicts, promote_dims
@contextmanager
def _merge_inplace(self, other):
if other is None:
yield
else:
# ignore conflicts in self because we don't want to remove
# existing coords in an in-place update
_, other_conflicts, promote_dims = self._merge_validate(other)
# treat promoted dimensions as a conflict, also because we don't
# want to modify existing coords
other_conflicts.update(promote_dims)
yield
_coord_merge_finalize(self, other, {}, other_conflicts)
def merge(self, other):
"""Merge two sets of coordinates to create a new Dataset
        The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
"""
ds = self.to_dataset()
if other is not None:
conflicts = self._merge_validate(other)
_coord_merge_finalize(ds.coords, other, *conflicts)
return ds
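# A hedged illustration of the merge rules documented above, written against
# the public xarray API rather than this module's internals; it assumes a
# reasonably recent xarray, and the helper name is illustrative only.
def _example_coordinate_merge_rules():
    import numpy as np
    import xarray as xr
    a = xr.DataArray(np.ones(3), dims="x", coords={"x": [0, 1, 2], "note": "a"})
    b = xr.DataArray(np.ones(3), dims="x", coords={"x": [0, 1, 2], "note": "b"})
    # the conflicting non-index coordinate 'note' is dropped from the result;
    # conflicting *index* coordinates would instead be an error, per the rules
    # described in AbstractCoordinates.merge above
    assert "note" not in (a + b).coords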
class DatasetCoordinates(AbstractCoordinates):
"""Dictionary like container for Dataset coordinates.
Essentially an immutable OrderedDict with keys given by the array's
dimensions and the values given by the corresponding xarray.Coordinate
objects.
"""
def __init__(self, dataset):
self._data = dataset
@property
def _names(self):
return self._data._coord_names
@property
def _variables(self):
return self._data._variables
def to_dataset(self):
"""Convert these coordinates into a new Dataset
"""
return self._data._copy_listed(self._names)
def update(self, other):
self._data.update(other)
self._names.update(other.keys())
def __delitem__(self, key):
if key in self:
del self._data[key]
else:
raise KeyError(key)
class DataArrayCoordinates(AbstractCoordinates):
"""Dictionary like container for DataArray coordinates.
Essentially an OrderedDict with keys given by the array's
dimensions and the values given by the corresponding xarray.Coordinate
objects.
"""
def __init__(self, dataarray):
self._data = dataarray
@property
def _names(self):
return set(self._data._coords)
@property
def _variables(self):
return self._data._coords
def _to_dataset(self, shallow_copy=True):
from .dataset import Dataset
coords = OrderedDict((k, v.copy(deep=False) if shallow_copy else v)
for k, v in self._data._coords.items())
dims = dict(zip(self.dims, self._data.shape))
return Dataset._construct_direct(coords, coord_names=set(self._names),
dims=dims, attrs=None)
def to_dataset(self):
return self._to_dataset()
def update(self, other):
new_vars = merge_dataarray_coords(
self._data.indexes, self._data._coords, other)
self._data._coords = new_vars
def __delitem__(self, key):
if key in self.dims:
raise ValueError('cannot delete a coordinate corresponding to a '
'DataArray dimension')
del self._data._coords[key]
class Indexes(Mapping):
def __init__(self, source):
self._source = source
def __iter__(self):
return iter(self._source.dims)
def __len__(self):
return len(self._source.dims)
def __contains__(self, key):
return key in self._source.dims
def __getitem__(self, key):
if key in self:
return self._source[key].to_index()
else:
raise KeyError(key)
def __repr__(self):
return formatting.indexes_repr(self)
|
apache-2.0
|
jriegel/FreeCAD
|
src/Mod/Plot/plotSeries/TaskPanel.py
|
26
|
17784
|
#***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD as App
import FreeCADGui as Gui
from PySide import QtGui, QtCore
import Plot
from plotUtils import Paths
import matplotlib
from matplotlib.lines import Line2D
import matplotlib.colors as Colors
class TaskPanel:
def __init__(self):
self.ui = Paths.modulePath() + "/plotSeries/TaskPanel.ui"
self.skip = False
self.item = 0
self.plt = None
def accept(self):
return True
def reject(self):
return True
def clicked(self, index):
pass
def open(self):
pass
def needsFullSpace(self):
return True
def isAllowedAlterSelection(self):
return False
def isAllowedAlterView(self):
return True
def isAllowedAlterDocument(self):
return False
def helpRequested(self):
pass
def setupUi(self):
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
form.color = self.widget(QtGui.QPushButton, "color")
form.remove = self.widget(QtGui.QPushButton, "remove")
self.form = form
self.retranslateUi()
self.fillStyles()
self.updateUI()
QtCore.QObject.connect(
form.items,
QtCore.SIGNAL("currentRowChanged(int)"),
self.onItem)
QtCore.QObject.connect(
form.label,
QtCore.SIGNAL("editingFinished()"),
self.onData)
QtCore.QObject.connect(
form.isLabel,
QtCore.SIGNAL("stateChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.style,
QtCore.SIGNAL("currentIndexChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.marker,
QtCore.SIGNAL("currentIndexChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.width,
QtCore.SIGNAL("valueChanged(double)"),
self.onData)
QtCore.QObject.connect(
form.size,
QtCore.SIGNAL("valueChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.color,
QtCore.SIGNAL("pressed()"),
self.onColor)
QtCore.QObject.connect(
form.remove,
QtCore.SIGNAL("pressed()"),
self.onRemove)
QtCore.QObject.connect(
Plot.getMdiArea(),
QtCore.SIGNAL("subWindowActivated(QMdiSubWindow*)"),
self.onMdiArea)
return False
def getMainWindow(self):
toplevel = QtGui.qApp.topLevelWidgets()
for i in toplevel:
if i.metaObject().className() == "Gui::MainWindow":
return i
raise RuntimeError("No main window found")
def widget(self, class_id, name):
"""Return the selected widget.
Keyword arguments:
class_id -- Class identifier
name -- Name of the widget
"""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
return form.findChild(class_id, name)
def retranslateUi(self):
"""Set the user interface locale strings."""
self.form.setWindowTitle(QtGui.QApplication.translate(
"plot_series",
"Configure series",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QCheckBox, "isLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"No label",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "remove").setText(
QtGui.QApplication.translate(
"plot_series",
"Remove serie",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLabel, "styleLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"Line style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLabel, "markerLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"Marker",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QListWidget, "items").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"List of available series",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLineEdit, "label").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line title",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QCheckBox, "isLabel").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"If checked serie will not be considered for legend",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QComboBox, "lineStyle").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QComboBox, "markers").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Marker style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QDoubleSpinBox, "lineWidth").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line width",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QSpinBox, "markerSize").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Marker size",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "color").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line and marker color",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "remove").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Removes this serie",
None,
QtGui.QApplication.UnicodeUTF8))
def fillStyles(self):
"""Fill the style combo boxes with the availabel ones."""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
# Line styles
linestyles = Line2D.lineStyles.keys()
for i in range(0, len(linestyles)):
style = linestyles[i]
string = "\'" + str(style) + "\'"
string += " (" + Line2D.lineStyles[style] + ")"
form.style.addItem(string)
# Markers
markers = Line2D.markers.keys()
for i in range(0, len(markers)):
marker = markers[i]
string = "\'" + str(marker) + "\'"
string += " (" + Line2D.markers[marker] + ")"
form.marker.addItem(string)
def onItem(self, row):
"""Executed when the selected item is modified."""
if not self.skip:
self.skip = True
self.item = row
self.updateUI()
self.skip = False
def onData(self):
"""Executed when the selected item data is modified."""
if not self.skip:
self.skip = True
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
        # Ensure that the selected serie exists
if self.item >= len(Plot.series()):
self.updateUI()
return
# Set label
serie = Plot.series()[self.item]
if(form.isLabel.isChecked()):
serie.name = None
form.label.setEnabled(False)
else:
serie.name = form.label.text()
form.label.setEnabled(True)
# Set line style and marker
style = form.style.currentIndex()
linestyles = Line2D.lineStyles.keys()
serie.line.set_linestyle(linestyles[style])
marker = form.marker.currentIndex()
markers = Line2D.markers.keys()
serie.line.set_marker(markers[marker])
# Set line width and marker size
serie.line.set_linewidth(form.width.value())
serie.line.set_markersize(form.size.value())
plt.update()
# Regenerate series labels
self.setList()
self.skip = False
def onColor(self):
""" Executed when color pallete is requested. """
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.color = self.widget(QtGui.QPushButton, "color")
        # Ensure that the selected serie exists
if self.item >= len(Plot.series()):
self.updateUI()
return
# Show widget to select color
col = QtGui.QColorDialog.getColor()
# Send color to widget and serie
if col.isValid():
serie = plt.series[self.item]
form.color.setStyleSheet(
"background-color: rgb({}, {}, {});".format(col.red(),
col.green(),
col.blue()))
serie.line.set_color((col.redF(), col.greenF(), col.blueF()))
plt.update()
def onRemove(self):
"""Executed when the data serie must be removed."""
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
        # Ensure that the selected serie exists
if self.item >= len(Plot.series()):
self.updateUI()
return
# Remove serie
Plot.removeSerie(self.item)
self.setList()
self.updateUI()
plt.update()
def onMdiArea(self, subWin):
"""Executed when a new window is selected on the mdi area.
Keyword arguments:
subWin -- Selected window.
"""
plt = Plot.getPlot()
if plt != subWin:
self.updateUI()
def updateUI(self):
""" Setup UI controls values if possible """
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
form.color = self.widget(QtGui.QPushButton, "color")
form.remove = self.widget(QtGui.QPushButton, "remove")
plt = Plot.getPlot()
form.items.setEnabled(bool(plt))
form.label.setEnabled(bool(plt))
form.isLabel.setEnabled(bool(plt))
form.style.setEnabled(bool(plt))
form.marker.setEnabled(bool(plt))
form.width.setEnabled(bool(plt))
form.size.setEnabled(bool(plt))
form.color.setEnabled(bool(plt))
form.remove.setEnabled(bool(plt))
if not plt:
self.plt = plt
form.items.clear()
return
self.skip = True
# Refill list
if self.plt != plt or len(Plot.series()) != form.items.count():
self.plt = plt
self.setList()
        # Ensure that there are series to configure
if not len(Plot.series()):
form.label.setEnabled(False)
form.isLabel.setEnabled(False)
form.style.setEnabled(False)
form.marker.setEnabled(False)
form.width.setEnabled(False)
form.size.setEnabled(False)
form.color.setEnabled(False)
form.remove.setEnabled(False)
return
# Set label
serie = Plot.series()[self.item]
if serie.name is None:
form.isLabel.setChecked(True)
form.label.setEnabled(False)
form.label.setText("")
else:
form.isLabel.setChecked(False)
form.label.setText(serie.name)
# Set line style and marker
form.style.setCurrentIndex(0)
linestyles = Line2D.lineStyles.keys()
for i in range(0, len(linestyles)):
style = linestyles[i]
if style == serie.line.get_linestyle():
form.style.setCurrentIndex(i)
form.marker.setCurrentIndex(0)
markers = Line2D.markers.keys()
for i in range(0, len(markers)):
marker = markers[i]
if marker == serie.line.get_marker():
form.marker.setCurrentIndex(i)
# Set line width and marker size
form.width.setValue(serie.line.get_linewidth())
form.size.setValue(serie.line.get_markersize())
# Set color
color = Colors.colorConverter.to_rgb(serie.line.get_color())
form.color.setStyleSheet("background-color: rgb({}, {}, {});".format(
int(color[0] * 255),
int(color[1] * 255),
int(color[2] * 255)))
self.skip = False
def setList(self):
"""Setup the UI control values if it is possible."""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.items.clear()
series = Plot.series()
for i in range(0, len(series)):
serie = series[i]
string = 'serie ' + str(i) + ': '
if serie.name is None:
string = string + '\"No label\"'
else:
string = string + serie.name
form.items.addItem(string)
# Ensure that selected item is correct
if len(series) and self.item >= len(series):
self.item = len(series) - 1
        form.items.setCurrentRow(self.item)
def createTask():
panel = TaskPanel()
Gui.Control.showDialog(panel)
if panel.setupUi():
Gui.Control.closeDialog(panel)
return None
return panel
|
lgpl-2.1
|
sgenoud/scikit-learn
|
examples/linear_model/plot_ard.py
|
5
|
2517
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with :ref:`bayesian_ridge_regression`.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print __doc__
import numpy as np
import pylab as pl
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
pl.figure(figsize=(6, 5))
pl.title("Weights of the model")
pl.plot(clf.coef_, 'b-', label="ARD estimate")
pl.plot(ols.coef_, 'r--', label="OLS estimate")
pl.plot(w, 'g-', label="Ground truth")
pl.xlabel("Features")
pl.ylabel("Values of the weights")
pl.legend(loc=1)
pl.figure(figsize=(6, 5))
pl.title("Histogram of the weights")
pl.hist(clf.coef_, bins=n_features, log=True)
pl.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
pl.ylabel("Features")
pl.xlabel("Values of the weights")
pl.legend(loc=1)
pl.figure(figsize=(6, 5))
pl.title("Marginal log-likelihood")
pl.plot(clf.scores_)
pl.ylabel("Score")
pl.xlabel("Iterations")
pl.show()
|
bsd-3-clause
|
vermouthmjl/scikit-learn
|
sklearn/linear_model/ransac.py
|
17
|
17164
|
# coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
from ..utils.validation import has_fit_parameter
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
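# A worked instance of the bound used above, assuming the defaults documented
# in the RANSACRegressor docstring below: with an inlier ratio e = 0.5,
# min_samples m = 2 and probability 0.99,
#     N >= log(1 - 0.99) / log(1 - 0.5 ** 2) ~= 16.01,
# so _dynamic_max_trials(n_inliers=50, n_samples=100, min_samples=2,
# probability=0.99) rounds this up and returns 17.0.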
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
NOTE: residual_metric is deprecated from 0.18 and will be removed in 0.20
Use ``loss`` instead.
    loss : string, callable, optional, default "absolute_loss"
String inputs, "absolute_loss" and "squared_loss" are supported which
find the absolute loss and squared loss per sample
respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the ``i``th value of the array corresponding to the loss
on `X[i]`.
If the loss on a sample is greater than the ``residual_threshold``, then
this sample is classified as an outlier.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] https://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
loss='absolute_loss', random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
        sample_weight : array-like, shape = [n_samples]
Individual weights for each sample
raises error if sample_weight is passed and base_estimator
fit method does not support it.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is not None:
warnings.warn(
"'residual_metric' will be removed in version 0.20. Use "
"'loss' instead.", DeprecationWarning)
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
else:
            raise ValueError(
                "loss should be 'absolute_loss', 'squared_loss' or a callable. "
                "Got %s." % self.loss)
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
        # number of data samples
        n_samples = X.shape[0]
        sample_idxs = np.arange(n_samples)
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
# XXX: Deprecation: Remove this if block in 0.20
if self.residual_metric is not None:
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = self.residual_metric(diff)
else:
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
            # fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
            raise ValueError(
                "RANSAC could not find a valid consensus set, because"
                " either the `residual_threshold` rejected all the samples or"
                " `is_data_valid` and `is_model_valid` returned False for all"
                " `max_trials` randomly chosen sub-samples. Consider"
                " relaxing the constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
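# ----------------------------------------------------------------------------
# Illustrative usage sketch appended for documentation purposes; it is not
# part of the original module and assumes the class defined above is
# scikit-learn's ``RANSACRegressor``.  The data is synthetic.
if __name__ == "__main__":  # pragma: no cover
    rng = np.random.RandomState(0)
    X_demo = rng.normal(size=(200, 1))
    y_demo = 3.0 * X_demo.ravel() + rng.normal(scale=0.1, size=200)
    y_demo[:10] += 25.0  # inject a few gross outliers
    ransac = RANSACRegressor(residual_threshold=1.0, random_state=0)
    ransac.fit(X_demo, y_demo)
    print("estimated coefficient: %s" % ransac.estimator_.coef_)
    print("number of inliers: %d" % ransac.inlier_mask_.sum())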
|
bsd-3-clause
|
mikebenfield/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
121
|
4301
|
from itertools import product
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_equal)
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
def test_isomap_clone_bug():
# regression test for bug reported in #6062
model = manifold.Isomap()
for n_neighbors in [10, 15, 20]:
model.set_params(n_neighbors=n_neighbors)
model.fit(np.random.rand(50, 2))
assert_equal(model.nbrs_.n_neighbors,
n_neighbors)
|
bsd-3-clause
|
lucidfrontier45/scikit-learn
|
examples/gaussian_process/plot_gp_regression.py
|
2
|
4050
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared exponential correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared exponential correlation model, nugget is mathematically
equivalent to a normalized variance; that is,
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD style
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
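# Note (added): ``GaussianProcess`` is the legacy scikit-learn API used by
# this example; it was deprecated in scikit-learn 0.18 and removed in 0.20 in
# favour of ``sklearn.gaussian_process.GaussianProcessRegressor`` with
# explicit kernel objects.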
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
|
bsd-3-clause
|
gghatano/scipy_2015_sklearn_tutorial
|
notebooks/helpers.py
|
19
|
5046
|
import numpy as np
from collections import defaultdict
import os
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.feature_extraction import DictVectorizer
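# Note (added): ``sklearn.cross_validation`` is the pre-0.18 module path; on
# newer scikit-learn versions ``StratifiedShuffleSplit`` lives in
# ``sklearn.model_selection``, takes ``n_splits`` instead of ``n_iter``, and
# is iterated via ``split(X, y)`` rather than over the object itself.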
# Can also use pandas!
def process_titanic_line(line):
# Split line on "," to get fields without comma confusion
vals = line.strip().split('",')
# replace spurious " characters
vals = [v.replace('"', '') for v in vals]
pclass = int(vals[0])
survived = int(vals[1])
name = str(vals[2])
sex = str(vals[3])
try:
age = float(vals[4])
except ValueError:
# Blank age
age = -1
sibsp = float(vals[5])
parch = int(vals[6])
ticket = str(vals[7])
try:
fare = float(vals[8])
except ValueError:
# Blank fare
fare = -1
cabin = str(vals[9])
embarked = str(vals[10])
boat = str(vals[11])
homedest = str(vals[12])
line_dict = {'pclass': pclass, 'survived': survived, 'name': name, 'sex': sex, 'age': age, 'sibsp': sibsp,
'parch': parch, 'ticket': ticket, 'fare': fare, 'cabin': cabin, 'embarked': embarked,
'boat': boat, 'homedest': homedest}
return line_dict
def load_titanic(test_size=.25, feature_skip_tuple=(), random_state=1999):
f = open(os.path.join('datasets', 'titanic', 'titanic3.csv'))
# Remove . from home.dest, split on quotes because some fields have commas
keys = f.readline().strip().replace('.', '').split('","')
lines = f.readlines()
f.close()
string_keys = ['name', 'sex', 'ticket', 'cabin', 'embarked', 'boat',
'homedest']
string_keys = [s for s in string_keys if s not in feature_skip_tuple]
numeric_keys = ['pclass', 'age', 'sibsp', 'parch', 'fare']
numeric_keys = [n for n in numeric_keys if n not in feature_skip_tuple]
train_vectorizer_list = []
test_vectorizer_list = []
n_samples = len(lines)
numeric_data = np.zeros((n_samples, len(numeric_keys)))
numeric_labels = np.zeros((n_samples,), dtype=int)
# Doing this twice is horribly inefficient but the file is small...
for n, l in enumerate(lines):
line_dict = process_titanic_line(l)
strings = {k: line_dict[k] for k in string_keys}
numeric_labels[n] = line_dict["survived"]
sss = StratifiedShuffleSplit(numeric_labels, n_iter=1, test_size=test_size,
random_state=12)
# This is a weird way to get the indices but it works
train_idx = None
test_idx = None
for train_idx, test_idx in sss:
pass
for n, l in enumerate(lines):
line_dict = process_titanic_line(l)
strings = {k: line_dict[k] for k in string_keys}
if n in train_idx:
train_vectorizer_list.append(strings)
else:
test_vectorizer_list.append(strings)
numeric_data[n] = np.asarray([line_dict[k]
for k in numeric_keys])
train_numeric = numeric_data[train_idx]
test_numeric = numeric_data[test_idx]
train_labels = numeric_labels[train_idx]
test_labels = numeric_labels[test_idx]
vec = DictVectorizer()
# .toarray() due to returning a scipy sparse array
train_categorical = vec.fit_transform(train_vectorizer_list).toarray()
test_categorical = vec.transform(test_vectorizer_list).toarray()
train_data = np.concatenate([train_numeric, train_categorical], axis=1)
test_data = np.concatenate([test_numeric, test_categorical], axis=1)
keys = numeric_keys + string_keys
return keys, train_data, test_data, train_labels, test_labels
FIELDNAMES = ('polarity', 'id', 'date', 'query', 'author', 'text')
def read_sentiment_csv(csv_file, fieldnames=FIELDNAMES, max_count=None,
n_partitions=1, partition_id=0):
import csv # put the import inside for use in IPython.parallel
def file_opener(csv_file):
try:
open(csv_file, 'r', encoding="latin1").close()
return open(csv_file, 'r', encoding="latin1")
except TypeError:
# Python 2 does not have encoding arg
return open(csv_file, 'rb')
texts = []
targets = []
with file_opener(csv_file) as f:
reader = csv.DictReader(f, fieldnames=fieldnames,
delimiter=',', quotechar='"')
pos_count, neg_count = 0, 0
for i, d in enumerate(reader):
if i % n_partitions != partition_id:
# Skip entry if not in the requested partition
continue
if d['polarity'] == '4':
if max_count and pos_count >= max_count / 2:
continue
pos_count += 1
texts.append(d['text'])
targets.append(1)
elif d['polarity'] == '0':
if max_count and neg_count >= max_count / 2:
continue
neg_count += 1
texts.append(d['text'])
targets.append(-1)
return texts, targets
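# ----------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original helpers module).
# Both helpers depend on local data files, so the calls are shown as comments
# only; ``sentiment140.csv`` is a hypothetical file in the column layout given
# by ``FIELDNAMES`` above, while ``load_titanic`` reads the path it hard-codes.
#
#   keys, X_train, X_test, y_train, y_test = load_titanic(
#       test_size=.25, feature_skip_tuple=('name', 'ticket', 'cabin'))
#
#   texts, targets = read_sentiment_csv('sentiment140.csv', max_count=10000)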
|
cc0-1.0
|
merenlab/web
|
data/sar11-saavs/files/p-get_percent_identity.py
|
1
|
14809
|
'''Loop over a BAM file and get the edit distance to the reference genome stored in the NM tag,
scaled by the aligned read length. Works for bowtie2, maybe others. Adapted from
https://gigabaseorgigabyte.wordpress.com/2017/04/14/getting-the-edit-distance-from-a-bam-alignment-a-journey/'''
import sys
import pysam
import argparse
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
ap = argparse.ArgumentParser()
ap.add_argument("-b", required=False, help="bam filepaths comma separated (no spaces)")
ap.add_argument("-B", required=False, help="alternatively, a file of bam filepaths")
ap.add_argument("-p", required=False, action='store_true', help="provide if you want proper percent identity (unaligned bps are included in normalization)")
ap.add_argument("-u", required=False, action='store_true', help="if provided, histograms will be unnormalized")
ap.add_argument("-o", required=True, help="output filepath")
ap.add_argument("-r", required=False, default=None, help="If provided, only reads for specified references are considered. Comma separated.")
ap.add_argument("-R", required=False, default=None, help="If provided, only reads for specified references are considered. Filepath of list of references.")
ap.add_argument("-g", required=False, help="gene caller ids to report, comma-separated. requires -a flag")
ap.add_argument("-G", required=False, help="filepath of gene caller ids to report, single column file. requires -a flag")
ap.add_argument("-x", required=False, action='store_true', help="collapse histograms of individual genes into a single histogram per bam file. Valid only with -g and -G")
ap.add_argument("-a", required=False, help="output file of anvi-export-gene-calls, required for -g and -G, incompatible with -R")
ap.add_argument("-m", required=False, action='store_true', help="If provided, histogram will only be generated for the MEDIAN read length in each bam file. May want to use with -nummismatches")
ap.add_argument("-nummismatches", required=False, action='store_true', help="If provided, values are number of mismatches instead of percent identity. Highly recommended to use this with -m flag")
ap.add_argument("-mode", required=False, default='histogram', help="by default, this program reports histogram curves (-mode histogram). If ``-mode raw``, histograms are not computed and instead each read considered is a written with its percent identity value")
# These are parameters for mode = 'histogram'
ap.add_argument("-binsize", required=False, type=float, default=None, help="Size of each bin. Overrides behavior of -numbins")
ap.add_argument("-autobin", required=False, action='store_true', help="Size of each bin determined on a per bam basis (creates one bin for each mismatch, but you still must supply a -range value). -m is required for this mode")
ap.add_argument("-numbins", required=False, default=None, help="How many bins? default is 30")
ap.add_argument("-interpolate", default=None, required=False, help="How many points should form the interpolation and from where to where? Format is 'start,end,number'. (e.g. 67,100,200) If not provided, no interpolation. Required for autobin to normalize x-axis between bam files")
ap.add_argument("-range", required=False, default=None, help="What's the range? provide lower and upper bound comma-separated. default is 50,100")
ap.add_argument("-melted", required=False, action='store_true', help="Use melted output format. Required if using -autobin since each bam could have different bins")
# These are parameters for mode = 'raw'
ap.add_argument("-subsample", required=False, type=int, default=None, help="how many reads do you want to subsample? You probably don't need more than 50,000.")
args = vars(ap.parse_args())
sc = lambda parameters: any([args.get(parameter, False) for parameter in parameters])
raw_mode_parameters = ['subsample']
histogram_mode_parameters = ['binsize', 'autobin', 'numbins', 'interpolate', 'range', 'melted']
if args.get("mode") == 'histogram':
if sc(raw_mode_parameters):
raise Exception(" you are using mode = histogram. Don't use these parameters: {}".format(raw_mode_parameters))
# defaults
if not args.get("numbins"):
args['numbins'] = 30
if not args.get("range"):
args['range'] = "50,100"
if args.get("mode") == 'raw':
if sc(histogram_mode_parameters):
raise Exception(" you are using mode = raw. Don't use these parameters: {}".format(histogram_mode_parameters))
# checks
if args.get("g") and args.get("G"):
raise Exception("use -g or -G")
if (args.get("g") or args.get("G")) and not args.get("a"):
raise Exception("provide -a")
if (args.get("g") or args.get("G")) and (args.get("R") or args.get("r")):
raise Exception("no point providing -R/-r if using gene calls")
if args.get("x") and not (args.get('g') or args.get('G')):
raise Exception("you need to specify -g or -G to use -x")
if args.get("b") and args.get('B'):
raise Exception("specify either b or B, not both")
if not args.get("b") and not args.get('B'):
raise Exception("Specify one of b or B")
if args.get('autobin') and not args.get("melted"):
raise Exception("You can't autobin without using -melted output format.")
if args.get('autobin') and not args.get("m"):
raise Exception("You can't autobin without using -m.")
if args.get('autobin') and not args.get("interpolate"):
raise Exception("You can't autobin without using -interpolate.")
if args.get("g"):
genes = [int(x) for x in args['g'].split(',')]
elif args.get("G"):
print(args['G'])
genes = [int(x.strip()) for x in open(args['G']).readlines()]
else:
genes = None
if args.get("a"):
gene_info = pd.read_csv(args['a'], sep='\t')
if genes:
gene_info = gene_info[gene_info['gene_callers_id'].isin(genes)]
else:
gene_info = None
if args.get('b'):
bam_filepaths = args['b'].split(",") # comma separated bam file paths
if bam_filepaths[-1] == '':
bam_filepaths = bam_filepaths[:-1]
elif args.get('B'):
bam_filepaths = [x.strip() for x in open(args['B']).readlines()]
if not bam_filepaths:
print('no bam files provided. nothing to do.')
sys.exit()
proper_pident = args['p']
output = args['o']
if args.get('r'):
reference_names = args['r'].split(",") # comma separated reference names
elif args.get('R'):
reference_names = [x.strip() for x in open(args['R']).readlines()]
else:
reference_names = [None]
normalize = True if not args['u'] else False
############################################################################################################################
# preamble
attr = 'query_alignment_length' if not proper_pident else 'query_length'
if args.get('mode') == 'histogram':
range_low, range_hi = [int(x) for x in args['range'].split(",")]
if not args.get("autobin"):
if not args.get("binsize"):
bins_template = np.linspace(range_low, range_hi, int(args['numbins']), endpoint=True)
else:
bins_template = np.arange(range_hi, range_low - float(args['binsize']), -float(args['binsize']))[::-1]
if not args.get("nummismatches"):
bins_shifted = bins_template[1:] # include 100% as a datapoint
else:
bins_shifted = bins_template[:-1] # include 0 mismatches as a datapoint
if args.get("interpolate"):
interp_low, interp_hi, interp_num = [int(x) for x in args['interpolate'].split(",")]
interp_low += 0.5
bins_interp = np.linspace(interp_low, interp_hi, interp_num)
else:
bins_interp = np.array([])
# I: optionally smooth the histogram counts with a cubic interpolation onto `bins_interp`
I = lambda bins_shifted, counts, bins_interp: interp1d(bins_shifted, counts, kind='cubic')(bins_interp) if args['interpolate'] else counts
# J: when -m is given, keep only reads whose length equals the median read length
J = lambda true_or_false, length: length == median_length if true_or_false else True
# K: per-read value, either the raw number of mismatches (NM tag) or the percent
# identity, 100 * (1 - NM / aligned-or-total read length, depending on -p)
K = lambda true_or_false, read: read.get_tag("NM") if true_or_false else 100*(1 - float(read.get_tag("NM")) / read.__getattribute__(attr))
if args.get("mode") == 'histogram':
percent_identity_hist = {'value':[], 'percent_identity':[], 'id':[]} if args.get("melted") else {}
if args.get("mode") == 'raw':
percent_identity_hist = {'value':[], 'id':[]}
for bam in bam_filepaths:
print('working on {}...'.format(bam))
bam_name = bam.split("/")[-1].replace(".bam", "")
samfile = pysam.AlignmentFile(bam, "rb")
if args.get("m"):
i = 0
read_lengths = []
for read in samfile.fetch():
read_lengths.append(read.query_length)
i += 1
array = np.array(read_lengths)
median_length = int(np.median(array))
second_median = int(np.median(array[array != float(median_length)]))
third_median = int(np.median(array[(array != float(median_length)) & (array != float(second_median))]))
print('median length was {}, second was {}, third was {}'.format(median_length, second_median, third_median))
# only if autobinning
if args.get("autobin"):
if not args.get("nummismatches"):
binsize = 100*(1. / median_length)
bins_template = np.arange(range_hi, range_low - binsize, -binsize)[::-1]
bins_template += binsize * 1e-4
bins_template[-1] = 100
bins_shifted = bins_template[1:] # include 100% as a datapoint
else:
bins_template = np.arange(range_hi, range_low - 1, -1)[::-1]
bins_shifted = bins_template[:-1] # include 0 mismatches as a datapoint
if gene_info is not None:
if not args.get('x'):
# each gene gets its own histogram
for index, row in gene_info.iterrows():
percent_identities = np.array([K(args.get("nummismatches"), read) for read in samfile.fetch(row['contig'], int(row['start']), int(row['stop'])) if J(args.get("m"), read.query_length)])
id_name = bam_name + "_" + str(row['gene_callers_id'])
if args.get("mode") == 'histogram':
counts, _ = np.histogram(percent_identities, bins=bins_template, density=normalize)
if args.get("melted"):
value = list(I(bins_shifted, counts, bins_interp))
percent_identity = list(bins_shifted if not args.get("interpolate") else bins_interp)
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
percent_identity_hist['percent_identity'].extend(percent_identity)
else:
percent_identity_hist[id_name] = I(bins_shifted, counts, bins_interp)
elif args.get("mode") == 'raw':
if args.get("subsample") and args['subsample'] < len(percent_identities):
percent_identities = np.random.choice(percent_identities, args.get('subsample'), replace=False)
value = percent_identities.tolist()
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
else:
# histograms for each gene are collapsed into a single histogram
percent_identities = []
for index, row in gene_info.iterrows():
percent_identities.extend([K(args.get("nummismatches"), read) for read in samfile.fetch(row['contig'], int(row['start']), int(row['stop'])) if J(args.get("m"), read.query_length)])
percent_identities = np.array(percent_identities)
id_name = bam_name
if args.get("mode") == 'histogram':
counts, _ = np.histogram(percent_identities, bins=bins_template, density=normalize)
if args.get("melted"):
value = list(I(bins_shifted, counts, bins_interp))
percent_identity = list(bins_shifted if not args.get("interpolate") else bins_interp)
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
percent_identity_hist['percent_identity'].extend(percent_identity)
else:
percent_identity_hist[id_name] = I(bins_shifted, counts, bins_interp)
elif args.get("mode") == 'raw':
if args.get("subsample") and args['subsample'] < len(percent_identities):
percent_identities = np.random.choice(percent_identities, args.get('subsample'), replace=False)
value = percent_identities.tolist()
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
else:
percent_identities = []
for reference_name in reference_names:
# 1 minus the ratio of mismatches to the length of the read/alignment
percent_identities.extend([K(args.get("nummismatches"), read) for read in samfile.fetch(reference=reference_name) if J(args.get("m"), read.query_length)])
percent_identities = np.array(percent_identities)
print("{} reads considered".format(len(percent_identities)))
# create a histogram
id_name = bam_name
if args.get("mode") == 'histogram':
counts, _ = np.histogram(percent_identities, bins=bins_template, density=normalize)
if args.get("melted"):
value = list(I(bins_shifted, counts, bins_interp))
percent_identity = list(bins_shifted if not args.get("interpolate") else bins_interp)
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
percent_identity_hist['percent_identity'].extend(percent_identity)
else:
percent_identity_hist[id_name] = I(bins_shifted, counts, bins_interp)
if args.get("mode") == 'raw':
if args.get("subsample") and args['subsample'] < len(percent_identities):
percent_identities = np.random.choice(percent_identities, args.get('subsample'), replace=False)
value = percent_identities.tolist()
percent_identity_hist['id'].extend([id_name] * len(value))
percent_identity_hist['value'].extend(value)
samfile.close()
print("")
# save the file
percent_identity_hist = pd.DataFrame(percent_identity_hist).reset_index(drop=True)
if not args.get("melted") and args.get("mode") == 'histogram':
percent_identity_hist['percent_identity' if not args['nummismatches'] else 'number_of_mismatches'] = bins_interp if args['interpolate'] else bins_shifted
percent_identity_hist.to_csv(output, sep="\t", index=False)
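# ----------------------------------------------------------------------------
# Example invocations (added for illustration; the BAM, list and output file
# names are hypothetical):
#
#   python p-get_percent_identity.py -b sample1.bam,sample2.bam \
#       -o pident_hist.txt -mode histogram -numbins 50 -range 70,100
#
#   python p-get_percent_identity.py -B bam_paths.txt -o pident_raw.txt \
#       -mode raw -subsample 50000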
|
mit
|
etamponi/resilient-protocol
|
resilient/weighting_strategies.py
|
1
|
1354
|
from abc import ABCMeta, abstractmethod
import numpy
from sklearn.base import BaseEstimator
from resilient import pdfs
__author__ = 'Emanuele Tamponi <[email protected]>'
class WeightingStrategy(BaseEstimator):
__metaclass__ = ABCMeta
@abstractmethod
def prepare(self, inp, y):
pass
@abstractmethod
def add_estimator(self, est, inp, y, sample_weights):
pass
@abstractmethod
def weight_estimators(self, x):
pass
class SameWeight(WeightingStrategy):
def __init__(self):
self.weights_ = None
def prepare(self, inp, y):
self.weights_ = numpy.ones(0)
def add_estimator(self, est, inp, y, sample_weights):
self.weights_ = numpy.ones(1 + len(self.weights_))
def weight_estimators(self, x):
return self.weights_
class CentroidBasedWeightingStrategy(WeightingStrategy):
def __init__(self, pdf=pdfs.DistanceInverse(power=2)):
self.pdf = pdf
self.centroids_ = None
def prepare(self, inp, y):
self.centroids_ = []
def add_estimator(self, est, inp, y, sample_weights):
self.centroids_.append(
numpy.average(inp, axis=0, weights=sample_weights)
)
def weight_estimators(self, x):
scores = self.pdf.probabilities(self.centroids_, mean=x)
return scores
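# ----------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original module).  It mimics how
# an ensemble driver might feed the centroid-based strategy: the data is
# synthetic, and ``None`` is passed for the estimator because this strategy
# never inspects it.
if __name__ == "__main__":
    rng = numpy.random.RandomState(0)
    inp = rng.rand(30, 4)
    y = rng.randint(0, 2, size=30)
    strategy = CentroidBasedWeightingStrategy()
    strategy.prepare(inp, y)
    for _ in range(5):
        sample_weights = rng.rand(30)
        strategy.add_estimator(None, inp, y, sample_weights)
    print(strategy.weight_estimators(inp[0]))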
|
gpl-2.0
|
m3wolf/scimap
|
tests/test_pawley_refinement.py
|
1
|
6933
|
# -*- coding: utf-8 -*-
#
# Copyright © 2018 Mark Wolf
#
# This file is part of scimap.
#
# Scimap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scimap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scimap. If not, see <http://www.gnu.org/licenses/>.
# flake8: noqa
import unittest
import os
import warnings
import sys
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
import matplotlib.pyplot as plt
import numpy as np
from numpy.polynomial.chebyshev import chebval
from scimap import XRDScan, standards
from scimap.utilities import q_to_twotheta, twotheta_to_q
from scimap.peakfitting import remove_peak_from_df
from scimap.pawley_refinement import PawleyRefinement, predict_diffractogram, gaussian, cauchy
TESTDIR = os.path.join(os.path.dirname(__file__), "test-data-xrd")
COR_BRML = os.path.join(TESTDIR, 'corundum.brml')
@unittest.skip
class PawleyRefinementTest(unittest.TestCase):
wavelengths = (
(1.5406, 1), # Kα1
(1.5444, 0.5), # Kα2
)
def test_refine(self):
# Prepare the test objects
scan = XRDScan(COR_BRML, phase=standards.Corundum())
two_theta = q_to_twotheta(scan.scattering_lengths, wavelength=scan.wavelength)
refinement = PawleyRefinement(wavelengths=self.wavelengths,
phases=[scan.phases[0]],
num_bg_coeffs=9)
# Set starting parameters a bit closer to keep from taking too long
scan.phases[0].unit_cell.a = 4.755
scan.phases[0].unit_cell.c = 12.990
for r in scan.phases[0].reflection_list:
r.intensity *= 200
# Do the refinement
refinement.refine(two_theta=two_theta, intensities=scan.intensities)
predicted = refinement.predict(two_theta=two_theta)
actual = scan.diffractogram.counts.values
refinement.plot(two_theta=two_theta, intensities=scan.intensities)
plt.show()
# Check scale factors
scale = refinement.scale_factor(two_theta=two_theta)
print(scale)
# Check phase fractions
fracs = refinement.phase_fractions(two_theta=two_theta)
np.testing.assert_almost_equal(fracs, (1, ))
# Check background fitting
bg = refinement.background(two_theta=two_theta)
self.assertEqual(bg.shape, actual.shape)
# Check refined unit-cell parameters
unit_cell = refinement.unit_cells()
self.assertAlmostEqual(
unit_cell[0],
4.758877,
places=3,
)
self.assertAlmostEqual(
unit_cell[2],
12.992877,
places=3
)
peak_widths = refinement.peak_breadths(two_theta, scan.intensities)
self.assertAlmostEqual(peak_widths[0], 0.046434, places=5)
# Check that the refinement is of sufficient quality
error = np.sqrt(np.sum((actual-predicted)**2))/len(actual)
self.assertLess(error, 1.5)
def test_reflection_list(self):
corundum = standards.Corundum()
# Determine the min and max values for 2θ range
two_theta_range = (10, 80)
d_min = 1.5406 / 2 / np.sin(np.radians(40))
d_max = 1.5444 / 2 / np.sin(np.radians(5))
# Calculate expected reflections
d_list = [corundum.unit_cell.d_spacing(r.hkl) for r in corundum.reflection_list]
d_list = np.array(d_list)
h_list = np.array([r.intensity for r in corundum.reflection_list])
size_list = np.array([corundum.crystallite_size for d in d_list])
strain_list = np.array([corundum.strain for d in d_list])
# Restrict values to only those valid for this two-theta range
valid_refs = np.logical_and(d_list >= d_min, d_list <= d_max)
d_list = d_list[valid_refs]
h_list = h_list[valid_refs]
size_list = size_list[valid_refs]
strain_list = strain_list[valid_refs]
# Calculate the actual list and compare
ref_list = tuple(zip(d_list, h_list, size_list, strain_list))
refinement = PawleyRefinement(phases=[corundum],
wavelengths=self.wavelengths)
output = refinement.reflection_list(two_theta=two_theta_range, clip=True)
np.testing.assert_equal(output, ref_list)
def test_predict_diffractogram(self):
two_theta = np.linspace(10, 80, num=40001)
wavelengths = self.wavelengths
bg_coeffs = np.ones(shape=(10,))
bg_x = np.linspace(10/90, 80/90, num=len(two_theta)) - 1
bg = chebval(bg_x, bg_coeffs)
# Check background
result = predict_diffractogram(
two_theta=two_theta,
wavelengths=wavelengths,
background_coeffs=bg_coeffs,
reflections=(),
)
np.testing.assert_almost_equal(result, bg)
# Check list of reflections
reflections = np.array((
(3.0, 79, 100, 0),
(3/1.41, 15, 100, 0),
(1.4, 41, 100, 0),
(0.5, 27, 100, 0),
))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = predict_diffractogram(
two_theta=two_theta,
wavelengths=wavelengths,
background_coeffs=(0,),
u=0.01, v=0, w=0,
reflections=reflections,
lg_mix=0.5,
)
# Check that a warning is raised for the invalid
# reflection (0.5Å)
self.assertEqual(len(w), 2, "No warning raised")
# plt.plot(two_theta, result)
# plt.show()
self.assertAlmostEqual(np.max(result), 79, places=0)
def test_peak_shapes(self):
x = np.linspace(0, 10, num=501)
beta = 0.05
height = 20
gauss = gaussian(x, center=5, height=height, breadth=beta)
self.assertEqual(np.max(gauss), height)
area = np.trapz(gauss, x=x)
self.assertAlmostEqual(area/height, beta)
# Now check Cauchy function
cauch = cauchy(x, center=5, height=height, breadth=beta)
self.assertEqual(np.max(cauch), height)
# (Cauchy beta is less reliable because the tails have more weight)
area = np.trapz(cauch, x=x)
self.assertAlmostEqual(area/height, beta, places=2)
# plt.plot(x, gauss)
# plt.plot(x, cauch)
# plt.show()
|
gpl-3.0
|
jeiros/Scripts
|
AnalysisMDTraj/pca_analysis.py
|
1
|
4379
|
#!/usr/bin/env python
"""
This script takes a list of NetCDF MD trajectories and their corresponding
topology and calculates a two-component PCA based on the pairwise distances
between the alpha carbons of the protein backbone.
"""
import mdtraj as md
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import IncrementalPCA
import argparse
from traj_loading import load_Trajs
parser = argparse.ArgumentParser(usage="""{} Trajectories*.nc Topology.prmtop""".
format(sys.argv[0]),
epilog="""Load up a list of AMBER NetCDF
trajectories and their corresponding topology
with MDtraj. Calculate a two-component PCA
based on the pairwise distance between alpha
                                 carbons of the protein backbone. Plot a hexbin
                                 or scatter plot projection of the trajectories
                                 onto the PC space""")
parser.add_argument("Trajectories", help="""An indefinite amount of AMBER
trajectories""", nargs="+")
parser.add_argument("Topology", help="""The topology .prmtop file that matches
the trajectories""")
parser.add_argument("Plot_type", help="""The type of plot to be used. Can
be a scatter plot or a hexbin plot.""",
choices=['scatter', 'hexbin'])
parser.add_argument("-s", "--save", help="Save the plots as .eps images",
action="store_true")
parser.add_argument("-st", "--stride", help="""Stride for the loading of the
trajectory. Must be a divisor of the chunk.
Default value is 1.""", default=1, type=int)
parser.add_argument("-ch", "--chunk", help="""Number of frames that will be
used by md.iterload to load up the trajectories. Must be
a multiplier of the stride.
Default is 100 frames.""", default=100, type=int)
parser.add_argument("-t", "--title", help="""Name of the eps image where the PCA
plot is stored. Default is PCA.""", default="PCA.eps")
args = parser.parse_args()
def get_pca_array(list_chunks, topology):
"""
    Takes a list of mdtraj.Trajectory objects and featurizes them into
    pairwise distances between backbone alpha carbons. Performs a
    two-component incremental PCA on the featurized trajectory.
Parameters
----------
list_chunks: list of mdTraj.Trajectory objects
topology: str
Name of the Topology file
Returns
-------
    Y: np.array shape (frames, 2)
        Projection of the pairwise-distance features onto the first two
        principal components.
"""
pca = IncrementalPCA(n_components=2)
top = md.load_prmtop(topology)
ca_backbone = top.select("name CA")
pairs = top.select_pairs(ca_backbone, ca_backbone)
pair_distances = []
for chunk in list_chunks:
X = md.compute_distances(chunk, pairs)
pair_distances.append(X)
distance_array = np.concatenate(pair_distances)
print("No. of data points: %d" % distance_array.shape[0])
print("No. of features (pairwise distances): %d" % distance_array.shape[1])
Y = pca.fit_transform(distance_array)
return Y
# Plotting functions
def hex_plot(pca_array):
PC1 = pca_array[:, 0]
PC2 = pca_array[:, 1]
plt.figure()
plt.xlabel('PC1 (Å)')
plt.ylabel('PC2 (Å)')
plt.hexbin(x=PC1, y=PC2, bins='log', mincnt=1)
cb = plt.colorbar()
cb.set_label('log10(N)')
if args.save:
plt.savefig(args.title, dpi=900, format='eps')
else:
plt.show()
def scatter_plot(pca_array):
PC1 = pca_array[:, 0]
PC2 = pca_array[:, 1]
plt.figure()
plt.xlabel('PC1 (Å)')
plt.ylabel('PC2 (Å)')
plt.scatter(x=PC1, y=PC2, marker='x')
if args.save:
plt.savefig(args.title, dpi=900, format='eps')
else:
plt.show()
def main():
print('\n', args, '\n')
if args:
list_chunks = load_Trajs(sorted(args.Trajectories), args.Topology,
stride=args.stride, chunk=args.chunk)
pca_array = get_pca_array(list_chunks, args.Topology)
if args.Plot_type == 'scatter':
scatter_plot(pca_array)
else:
hex_plot(pca_array)
if __name__ == "__main__":
main()
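# ----------------------------------------------------------------------------
# Example invocation (added for illustration; the trajectory and topology file
# names are hypothetical):
#
#   python pca_analysis.py traj1.nc traj2.nc system.prmtop hexbin \
#       -st 10 -ch 1000 -s -t pca_hexbin.eps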
|
mit
|
Edu-Glez/Bank_sentiment_analysis
|
env/lib/python3.6/site-packages/pandas/computation/pytables.py
|
7
|
20707
|
""" manage PyTables query interface via Expressions """
import ast
import time
import warnings
from functools import partial
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas.types.common import is_list_like
import pandas.core.common as com
from pandas.compat import u, string_types, DeepChainMap
from pandas.core.base import StringMixin
from pandas.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.computation import expr, ops
from pandas.computation.ops import is_term, UndefinedVariableError
from pandas.computation.expr import BaseExprVisitor
from pandas.computation.common import _ensure_decoded
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
class Scope(expr.Scope):
__slots__ = 'queryables',
def __init__(self, level, global_dict=None, local_dict=None,
queryables=None):
super(Scope, self).__init__(level + 1, global_dict=global_dict,
local_dict=local_dict)
self.queryables = queryables or dict()
class Term(ops.Term):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = StringMixin.__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
super(Term, self).__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
        # must be a queryable
if self.side == 'left':
if self.name not in self.env.queryables:
raise NameError('name {0!r} is not defined'.format(self.name))
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
@property
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
def __init__(self, op, lhs, rhs, queryables, encoding):
super(BinOp, self).__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.filter = None
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if (isinstance(left, ConditionBinOp) and
isinstance(right, ConditionBinOp)):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if (isinstance(left, FilterBinOp) and
isinstance(right, FilterBinOp)):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(self.op, left, right, queryables=self.queryables,
encoding=self.encoding).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self):
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self):
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), 'kind', None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), 'meta', None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), 'metadata', None)
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "(%s %s %s)" % (self.lhs, self.op, val)
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == u('datetime64') or kind == u('datetime'):
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = pd.Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
return TermValue(v, v.value, kind)
elif (isinstance(v, datetime) or hasattr(v, 'timetuple') or
kind == u('date')):
v = time.mktime(v.timetuple())
return TermValue(v, pd.Timestamp(v), kind)
elif kind == u('timedelta64') or kind == u('timedelta'):
v = _coerce_scalar_to_timedelta_type(v, unit='s').value
return TermValue(int(v), v, kind)
elif meta == u('category'):
metadata = com._values_from_object(self.metadata)
result = metadata.searchsorted(v, side='left')
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, u('integer'))
elif kind == u('integer'):
v = int(float(v))
return TermValue(v, v, kind)
elif kind == u('float'):
v = float(v)
return TermValue(v, v, kind)
elif kind == u('bool'):
if isinstance(v, string_types):
v = not v.strip().lower() in [u('false'), u('f'), u('no'),
u('n'), u('none'), u('0'),
u('[]'), u('{}'), u('')]
else:
v = bool(v)
return TermValue(v, v, kind)
elif not isinstance(v, string_types):
v = stringify(v)
return TermValue(v, stringify(v), u('string'))
# string quoting
return TermValue(v, stringify(v), u('string'))
def convert_values(self):
pass
class FilterBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Filter : [{0}] -> "
"[{1}]".format(self.filter[0], self.filter[1]))
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [%s]" % self)
rhs = self.conform(self.rhs)
values = [TermValue(v, v, self.kind) for v in rhs]
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ['==', '!='] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
return self
return None
# equality conditions
if self.op in ['==', '!=']:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
else:
raise TypeError(
"passing a filterable condition to a non-table indexer [%s]" %
self)
return self
def generate_filter_op(self, invert=False):
if (self.op == '!=' and not invert) or (self.op == '==' and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Condition : [{0}]]".format(self.condition))
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError("cannot use an invert condition when "
"passing to numexpr")
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [%s]" % self)
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ['==', '!=']:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = "(%s)" % ' | '.join(vs)
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = "(%s %s %s)" % (
self.lhs.condition,
self.op,
self.rhs.condition)
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != '~':
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None:
if issubclass(klass, ConditionBinOp):
if operand.condition is not None:
return operand.invert()
elif issubclass(klass, FilterBinOp):
if operand.filter is not None:
return operand.invert()
return None
_op_classes = {'unary': UnaryOp}
class ExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super(ExprVisitor, self).__init__(env, engine, parser)
for bin_op in self.binary_ops:
setattr(self, 'visit_{0}'.format(self.binary_op_nodes_map[bin_op]),
lambda node, bin_op=bin_op: partial(BinOp, bin_op,
**kwargs))
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp('~', self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError('Unary addition not supported')
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0],
comparators=[node.value])
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
        # only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except:
pass
try:
return self.const_type(value[slobj], self.env)
except TypeError:
raise ValueError("cannot subscript {0!r} with "
"{1!r}".format(value, slobj))
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx.__class__
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
                # something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
class Expr(expr.Expr):
""" hold a pytables like expression, comprised of possibly multiple 'terms'
Parameters
----------
where : string term expression, Expr, or list-like of Exprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
an Expr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
def __init__(self, where, op=None, value=None, queryables=None,
encoding=None, scope_level=0):
# try to be back compat
where = self.parse_back_compat(where, op, value)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict = DeepChainMap()
if isinstance(where, Expr):
local_dict = where.env.scope
where = where.expr
elif isinstance(where, (list, tuple)):
for idx, w in enumerate(where):
if isinstance(w, Expr):
local_dict = w.env.scope
else:
w = self.parse_back_compat(w)
where[idx] = w
where = ' & ' .join(["(%s)" % w for w in where]) # noqa
self.expr = where
self.env = Scope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, string_types):
self.env.queryables.update(queryables)
self._visitor = ExprVisitor(self.env, queryables=queryables,
parser='pytables', engine='pytables',
encoding=encoding)
self.terms = self.parse()
def parse_back_compat(self, w, op=None, value=None):
""" allow backward compatibility for passed arguments """
if isinstance(w, dict):
w, op, value = w.get('field'), w.get('op'), w.get('value')
if not isinstance(w, string_types):
raise TypeError(
"where must be passed as a string if op/value are passed")
warnings.warn("passing a dict to Expr is deprecated, "
"pass the where as a single string",
FutureWarning, stacklevel=10)
if isinstance(w, tuple):
if len(w) == 2:
w, value = w
op = '=='
elif len(w) == 3:
w, op, value = w
warnings.warn("passing a tuple into Expr is deprecated, "
"pass the where as a single string",
FutureWarning, stacklevel=10)
if op is not None:
if not isinstance(w, string_types):
raise TypeError(
"where must be passed as a string if op/value are passed")
if isinstance(op, Expr):
raise TypeError("invalid op passed, must be a string")
w = "{0}{1}".format(w, op)
if value is not None:
if isinstance(value, Expr):
raise TypeError("invalid value passed, must be a string")
# stringify with quotes these values
def convert(v):
if (isinstance(v, (datetime, np.datetime64,
timedelta, np.timedelta64)) or
hasattr(v, 'timetuple')):
return "'{0}'".format(v)
return v
if isinstance(value, (list, tuple)):
value = [convert(v) for v in value]
else:
value = convert(value)
w = "{0}{1}".format(w, value)
warnings.warn("passing multiple values to Expr is deprecated, "
"pass the where as a single string",
FutureWarning, stacklevel=10)
return w
def __unicode__(self):
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError("cannot process expression [{0}], [{1}] is not a "
"valid condition".format(self.expr, self))
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError("cannot process expression [{0}], [{1}] is not a "
"valid filter".format(self.expr, self))
return self.condition, self.filter
class TermValue(object):
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind):
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding):
""" quote the string if not encoded
else encode and return """
if self.kind == u'string':
if encoding is not None:
return self.converted
return '"%s"' % self.converted
elif self.kind == u'float':
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return self.converted
def maybe_expression(s):
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, string_types):
return False
ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
# make sure we have an op at least
return any(op in s for op in ops)
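# ----------------------------------------------------------------------------
# Illustrative note (added; not part of the original module).  These classes
# are normally exercised indirectly through ``HDFStore.select``, e.g.
#
#   store.select('df', where="index > 20130101 & columns = ['A', 'B']")
#
# while ``maybe_expression`` only performs the loose syntactic check used to
# decide whether a string looks like such a query:
#
#   maybe_expression("index > 5")   # True  (contains a comparison operator)
#   maybe_expression("foo")         # False (no operator present)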
|
apache-2.0
|
yyjiang/scikit-learn
|
examples/cluster/plot_lena_ward_segmentation.py
|
271
|
1998
|
"""
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    plt.contour(label == l, levels=[0.5],
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
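# Illustrative follow-up, not part of the original example: because the
# clustering is spatially constrained, each label corresponds to a single
# connected region; counting pixels per label shows how unevenly sized the
# 15 segments are.
print("Pixels per segment: ", np.bincount(ward.labels_))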
|
bsd-3-clause
|
f3r/scikit-learn
|
examples/tree/plot_tree_regression.py
|
95
|
1516
|
"""
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns details of
the training data that are too fine and fits the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
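# Illustrative follow-up, not part of the original example: comparing the
# training error of the two trees makes the overfitting described in the
# docstring concrete, since the deeper tree tracks the noisy training
# targets more closely.
mse_1 = np.mean((regr_1.predict(X) - y) ** 2)
mse_2 = np.mean((regr_2.predict(X) - y) ** 2)
print("Training MSE with max_depth=2: %.4f" % mse_1)
print("Training MSE with max_depth=5: %.4f" % mse_2)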
|
bsd-3-clause
|
AlirezaShahabi/zipline
|
tests/test_algorithm.py
|
5
|
51101
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from datetime import timedelta
from mock import MagicMock
from nose_parameterized import parameterized
from six.moves import range
from textwrap import dedent
from unittest import TestCase
import numpy as np
import pandas as pd
from zipline.assets import AssetFinder
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.control_flow import nullctx
from zipline.utils.test_utils import (
setup_logger,
teardown_logger
)
import zipline.utils.factory as factory
import zipline.utils.simfactory as simfactory
from zipline.errors import (
OrderDuringInitialize,
RegisterTradingControlPostInit,
TradingControlViolation,
AccountControlViolation,
SymbolNotFound,
RootSymbolNotFound,
)
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
AmbitiousStopLimitAlgorithm,
EmptyPositionsAlgorithm,
InvalidOrderAlgorithm,
RecordAlgorithm,
TestAlgorithm,
TestOrderAlgorithm,
TestOrderInstantAlgorithm,
TestOrderPercentAlgorithm,
TestOrderStyleForwardingAlgorithm,
TestOrderValueAlgorithm,
TestRegisterTransformAlgorithm,
TestTargetAlgorithm,
TestTargetPercentAlgorithm,
TestTargetValueAlgorithm,
SetLongOnlyAlgorithm,
SetAssetDateBoundsAlgorithm,
SetMaxPositionSizeAlgorithm,
SetMaxOrderCountAlgorithm,
SetMaxOrderSizeAlgorithm,
SetDoNotOrderListAlgorithm,
SetMaxLeverageAlgorithm,
api_algo,
api_get_environment_algo,
api_symbol_algo,
call_all_order_methods,
call_order_in_init,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
)
import zipline.utils.events
from zipline.utils.test_utils import (
assert_single_position,
drain_zipline,
to_utc,
)
from zipline.sources import (SpecificEquityTrades,
DataFrameSource,
DataPanelSource,
RandomWalkSource)
from zipline.assets import Equity
from zipline.finance.execution import LimitOrder
from zipline.finance.trading import SimulationParameters
from zipline.utils.api_support import set_algo_instance
from zipline.utils.events import DateRuleFactory, TimeRuleFactory
from zipline.algorithm import TradingAlgorithm
from zipline.protocol import DATASOURCE_TYPE
from zipline.finance.trading import TradingEnvironment
from zipline.finance.commission import PerShare
class TestRecordAlgorithm(TestCase):
def setUp(self):
self.sim_params = factory.create_simulation_parameters(num_days=4)
trade_history = factory.create_trade_history(
133,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=trade_history)
self.df_source, self.df = \
factory.create_test_df_source(self.sim_params)
def test_record_incr(self):
algo = RecordAlgorithm(
sim_params=self.sim_params)
output = algo.run(self.source)
np.testing.assert_array_equal(output['incr'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name2'].values,
[2] * len(output))
np.testing.assert_array_equal(output['name3'].values,
range(1, len(output) + 1))
class TestMiscellaneousAPI(TestCase):
def setUp(self):
setup_logger(self)
sids = [1, 2]
self.sim_params = factory.create_simulation_parameters(
num_days=2,
data_frequency='minute',
emission_rate='minute',
)
self.source = factory.create_minutely_trade_source(
sids,
sim_params=self.sim_params,
concurrent=True,
)
def tearDown(self):
teardown_logger(self)
def test_zipline_api_resolves_dynamically(self):
# Make a dummy algo.
algo = TradingAlgorithm(
initialize=lambda context: None,
handle_data=lambda context, data: None,
sim_params=self.sim_params,
)
# Verify that api methods get resolved dynamically by patching them out
# and then calling them
for method in algo.all_api_methods():
name = method.__name__
sentinel = object()
def fake_method(*args, **kwargs):
return sentinel
setattr(algo, name, fake_method)
with ZiplineAPI(algo):
self.assertIs(sentinel, getattr(zipline.api, name)())
def test_get_environment(self):
expected_env = {
'arena': 'backtest',
'data_frequency': 'minute',
'start': pd.Timestamp('2006-01-03 14:31:00+0000', tz='UTC'),
'end': pd.Timestamp('2006-01-04 21:00:00+0000', tz='UTC'),
'capital_base': 100000.0,
'platform': 'zipline'
}
def initialize(algo):
self.assertEqual('zipline', algo.get_environment())
self.assertEqual(expected_env, algo.get_environment('*'))
def handle_data(algo, data):
pass
algo = TradingAlgorithm(initialize=initialize,
handle_data=handle_data,
sim_params=self.sim_params)
algo.run(self.source)
def test_get_open_orders(self):
def initialize(algo):
algo.minute = 0
def handle_data(algo, data):
if algo.minute == 0:
# Should be filled by the next minute
algo.order(algo.sid(1), 1)
# Won't be filled because the price is too low.
algo.order(algo.sid(2), 1, style=LimitOrder(0.01))
algo.order(algo.sid(2), 1, style=LimitOrder(0.01))
algo.order(algo.sid(2), 1, style=LimitOrder(0.01))
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [1, 2])
self.assertEqual(all_orders[1], algo.get_open_orders(1))
self.assertEqual(len(all_orders[1]), 1)
self.assertEqual(all_orders[2], algo.get_open_orders(2))
self.assertEqual(len(all_orders[2]), 3)
if algo.minute == 1:
# First order should have filled.
# Second order should still be open.
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [2])
self.assertEqual([], algo.get_open_orders(1))
orders_2 = algo.get_open_orders(2)
self.assertEqual(all_orders[2], orders_2)
self.assertEqual(len(all_orders[2]), 3)
for order in orders_2:
algo.cancel_order(order)
all_orders = algo.get_open_orders()
self.assertEqual(all_orders, {})
algo.minute += 1
algo = TradingAlgorithm(initialize=initialize,
handle_data=handle_data,
sim_params=self.sim_params)
algo.run(self.source)
def test_schedule_function(self):
date_rules = DateRuleFactory
time_rules = TimeRuleFactory
def incrementer(algo, data):
algo.func_called += 1
self.assertEqual(
algo.get_datetime().time(),
datetime.time(hour=14, minute=31),
)
def initialize(algo):
algo.func_called = 0
algo.days = 1
algo.date = None
algo.schedule_function(
func=incrementer,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
)
def handle_data(algo, data):
if not algo.date:
algo.date = algo.get_datetime().date()
if algo.date < algo.get_datetime().date():
algo.days += 1
algo.date = algo.get_datetime().date()
algo = TradingAlgorithm(
initialize=initialize,
handle_data=handle_data,
sim_params=self.sim_params,
)
algo.run(self.source)
self.assertEqual(algo.func_called, algo.days)
@parameterized.expand([
('daily',),
        ('minute',),
])
    def test_schedule_function_rule_creation(self, mode):
def nop(*args, **kwargs):
return None
self.sim_params.data_frequency = mode
algo = TradingAlgorithm(
initialize=nop, handle_data=nop, sim_params=self.sim_params,
)
# Schedule something for NOT Always.
algo.schedule_function(nop, time_rule=zipline.utils.events.Never())
event_rule = algo.event_manager._events[1].rule
self.assertIsInstance(event_rule, zipline.utils.events.OncePerDay)
inner_rule = event_rule.rule
self.assertIsInstance(inner_rule, zipline.utils.events.ComposedRule)
first = inner_rule.first
second = inner_rule.second
composer = inner_rule.composer
self.assertIsInstance(first, zipline.utils.events.Always)
if mode == 'daily':
self.assertIsInstance(second, zipline.utils.events.Always)
else:
self.assertIsInstance(second, zipline.utils.events.Never)
self.assertIs(composer, zipline.utils.events.ComposedRule.lazy_and)
def test_asset_lookup(self):
metadata = {0: {'symbol': 'PLAY',
'asset_type': 'equity',
'start_date': '2002-01-01',
'end_date': '2004-01-01'},
1: {'symbol': 'PLAY',
'asset_type': 'equity',
'start_date': '2005-01-01',
'end_date': '2006-01-01'}}
algo = TradingAlgorithm(asset_metadata=metadata)
# Test before either PLAY existed
algo.datetime = pd.Timestamp('2001-12-01', tz='UTC')
with self.assertRaises(SymbolNotFound):
algo.symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.symbols('PLAY')
# Test when first PLAY exists
algo.datetime = pd.Timestamp('2002-12-01', tz='UTC')
list_result = algo.symbols('PLAY')
self.assertEqual(0, list_result[0])
# Test after first PLAY ends
algo.datetime = pd.Timestamp('2004-12-01', tz='UTC')
self.assertEqual(0, algo.symbol('PLAY'))
# Test after second PLAY begins
algo.datetime = pd.Timestamp('2005-12-01', tz='UTC')
self.assertEqual(1, algo.symbol('PLAY'))
# Test after second PLAY ends
algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
self.assertEqual(1, algo.symbol('PLAY'))
list_result = algo.symbols('PLAY')
self.assertEqual(1, list_result[0])
# Test lookup SID
self.assertIsInstance(algo.sid(0), Equity)
self.assertIsInstance(algo.sid(1), Equity)
def test_future_chain(self):
""" Tests the future_chain API function.
"""
metadata = {
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},
1: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},
2: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},
3: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}
}
algo = TradingAlgorithm(asset_metadata=metadata)
algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
# Check that the fields of the FutureChain object are set correctly
cl = algo.future_chain('CL')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.as_of_date, algo.datetime)
# Check the fields are set correctly if an as_of_date is supplied
as_of_date = pd.Timestamp('1952-08-11', tz='UTC')
cl = algo.future_chain('CL', as_of_date=as_of_date)
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.as_of_date, as_of_date)
cl = algo.future_chain('CL', as_of_date='1952-08-11')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.as_of_date, as_of_date)
# Check that weird capitalization is corrected
cl = algo.future_chain('cL')
self.assertEqual(cl.root_symbol, 'CL')
cl = algo.future_chain('cl')
self.assertEqual(cl.root_symbol, 'CL')
# Check that invalid root symbols raise RootSymbolNotFound
with self.assertRaises(RootSymbolNotFound):
algo.future_chain('CLZ')
with self.assertRaises(RootSymbolNotFound):
algo.future_chain('')
class TestTransformAlgorithm(TestCase):
def setUp(self):
setup_logger(self)
self.sim_params = factory.create_simulation_parameters(num_days=4)
trade_history = factory.create_trade_history(
133,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=trade_history)
self.df_source, self.df = \
factory.create_test_df_source(self.sim_params)
self.panel_source, self.panel = \
factory.create_test_panel_source(self.sim_params)
def tearDown(self):
teardown_logger(self)
def test_source_as_input(self):
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
sids=[133]
)
algo.run(self.source)
self.assertEqual(len(algo.sources), 1)
assert isinstance(algo.sources[0], SpecificEquityTrades)
def test_invalid_order_parameters(self):
algo = InvalidOrderAlgorithm(
sids=[133],
sim_params=self.sim_params
)
algo.run(self.source)
def test_multi_source_as_input(self):
sim_params = SimulationParameters(
self.df.index[0],
self.df.index[-1]
)
algo = TestRegisterTransformAlgorithm(
sim_params=sim_params,
sids=[0, 1, 133]
)
algo.run([self.source, self.df_source], overwrite_sim_params=False)
self.assertEqual(len(algo.sources), 2)
def test_df_as_input(self):
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
sids=[0, 1]
)
algo.run(self.df)
assert isinstance(algo.sources[0], DataFrameSource)
def test_panel_as_input(self):
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
sids=[0, 1])
algo.run(self.panel)
assert isinstance(algo.sources[0], DataPanelSource)
def test_run_twice(self):
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
sids=[0, 1]
)
res1 = algo.run(self.df)
res2 = algo.run(self.df)
np.testing.assert_array_equal(res1, res2)
def test_data_frequency_setting(self):
self.sim_params.data_frequency = 'daily'
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
)
self.assertEqual(algo.sim_params.data_frequency, 'daily')
self.sim_params.data_frequency = 'minute'
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
)
self.assertEqual(algo.sim_params.data_frequency, 'minute')
@parameterized.expand([
(TestOrderAlgorithm,),
(TestOrderValueAlgorithm,),
(TestTargetAlgorithm,),
(TestOrderPercentAlgorithm,),
(TestTargetPercentAlgorithm,),
(TestTargetValueAlgorithm,),
])
def test_order_methods(self, algo_class):
algo = algo_class(
sim_params=self.sim_params,
)
algo.run(self.df)
@parameterized.expand([
(TestOrderAlgorithm,),
(TestOrderValueAlgorithm,),
(TestTargetAlgorithm,),
(TestOrderPercentAlgorithm,),
(TestTargetValueAlgorithm,),
])
def test_order_methods_for_future(self, algo_class):
metadata = {0: {'asset_type': 'future',
'contract_multiplier': 10}}
algo = algo_class(
sim_params=self.sim_params,
asset_metadata=metadata
)
algo.run(self.df)
def test_order_method_style_forwarding(self):
method_names_to_test = ['order',
'order_value',
'order_percent',
'order_target',
'order_target_percent',
'order_target_value']
for name in method_names_to_test:
algo = TestOrderStyleForwardingAlgorithm(
sim_params=self.sim_params,
instant_fill=False,
method_name=name
)
algo.run(self.df)
def test_order_instant(self):
algo = TestOrderInstantAlgorithm(sim_params=self.sim_params,
instant_fill=True)
algo.run(self.df)
def test_minute_data(self):
source = RandomWalkSource(freq='minute',
start=pd.Timestamp('2000-1-3',
tz='UTC'),
end=pd.Timestamp('2000-1-4',
tz='UTC'))
self.sim_params.data_frequency = 'minute'
algo = TestOrderInstantAlgorithm(sim_params=self.sim_params,
instant_fill=True)
algo.run(source)
class TestPositions(TestCase):
def setUp(self):
setup_logger(self)
self.sim_params = factory.create_simulation_parameters(num_days=4)
trade_history = factory.create_trade_history(
1,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=trade_history)
self.df_source, self.df = \
factory.create_test_df_source(self.sim_params)
def tearDown(self):
teardown_logger(self)
def test_empty_portfolio(self):
algo = EmptyPositionsAlgorithm(sim_params=self.sim_params)
daily_stats = algo.run(self.df)
expected_position_count = [
0, # Before entering the first position
1, # After entering, exiting on this date
0, # After exiting
0,
]
for i, expected in enumerate(expected_position_count):
self.assertEqual(daily_stats.ix[i]['num_positions'],
expected)
def test_noop_orders(self):
algo = AmbitiousStopLimitAlgorithm(sid=1)
daily_stats = algo.run(self.source)
        # Verify that positions are empty for all dates.
empty_positions = daily_stats.positions.map(lambda x: len(x) == 0)
self.assertTrue(empty_positions.all())
class TestAlgoScript(TestCase):
def setUp(self):
days = 251
self.sim_params = factory.create_simulation_parameters(num_days=days)
setup_logger(self)
trade_history = factory.create_trade_history(
133,
[10.0] * days,
[100] * days,
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(sids=[133],
event_list=trade_history)
self.df_source, self.df = \
factory.create_test_df_source(self.sim_params)
self.zipline_test_config = {
'sid': 0,
}
def tearDown(self):
teardown_logger(self)
def test_noop(self):
algo = TradingAlgorithm(initialize=initialize_noop,
handle_data=handle_data_noop)
algo.run(self.df)
def test_noop_string(self):
algo = TradingAlgorithm(script=noop_algo)
algo.run(self.df)
def test_api_calls(self):
algo = TradingAlgorithm(initialize=initialize_api,
handle_data=handle_data_api)
algo.run(self.df)
def test_api_calls_string(self):
algo = TradingAlgorithm(script=api_algo)
algo.run(self.df)
def test_api_get_environment(self):
platform = 'zipline'
metadata = {0: {'symbol': 'TEST',
'asset_type': 'equity'}}
algo = TradingAlgorithm(script=api_get_environment_algo,
asset_metadata=metadata,
platform=platform)
algo.run(self.df)
self.assertEqual(algo.environment, platform)
def test_api_symbol(self):
metadata = {0: {'symbol': 'TEST',
'asset_type': 'equity'}}
algo = TradingAlgorithm(script=api_symbol_algo,
asset_metadata=metadata)
algo.run(self.df)
def test_fixed_slippage(self):
# verify order -> transaction -> portfolio position.
# --------------
test_algo = TradingAlgorithm(
script="""
from zipline.api import (slippage,
commission,
set_slippage,
set_commission,
order,
record,
sid)
def initialize(context):
model = slippage.FixedSlippage(spread=0.10)
set_slippage(model)
set_commission(commission.PerTrade(100.00))
context.count = 1
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
order(sid(0), -1000)
record(price=data[0].price)
context.incr += 1""",
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 200
# this matches the value in the algotext initialize
# method, and will be used inside assert_single_position
# to confirm we have as many transactions as orders we
# placed.
self.zipline_test_config['order_count'] = 1
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = assert_single_position(self, zipline)
# confirm the slippage and commission on a sample
# transaction
recorded_price = output[1]['daily_perf']['recorded_vars']['price']
transaction = output[1]['daily_perf']['transactions'][0]
self.assertEqual(100.0, transaction['commission'])
expected_spread = 0.05
expected_commish = 0.10
expected_price = recorded_price - expected_spread - expected_commish
self.assertEqual(expected_price, transaction['price'])
def test_volshare_slippage(self):
# verify order -> transaction -> portfolio position.
# --------------
test_algo = TradingAlgorithm(
script="""
from zipline.api import *
def initialize(context):
model = slippage.VolumeShareSlippage(
volume_limit=.3,
price_impact=0.05
)
set_slippage(model)
set_commission(commission.PerShare(0.02))
context.count = 2
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
# order small lots to be sure the
# order will fill in a single transaction
order(sid(0), 5000)
record(price=data[0].price)
record(volume=data[0].volume)
record(incr=context.incr)
context.incr += 1
""",
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 100
# 67 will be used inside assert_single_position
# to confirm we have as many transactions as expected.
# The algo places 2 trades of 5000 shares each. The trade
# events have volume ranging from 100 to 950. The volume cap
# of 0.3 limits the trade volume to a range of 30 - 316 shares.
# The spreadsheet linked below calculates the total position
# size over each bar, and predicts 67 txns will be required
# to fill the two orders. The number of bars and transactions
# differ because some bars result in multiple txns. See
# spreadsheet for details:
# https://www.dropbox.com/s/ulrk2qt0nrtrigb/Volume%20Share%20Worksheet.xlsx
self.zipline_test_config['expected_transactions'] = 67
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = assert_single_position(self, zipline)
# confirm the slippage and commission on a sample
# transaction
per_share_commish = 0.02
perf = output[1]
transaction = perf['daily_perf']['transactions'][0]
commish = transaction['amount'] * per_share_commish
self.assertEqual(commish, transaction['commission'])
self.assertEqual(2.029, transaction['price'])
def test_algo_record_vars(self):
test_algo = TradingAlgorithm(
script=record_variables,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 200
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = drain_zipline(self, zipline)
self.assertEqual(len(output), 252)
incr = []
for o in output[:200]:
incr.append(o['daily_perf']['recorded_vars']['incr'])
np.testing.assert_array_equal(incr, range(1, 201))
def test_algo_record_allow_mock(self):
"""
Test that values from "MagicMock"ed methods can be passed to record.
        Relevant for our basic validation flows and for methods like history,
        which will end up returning a MagicMock instead of a DataFrame.
"""
test_algo = TradingAlgorithm(
script=record_variables,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
test_algo.record(foo=MagicMock())
def _algo_record_float_magic_should_pass(self, var_type):
test_algo = TradingAlgorithm(
script=record_float_magic % var_type,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 200
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = drain_zipline(self, zipline)
self.assertEqual(len(output), 252)
incr = []
for o in output[:200]:
incr.append(o['daily_perf']['recorded_vars']['data'])
np.testing.assert_array_equal(incr, [np.nan] * 200)
def test_algo_record_nan(self):
self._algo_record_float_magic_should_pass('nan')
def test_order_methods(self):
"""
Only test that order methods can be called without error.
Correct filling of orders is tested in zipline.
"""
test_algo = TradingAlgorithm(
script=call_all_order_methods,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 200
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = drain_zipline(self, zipline)
def test_order_in_init(self):
"""
Test that calling order in initialize
will raise an error.
"""
with self.assertRaises(OrderDuringInitialize):
test_algo = TradingAlgorithm(
script=call_order_in_init,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
test_algo.run(self.source)
def test_portfolio_in_init(self):
"""
Test that accessing portfolio in init doesn't break.
"""
test_algo = TradingAlgorithm(
script=access_portfolio_in_init,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 1
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = drain_zipline(self, zipline)
def test_account_in_init(self):
"""
Test that accessing account in init doesn't break.
"""
test_algo = TradingAlgorithm(
script=access_account_in_init,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 1
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = drain_zipline(self, zipline)
class TestHistory(TestCase):
@classmethod
def setUpClass(cls):
cls._start = pd.Timestamp('1991-01-01', tz='UTC')
cls._end = pd.Timestamp('1991-01-15', tz='UTC')
cls.sim_params = factory.create_simulation_parameters(
data_frequency='minute',
)
@property
def source(self):
return RandomWalkSource(start=self._start, end=self._end)
def test_history(self):
history_algo = """
from zipline.api import history, add_history
def initialize(context):
add_history(10, '1d', 'price')
def handle_data(context, data):
df = history(10, '1d', 'price')
"""
algo = TradingAlgorithm(
script=history_algo,
sim_params=self.sim_params,
)
output = algo.run(self.source)
self.assertIsNot(output, None)
def test_history_without_add(self):
def handle_data(algo, data):
algo.history(1, '1m', 'price')
algo = TradingAlgorithm(
initialize=lambda _: None,
handle_data=handle_data,
sim_params=self.sim_params,
)
algo.run(self.source)
self.assertIsNotNone(algo.history_container)
self.assertEqual(algo.history_container.buffer_panel.window_length, 1)
def test_add_history_in_handle_data(self):
def handle_data(algo, data):
algo.add_history(1, '1m', 'price')
algo = TradingAlgorithm(
initialize=lambda _: None,
handle_data=handle_data,
sim_params=self.sim_params,
)
algo.run(self.source)
self.assertIsNotNone(algo.history_container)
self.assertEqual(algo.history_container.buffer_panel.window_length, 1)
class TestGetDatetime(TestCase):
@parameterized.expand(
[
('default', None,),
('utc', 'UTC',),
('us_east', 'US/Eastern',),
]
)
def test_get_datetime(self, name, tz):
algo = dedent(
"""
import pandas as pd
from zipline.api import get_datetime
def initialize(context):
context.tz = {tz} or 'UTC'
context.first_bar = True
def handle_data(context, data):
if context.first_bar:
dt = get_datetime({tz})
if dt.tz.zone != context.tz:
raise ValueError("Mismatched Zone")
elif dt.tz_convert("US/Eastern").hour != 9:
raise ValueError("Mismatched Hour")
elif dt.tz_convert("US/Eastern").minute != 31:
raise ValueError("Mismatched Minute")
context.first_bar = False
""".format(tz=repr(tz))
)
start = to_utc('2014-01-02 9:31')
end = to_utc('2014-01-03 9:31')
source = RandomWalkSource(
start=start,
end=end,
)
sim_params = factory.create_simulation_parameters(
data_frequency='minute'
)
algo = TradingAlgorithm(
script=algo,
sim_params=sim_params,
identifiers=[1]
)
algo.run(source)
self.assertFalse(algo.first_bar)
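def _market_open_tz_sketch():
    """ Illustrative sketch, not part of the original test suite: the checks
    in TestGetDatetime above reduce to plain pandas timezone conversion.
    Assuming the first bar of the random-walk source lands at 9:31 US/Eastern
    (14:31 UTC during winter time), converting the UTC timestamp back
    recovers 9:31. """
    first_bar = pd.Timestamp('2014-01-02 14:31', tz='UTC')
    eastern = first_bar.tz_convert('US/Eastern')
    assert (eastern.hour, eastern.minute) == (9, 31)
    return eastern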
class TestTradingControls(TestCase):
def setUp(self):
self.sim_params = factory.create_simulation_parameters(num_days=4)
self.sid = 133
self.trade_history = factory.create_trade_history(
self.sid,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=self.trade_history)
def _check_algo(self,
algo,
handle_data,
expected_order_count,
expected_exc):
algo._handle_data = handle_data
with self.assertRaises(expected_exc) if expected_exc else nullctx():
algo.run(self.source)
self.assertEqual(algo.order_count, expected_order_count)
self.source.rewind()
def check_algo_succeeds(self, algo, handle_data, order_count=4):
# Default for order_count assumes one order per handle_data call.
self._check_algo(algo, handle_data, order_count, None)
def check_algo_fails(self, algo, handle_data, order_count):
self._check_algo(algo,
handle_data,
order_count,
TradingControlViolation)
def test_set_max_position_size(self):
# Buy one share four times. Should be fine.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = SetMaxPositionSizeAlgorithm(sid=self.sid,
max_shares=10,
max_notional=500.0)
self.check_algo_succeeds(algo, handle_data)
# Buy three shares four times. Should bail on the fourth before it's
# placed.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 3)
algo.order_count += 1
algo = SetMaxPositionSizeAlgorithm(sid=self.sid,
max_shares=10,
max_notional=500.0)
self.check_algo_fails(algo, handle_data, 3)
        # Buy three shares four times. Should bail due to max_notional on the
        # third attempt (see the back-of-the-envelope sketch after this class).
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 3)
algo.order_count += 1
algo = SetMaxPositionSizeAlgorithm(sid=self.sid,
max_shares=10,
max_notional=61.0)
self.check_algo_fails(algo, handle_data, 2)
# Set the trading control to a different sid, then BUY ALL THE THINGS!.
# Should continue normally.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = SetMaxPositionSizeAlgorithm(sid=self.sid + 1,
max_shares=10,
max_notional=61.0)
self.check_algo_succeeds(algo, handle_data)
# Set the trading control sid to None, then BUY ALL THE THINGS!. Should
# fail because setting sid to None makes the control apply to all sids.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = SetMaxPositionSizeAlgorithm(max_shares=10, max_notional=61.0)
self.check_algo_fails(algo, handle_data, 0)
def test_set_do_not_order_list(self):
# set the restricted list to be the sid, and fail.
algo = SetDoNotOrderListAlgorithm(
sid=self.sid,
restricted_list=[self.sid])
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 100)
algo.order_count += 1
self.check_algo_fails(algo, handle_data, 0)
# set the restricted list to exclude the sid, and succeed
algo = SetDoNotOrderListAlgorithm(
sid=self.sid,
restricted_list=[134, 135, 136])
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 100)
algo.order_count += 1
self.check_algo_succeeds(algo, handle_data)
def test_set_max_order_size(self):
# Buy one share.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = SetMaxOrderSizeAlgorithm(sid=self.sid,
max_shares=10,
max_notional=500.0)
self.check_algo_succeeds(algo, handle_data)
# Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
# because we exceed shares.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), algo.order_count + 1)
algo.order_count += 1
algo = SetMaxOrderSizeAlgorithm(sid=self.sid,
max_shares=3,
max_notional=500.0)
self.check_algo_fails(algo, handle_data, 3)
# Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
# because we exceed notional.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), algo.order_count + 1)
algo.order_count += 1
algo = SetMaxOrderSizeAlgorithm(sid=self.sid,
max_shares=10,
max_notional=40.0)
self.check_algo_fails(algo, handle_data, 3)
# Set the trading control to a different sid, then BUY ALL THE THINGS!.
# Should continue normally.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = SetMaxOrderSizeAlgorithm(sid=self.sid + 1,
max_shares=1,
max_notional=1.0)
self.check_algo_succeeds(algo, handle_data)
# Set the trading control sid to None, then BUY ALL THE THINGS!.
# Should fail because not specifying a sid makes the trading control
# apply to all sids.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = SetMaxOrderSizeAlgorithm(max_shares=1,
max_notional=1.0)
self.check_algo_fails(algo, handle_data, 0)
def test_set_max_order_count(self):
# Override the default setUp to use six-hour intervals instead of full
# days so we can exercise trading-session rollover logic.
trade_history = factory.create_trade_history(
self.sid,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(hours=6),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=trade_history)
def handle_data(algo, data):
for i in range(5):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = SetMaxOrderCountAlgorithm(3)
self.check_algo_fails(algo, handle_data, 3)
# Second call to handle_data is the same day as the first, so the last
# order of the second call should fail.
algo = SetMaxOrderCountAlgorithm(9)
self.check_algo_fails(algo, handle_data, 9)
# Only ten orders are placed per day, so this should pass even though
# in total more than 20 orders are placed.
algo = SetMaxOrderCountAlgorithm(10)
self.check_algo_succeeds(algo, handle_data, order_count=20)
def test_long_only(self):
# Sell immediately -> fail immediately.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), -1)
algo.order_count += 1
algo = SetLongOnlyAlgorithm()
self.check_algo_fails(algo, handle_data, 0)
# Buy on even days, sell on odd days. Never takes a short position, so
# should succeed.
def handle_data(algo, data):
if (algo.order_count % 2) == 0:
algo.order(algo.sid(self.sid), 1)
else:
algo.order(algo.sid(self.sid), -1)
algo.order_count += 1
algo = SetLongOnlyAlgorithm()
self.check_algo_succeeds(algo, handle_data)
# Buy on first three days, then sell off holdings. Should succeed.
def handle_data(algo, data):
amounts = [1, 1, 1, -3]
algo.order(algo.sid(self.sid), amounts[algo.order_count])
algo.order_count += 1
algo = SetLongOnlyAlgorithm()
self.check_algo_succeeds(algo, handle_data)
# Buy on first three days, then sell off holdings plus an extra share.
# Should fail on the last sale.
def handle_data(algo, data):
amounts = [1, 1, 1, -4]
algo.order(algo.sid(self.sid), amounts[algo.order_count])
algo.order_count += 1
algo = SetLongOnlyAlgorithm()
self.check_algo_fails(algo, handle_data, 3)
def test_register_post_init(self):
def initialize(algo):
algo.initialized = True
def handle_data(algo, data):
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_position_size(self.sid, 1, 1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_order_size(self.sid, 1, 1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_order_count(1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_long_only()
algo = TradingAlgorithm(initialize=initialize,
handle_data=handle_data)
algo.run(self.source)
self.source.rewind()
def test_asset_date_bounds(self):
# Run the algorithm with a sid that ends far in the future
df_source, _ = factory.create_test_df_source(self.sim_params)
metadata = {0: {'start_date': '1990-01-01',
'end_date': '2020-01-01'}}
asset_finder = AssetFinder()
algo = SetAssetDateBoundsAlgorithm(
asset_finder=asset_finder,
asset_metadata=metadata,
sim_params=self.sim_params,)
algo.run(df_source)
# Run the algorithm with a sid that has already ended
df_source, _ = factory.create_test_df_source(self.sim_params)
metadata = {0: {'start_date': '1989-01-01',
'end_date': '1990-01-01'}}
asset_finder = AssetFinder()
algo = SetAssetDateBoundsAlgorithm(
asset_finder=asset_finder,
asset_metadata=metadata,
sim_params=self.sim_params,)
with self.assertRaises(TradingControlViolation):
algo.run(df_source)
# Run the algorithm with a sid that has not started
df_source, _ = factory.create_test_df_source(self.sim_params)
metadata = {0: {'start_date': '2020-01-01',
'end_date': '2021-01-01'}}
algo = SetAssetDateBoundsAlgorithm(
asset_finder=asset_finder,
asset_metadata=metadata,
sim_params=self.sim_params,)
with self.assertRaises(TradingControlViolation):
algo.run(df_source)
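def _max_position_size_sketch():
    """ Back-of-the-envelope sketch, not part of the original tests, of the
    order counts asserted in test_set_max_position_size above. It assumes the
    control checks the post-order position against the current bar's price,
    with prices following the trade history built in setUp. """
    prices = [10.0, 10.0, 11.0, 11.0]
    cum_shares = 0
    allowed_orders = 0
    for price in prices:
        cum_shares += 3
        if cum_shares > 10 or cum_shares * price > 61.0:
            break
        allowed_orders += 1
    # Two 3-share orders pass (30.0 and 60.0 notional); the third would hold
    # 9 shares at 11.0 = 99.0 > 61.0, matching check_algo_fails(..., 2) above.
    return allowed_orders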
class TestAccountControls(TestCase):
def setUp(self):
self.sim_params = factory.create_simulation_parameters(num_days=4)
self.sidint = 133
self.trade_history = factory.create_trade_history(
self.sidint,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=self.trade_history)
def _check_algo(self,
algo,
handle_data,
expected_exc):
algo._handle_data = handle_data
with self.assertRaises(expected_exc) if expected_exc else nullctx():
algo.run(self.source)
self.source.rewind()
def check_algo_succeeds(self, algo, handle_data):
        # Unlike the trading-control checks above, no order count is asserted here.
self._check_algo(algo, handle_data, None)
def check_algo_fails(self, algo, handle_data):
self._check_algo(algo,
handle_data,
AccountControlViolation)
def test_set_max_leverage(self):
# Set max leverage to 0 so buying one share fails.
def handle_data(algo, data):
algo.order(algo.sid(self.sidint), 1)
algo = SetMaxLeverageAlgorithm(0)
self.check_algo_fails(algo, handle_data)
# Set max leverage to 1 so buying one share passes
def handle_data(algo, data):
algo.order(algo.sid(self.sidint), 1)
algo = SetMaxLeverageAlgorithm(1)
self.check_algo_succeeds(algo, handle_data)
class TestClosePosAlgo(TestCase):
def setUp(self):
self.days = TradingEnvironment().trading_days
self.index = [self.days[0], self.days[1], self.days[2]]
self.panel = pd.Panel({1: pd.DataFrame({
'price': [1, 2, 4], 'volume': [1e9, 0, 0],
'type': [DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.CLOSE_POSITION]},
index=self.index)
})
self.no_close_panel = pd.Panel({1: pd.DataFrame({
'price': [1, 2, 4], 'volume': [1e9, 0, 0],
'type': [DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.TRADE]},
index=self.index)
})
def test_close_position_equity(self):
metadata = {1: {'symbol': 'TEST',
'asset_type': 'equity',
'end_date': self.days[3]}}
self.algo = TestAlgorithm(sid=1, amount=1, order_count=1,
instant_fill=True, commission=PerShare(0),
asset_metadata=metadata)
self.data = DataPanelSource(self.panel)
# Check results
expected_positions = [1, 1, 0]
expected_pnl = [0, 1, 2]
results = self.run_algo()
self.check_algo_pnl(results, expected_pnl)
self.check_algo_positions(results, expected_positions)
def test_close_position_future(self):
metadata = {1: {'symbol': 'TEST',
'asset_type': 'future',
}}
self.algo = TestAlgorithm(sid=1, amount=1, order_count=1,
instant_fill=True, commission=PerShare(0),
asset_metadata=metadata)
self.data = DataPanelSource(self.panel)
# Check results
expected_positions = [1, 1, 0]
expected_pnl = [0, 1, 2]
results = self.run_algo()
self.check_algo_pnl(results, expected_pnl)
self.check_algo_positions(results, expected_positions)
def test_auto_close_future(self):
metadata = {1: {'symbol': 'TEST',
'asset_type': 'future',
'notice_date': self.days[3],
'expiration_date': self.days[4]}}
self.algo = TestAlgorithm(sid=1, amount=1, order_count=1,
instant_fill=True, commission=PerShare(0),
asset_metadata=metadata)
self.data = DataPanelSource(self.no_close_panel)
# Check results
results = self.run_algo()
expected_pnl = [0, 1, 2]
self.check_algo_pnl(results, expected_pnl)
expected_positions = [1, 1, 0]
self.check_algo_positions(results, expected_positions)
def run_algo(self):
results = self.algo.run(self.data)
return results
def check_algo_pnl(self, results, expected_pnl):
for i, pnl in enumerate(results.pnl):
self.assertEqual(pnl, expected_pnl[i])
def check_algo_positions(self, results, expected_positions):
for i, amount in enumerate(results.positions):
if amount:
actual_position = amount[0]['amount']
else:
actual_position = 0
self.assertEqual(actual_position, expected_positions[i])
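def _close_position_pnl_sketch():
    """ Illustrative sketch, not part of the original tests: the expected_pnl
    lists above follow from holding a single share, bought with instant fill
    and zero commission at the first bar's price, and marking it to market at
    each later price, with the position closed on the final bar. """
    prices = [1, 2, 4]
    daily_pnl = [0] + [after - before
                       for before, after in zip(prices, prices[1:])]
    assert daily_pnl == [0, 1, 2]
    return daily_pnl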
|
apache-2.0
|
alphaBenj/zipline
|
tests/test_finance.py
|
5
|
17083
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.finance package
"""
from datetime import datetime, timedelta
import os
from nose.tools import timed
import numpy as np
import pandas as pd
import pytz
from six import iteritems
from six.moves import range
from testfixtures import TempDirectory
from zipline.assets.synthetic import make_simple_equity_info
from zipline.finance.blotter import Blotter
from zipline.finance.execution import MarketOrder, LimitOrder
from zipline.finance.performance import PerformanceTracker
from zipline.finance.trading import SimulationParameters
from zipline.data.us_equity_pricing import BcolzDailyBarReader
from zipline.data.minute_bars import BcolzMinuteBarReader
from zipline.data.data_portal import DataPortal
from zipline.data.us_equity_pricing import BcolzDailyBarWriter
from zipline.finance.slippage import FixedSlippage
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.protocol import BarData
from zipline.testing import (
tmp_trading_env,
write_bcolz_minute_data,
)
from zipline.testing.fixtures import (
WithLogger,
WithTradingEnvironment,
ZiplineTestCase,
)
import zipline.utils.factory as factory
DEFAULT_TIMEOUT = 15 # seconds
EXTENDED_TIMEOUT = 90
_multiprocess_can_split_ = False
class FinanceTestCase(WithLogger,
WithTradingEnvironment,
ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = 1, 2, 133
start = START_DATE = pd.Timestamp('2006-01-01', tz='utc')
end = END_DATE = pd.Timestamp('2006-12-31', tz='utc')
def init_instance_fixtures(self):
super(FinanceTestCase, self).init_instance_fixtures()
self.zipline_test_config = {'sid': 133}
# TODO: write tests for short sales
# TODO: write a test to do massive buying or shorting.
@timed(DEFAULT_TIMEOUT)
def test_partially_filled_orders(self):
# create a scenario where order size and trade size are equal
# so that orders must be spread out over several trades.
params = {
'trade_count': 360,
'trade_interval': timedelta(minutes=1),
'order_count': 2,
'order_amount': 100,
'order_interval': timedelta(minutes=1),
            # because we placed two orders for 100 shares each, the volume of
            # each trade is 100, and by default you can take up 2.5% of a
            # bar's volume, the simulator should spread the two orders over
            # 100 transactions of 2 shares each (50 fills per order).
'expected_txn_count': 100,
'expected_txn_volume': 2 * 100,
'default_slippage': True
}
self.transaction_sim(**params)
# same scenario, but with short sales
params2 = {
'trade_count': 360,
'trade_interval': timedelta(minutes=1),
'order_count': 2,
'order_amount': -100,
'order_interval': timedelta(minutes=1),
'expected_txn_count': 100,
'expected_txn_volume': 2 * -100,
'default_slippage': True
}
self.transaction_sim(**params2)
@timed(DEFAULT_TIMEOUT)
def test_collapsing_orders(self):
# create a scenario where order.amount <<< trade.volume
# to test that several orders can be covered properly by one trade,
# but are represented by multiple transactions.
params1 = {
'trade_count': 6,
'trade_interval': timedelta(hours=1),
'order_count': 24,
'order_amount': 1,
'order_interval': timedelta(minutes=1),
            # because the orders placed total less than 25% of one trade, the
            # simulator can cover all of them with a single trade, producing
            # one transaction per order.
'expected_txn_count': 24,
'expected_txn_volume': 24
}
self.transaction_sim(**params1)
# second verse, same as the first. except short!
params2 = {
'trade_count': 6,
'trade_interval': timedelta(hours=1),
'order_count': 24,
'order_amount': -1,
'order_interval': timedelta(minutes=1),
'expected_txn_count': 24,
'expected_txn_volume': -24
}
self.transaction_sim(**params2)
        # Run the collapsed trades over daily trade intervals,
        # ensuring that our delay works for daily intervals as well.
params3 = {
'trade_count': 6,
'trade_interval': timedelta(days=1),
'order_count': 24,
'order_amount': 1,
'order_interval': timedelta(minutes=1),
'expected_txn_count': 24,
'expected_txn_volume': 24
}
self.transaction_sim(**params3)
@timed(DEFAULT_TIMEOUT)
def test_alternating_long_short(self):
# create a scenario where we alternate buys and sells
params1 = {
'trade_count': int(6.5 * 60 * 4),
'trade_interval': timedelta(minutes=1),
'order_count': 4,
'order_amount': 10,
'order_interval': timedelta(hours=24),
'alternate': True,
'complete_fill': True,
'expected_txn_count': 4,
'expected_txn_volume': 0 # equal buys and sells
}
self.transaction_sim(**params1)
def transaction_sim(self, **params):
"""This is a utility method that asserts expected
results for conversion of orders to transactions given a
trade history
"""
trade_count = params['trade_count']
trade_interval = params['trade_interval']
order_count = params['order_count']
order_amount = params['order_amount']
order_interval = params['order_interval']
expected_txn_count = params['expected_txn_count']
expected_txn_volume = params['expected_txn_volume']
# optional parameters
# ---------------------
# if present, alternate between long and short sales
alternate = params.get('alternate')
# if present, expect transaction amounts to match orders exactly.
complete_fill = params.get('complete_fill')
asset1 = self.asset_finder.retrieve_asset(1)
metadata = make_simple_equity_info([asset1.sid], self.start, self.end)
with TempDirectory() as tempdir, \
tmp_trading_env(equities=metadata,
load=self.make_load_function()) as env:
if trade_interval < timedelta(days=1):
sim_params = factory.create_simulation_parameters(
start=self.start,
end=self.end,
data_frequency="minute"
)
minutes = self.trading_calendar.minutes_window(
sim_params.first_open,
int((trade_interval.total_seconds() / 60) * trade_count)
+ 100)
price_data = np.array([10.1] * len(minutes))
assets = {
asset1.sid: pd.DataFrame({
"open": price_data,
"high": price_data,
"low": price_data,
"close": price_data,
"volume": np.array([100] * len(minutes)),
"dt": minutes
}).set_index("dt")
}
write_bcolz_minute_data(
self.trading_calendar,
self.trading_calendar.sessions_in_range(
self.trading_calendar.minute_to_session_label(
minutes[0]
),
self.trading_calendar.minute_to_session_label(
minutes[-1]
)
),
tempdir.path,
iteritems(assets),
)
equity_minute_reader = BcolzMinuteBarReader(tempdir.path)
data_portal = DataPortal(
env.asset_finder, self.trading_calendar,
first_trading_day=equity_minute_reader.first_trading_day,
equity_minute_reader=equity_minute_reader,
)
else:
sim_params = factory.create_simulation_parameters(
data_frequency="daily"
)
days = sim_params.sessions
assets = {
1: pd.DataFrame({
"open": [10.1] * len(days),
"high": [10.1] * len(days),
"low": [10.1] * len(days),
"close": [10.1] * len(days),
"volume": [100] * len(days),
"day": [day.value for day in days]
}, index=days)
}
path = os.path.join(tempdir.path, "testdata.bcolz")
BcolzDailyBarWriter(path, self.trading_calendar, days[0],
days[-1]).write(
assets.items()
)
equity_daily_reader = BcolzDailyBarReader(path)
data_portal = DataPortal(
env.asset_finder, self.trading_calendar,
first_trading_day=equity_daily_reader.first_trading_day,
equity_daily_reader=equity_daily_reader,
)
if "default_slippage" not in params or \
not params["default_slippage"]:
slippage_func = FixedSlippage()
else:
slippage_func = None
blotter = Blotter(sim_params.data_frequency, slippage_func)
start_date = sim_params.first_open
if alternate:
alternator = -1
else:
alternator = 1
tracker = PerformanceTracker(sim_params, self.trading_calendar,
self.env)
# replicate what tradesim does by going through every minute or day
# of the simulation and processing open orders each time
if sim_params.data_frequency == "minute":
ticks = minutes
else:
ticks = days
transactions = []
order_list = []
order_date = start_date
for tick in ticks:
blotter.current_dt = tick
if tick >= order_date and len(order_list) < order_count:
# place an order
direction = alternator ** len(order_list)
order_id = blotter.order(
asset1,
order_amount * direction,
MarketOrder())
order_list.append(blotter.orders[order_id])
order_date = order_date + order_interval
                    # Move after-market orders to just after the next
                    # market open.
if order_date.hour >= 21:
if order_date.minute >= 00:
order_date = order_date + timedelta(days=1)
order_date = order_date.replace(hour=14, minute=30)
else:
bar_data = BarData(
data_portal=data_portal,
simulation_dt_func=lambda: tick,
data_frequency=sim_params.data_frequency,
trading_calendar=self.trading_calendar,
restrictions=NoRestrictions(),
)
txns, _, closed_orders = blotter.get_transactions(bar_data)
for txn in txns:
tracker.process_transaction(txn)
transactions.append(txn)
blotter.prune_orders(closed_orders)
for i in range(order_count):
order = order_list[i]
self.assertEqual(order.asset, asset1)
self.assertEqual(order.amount, order_amount * alternator ** i)
if complete_fill:
self.assertEqual(len(transactions), len(order_list))
total_volume = 0
for i in range(len(transactions)):
txn = transactions[i]
total_volume += txn.amount
if complete_fill:
order = order_list[i]
self.assertEqual(order.amount, txn.amount)
self.assertEqual(total_volume, expected_txn_volume)
self.assertEqual(len(transactions), expected_txn_count)
cumulative_pos = tracker.position_tracker.positions[asset1]
if total_volume == 0:
self.assertIsNone(cumulative_pos)
else:
self.assertEqual(total_volume, cumulative_pos.amount)
# the open orders should not contain the asset.
oo = blotter.open_orders
self.assertNotIn(
asset1,
oo,
"Entry is removed when no open orders"
)
def test_blotter_processes_splits(self):
blotter = Blotter('daily', equity_slippage=FixedSlippage())
# set up two open limit orders with very low limit prices,
# one for sid 1 and one for sid 2
asset1 = self.asset_finder.retrieve_asset(1)
asset2 = self.asset_finder.retrieve_asset(2)
asset133 = self.asset_finder.retrieve_asset(133)
blotter.order(asset1, 100, LimitOrder(10))
blotter.order(asset2, 100, LimitOrder(10))
# send in splits for assets 133 and 2. We have no open orders for
# asset 133 so it should be ignored.
blotter.process_splits([(asset133, 0.5), (asset2, 0.3333)])
for asset in [asset1, asset2]:
order_lists = blotter.open_orders[asset]
self.assertIsNotNone(order_lists)
self.assertEqual(1, len(order_lists))
asset1_order = blotter.open_orders[1][0]
asset2_order = blotter.open_orders[2][0]
# make sure the asset1 order didn't change
self.assertEqual(100, asset1_order.amount)
self.assertEqual(10, asset1_order.limit)
self.assertEqual(1, asset1_order.asset)
# make sure the asset2 order did change
# to 300 shares at 3.33
self.assertEqual(300, asset2_order.amount)
self.assertEqual(3.33, asset2_order.limit)
self.assertEqual(2, asset2_order.asset)
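def _split_adjustment_sketch(amount=100, limit=10.0, ratio=0.3333):
    """ Rough sketch, not part of the original tests, of the adjustment
    asserted in test_blotter_processes_splits above. It assumes the blotter
    scales open-order share counts by 1 / ratio (dropping fractional shares)
    and limit prices by the ratio (rounded to the nearest cent). """
    new_amount = int(amount / ratio)
    new_limit = round(limit * ratio, 2)
    assert (new_amount, new_limit) == (300, 3.33)
    return new_amount, new_limit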
class TradingEnvironmentTestCase(WithLogger,
WithTradingEnvironment,
ZiplineTestCase):
"""
Tests for date management utilities in zipline.finance.trading.
"""
def test_simulation_parameters(self):
sp = SimulationParameters(
start_session=pd.Timestamp("2008-01-01", tz='UTC'),
end_session=pd.Timestamp("2008-12-31", tz='UTC'),
capital_base=100000,
trading_calendar=self.trading_calendar,
)
self.assertTrue(sp.last_close.month == 12)
self.assertTrue(sp.last_close.day == 31)
@timed(DEFAULT_TIMEOUT)
def test_sim_params_days_in_period(self):
# January 2008
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5
# 6 7 8 9 10 11 12
# 13 14 15 16 17 18 19
# 20 21 22 23 24 25 26
# 27 28 29 30 31
params = SimulationParameters(
start_session=pd.Timestamp("2007-12-31", tz='UTC'),
end_session=pd.Timestamp("2008-01-07", tz='UTC'),
capital_base=100000,
trading_calendar=self.trading_calendar,
)
expected_trading_days = (
datetime(2007, 12, 31, tzinfo=pytz.utc),
# Skip new years
# holidays taken from: http://www.nyse.com/press/1191407641943.html
datetime(2008, 1, 2, tzinfo=pytz.utc),
datetime(2008, 1, 3, tzinfo=pytz.utc),
datetime(2008, 1, 4, tzinfo=pytz.utc),
# Skip Saturday
# Skip Sunday
datetime(2008, 1, 7, tzinfo=pytz.utc)
)
num_expected_trading_days = 5
self.assertEquals(
num_expected_trading_days,
len(params.sessions)
)
np.testing.assert_array_equal(expected_trading_days,
params.sessions.tolist())
|
apache-2.0
|
mlyundin/scikit-learn
|
examples/cluster/plot_digits_linkage.py
|
369
|
2959
|
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
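# A minimal follow-up sketch, not part of the original example: one way to see
# the "rich get richer" effect numerically is to print the cluster size
# distribution per linkage. Uneven counts (a few large clusters plus near
# singletons) are expected for average linkage.
for linkage in ('ward', 'average', 'complete'):
    clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
    clustering.fit(X_red)
    sizes = np.bincount(clustering.labels_, minlength=10)
    print("%s linkage cluster sizes: %s" % (linkage, np.sort(sizes)[::-1]))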
|
bsd-3-clause
|
Karel-van-de-Plassche/QLKNN-develop
|
tests/gen4_test_files/hypercube_to_pandas.py
|
2
|
18564
|
import time
from itertools import product
import gc
import os
import copy
import shutil
from functools import reduce
import numpy as np
import xarray as xr
import pandas as pd
#from dask.distributed import Client, get_client
from dask.diagnostics import visualize, ProgressBar
from dask.diagnostics import Profiler, ResourceProfiler, CacheProfiler
import dask.dataframe as dd
from IPython import embed
from qlknn.dataset.data_io import store_format, sep_prefix
from qlknn.misc.analyse_names import is_transport
try:
profile
except NameError:
from qlknn.misc.tools import profile
from qualikiz_tools.qualikiz_io.outputfiles import xarray_to_pandas
from qlknn.misc.tools import notify_task_done
GAM_LEQ_GB_TMP_PATH = 'gam_cache.nc'
dummy_var = 'efe_GB'
@profile
def metadatize(ds):
""" Move all non-axis dims to metadata """
scan_dims = [dim for dim in ds.dims
if dim not in ['kthetarhos', 'nions', 'numsols']]
metadata = {}
for name in ds.coords:
if (all([dim not in scan_dims for dim in ds[name].dims]) and
name not in ['kthetarhos', 'nions', 'numsols']):
metadata[name] = ds[name].values
ds = ds.drop(name)
ds.attrs.update(metadata)
return ds
@profile
def absambi(ds):
""" Calculate absambi; ambipolairity check (d(charge)/dt ~ 0)
Args:
ds: Dataset containing pf[i|e]_GB, normni and Zi
Returns:
ds: ds with absambi: sum(pfi * normni * Zi) / pfe
"""
ds['absambi'] = ((ds['pfi_GB'] * ds['normni'] * ds['Zi']).sum('nions')
/ ds['pfe_GB'])
ds['absambi'] = xr.where(ds['pfe_GB'] == 0, 1, ds['absambi'])
return ds
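if __name__ == '__main__':
    # Hypothetical toy check of absambi, not part of the original pipeline:
    # two ions at a single point with pfi = (1, 1), normni = (0.9, 0.1),
    # Zi = (1, 2) and pfe = 1.1, so absambi should be 1 up to rounding.
    _demo = xr.Dataset({
        'pfi_GB': ('nions', [1.0, 1.0]),
        'normni': ('nions', [0.9, 0.1]),
        'Zi': ('nions', [1.0, 2.0]),
        'pfe_GB': ((), 1.1),
    })
    print(absambi(_demo)['absambi'].values)  # ~1.0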
@profile
def calculate_normni(ds):
""" Calculate ninorm from Zeff and Nex. Assumes two ions"""
Z0, Z1 = ds['Zi']
Zeff = ds['Zeff']
ninorm1 = (Zeff - Z0) / (Z1 **2 - Z1 * Z0)
ninorm0 = (1 - Z1 * ninorm1) / Z0
ds['normni'] = xr.concat([ninorm0, ninorm1], dim='nions')
return ds
@profile
def calculate_rotdivs(ds, rotdim='Machtor'):
""" Calculate the rotdiv vars
Rotdiv vars are variables of the form flux_without_rotation
divided by flux_with_rotation, for example efe_GB_rot0_div_efe_GB
    This is used to train the rotdiv networks.
"""
for var_name in [col for col in ds.data_vars if is_transport(col)]:
rotdiv_name = var_name + '_rot0_div_' + var_name
var = ds[var_name]
rotdiv = var.sel({rotdim: 0}) / var
rotdiv = xr.where(var == 0, 0, rotdiv)
ds[rotdiv_name] = rotdiv
return ds
@profile
def determine_stability(ds):
""" Determine if a point is TEM or ITG unstable. True if unstable """
kthetarhos = ds['kthetarhos']
bound_idx = len(kthetarhos[kthetarhos <= 2])
ome = ds['ome_GB']
gam_leq = ds['gam_leq_GB']
ome = ome.isel(kthetarhos=slice(None, bound_idx))
ion_unstable = (gam_leq != 0)
ds['TEM'] = ion_unstable & (ome > 0).any(dim=['kthetarhos', 'numsols'])
ds['ITG'] = ion_unstable & (ome < 0).any(dim=['kthetarhos', 'numsols'])
return ds
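if __name__ == '__main__':
    # Hypothetical toy example, not from the original code: one unstable point
    # where all low-k modes rotate in the electron direction (ome > 0), so the
    # point is flagged TEM but not ITG.
    _demo = xr.Dataset(
        {'ome_GB': (('kthetarhos', 'numsols'), [[1.0], [0.5]]),
         'gam_leq_GB': ((), 0.3)},
        coords={'kthetarhos': [0.5, 1.5]},
    )
    _demo = determine_stability(_demo)
    print(bool(_demo['TEM']), bool(_demo['ITG']))  # True False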
@profile
def sum_pf(df=None, vt=None, vr=0, vc=None, An=None):
""" Calculate particle flux from diffusivity and pinch"""
pf = df * An + vt + vr + vc
return pf
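if __name__ == '__main__':
    # Hypothetical numeric check of sum_pf, for illustration only: with
    # diffusivity df = 2, normalized gradient An = 3 and pinches vt = 0.5,
    # vr = 0.1, vc = -0.2, the particle flux is 2*3 + 0.5 + 0.1 - 0.2 = 6.4.
    print(sum_pf(df=2.0, vt=0.5, vr=0.1, vc=-0.2, An=3.0))  # -> 6.4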
@profile
def sum_pinch(ds):
""" Sum all pinch terms together to va[i|e]_GB """
for mode in ['', 'ITG', 'TEM']:
ds['vae' + mode + '_GB'] = ds['vte' + mode + '_GB'] + ds['vce' + mode + '_GB']
ds['vai' + mode + '_GB'] = ds['vti' + mode + '_GB'] + ds['vci' + mode + '_GB']
if 'vri' + mode + '_GB' in ds:
ds['vai' + mode + '_GB'] += ds['vri' + mode + '_GB']
else:
print('Warning! vai' + mode + '_GB not found!')
return ds
@profile
def calculate_particle_sepfluxes(ds):
""" Calculate pf[i|e][ITG|TEM] from diffusivity and pinch
This is needed because of a bug in QuaLiKiz 2.4.0 in which
pf[i|e][ITG|TEM] was not written to file. Fortunately,
this can be re-calculated from the saved diffusivity
    and pinch files: df, vt, vc, and vr. Will not do anything
if already contained in ds.
"""
for spec, mode in product(['e', 'i'], ['ITG', 'TEM']):
pf_name = 'pf' + spec + mode + '_GB'
if pf_name not in ds:
fluxes = ['df', 'vt', 'vc']
if spec == 'i':
fluxes.append('vr')
parts = {flux: flux + spec + mode + '_GB' for flux in fluxes}
parts = {flux: ds[part] for flux, part in parts.items()}
pf = sum_pf(**parts, An=ds['An'])
pf.name = pf_name
ds[pf.name] = pf
return ds
@profile
def remove_rotation(ds):
""" Drop the rotation-related variables from dataset """
for value in ['vfiTEM_GB', 'vfiITG_GB', 'vriTEM_GB', 'vriITG_GB']:
try:
ds = ds.drop(value)
except ValueError:
print('{!s} already removed'.format(value))
return ds
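if __name__ == '__main__':
    # Hypothetical illustration, not part of the original pipeline: dropping
    # works even when only some rotation variables are present; the missing
    # ones just print a notice.
    _demo = xr.Dataset({'vriITG_GB': ((), 0.0), 'efe_GB': ((), 1.0)})
    print(list(remove_rotation(_demo).data_vars))  # ['efe_GB']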
def open_with_disk_chunks(path, dask=True, one_chunk=False):
""" Determine the on-disk chunk sizes and open dataset
Best performance in Dask is achieved if the on-disk chunks
(as saved by xarray) are aligned with the Dask chunks.
    This function assumes all chunked variables have the
    same on-disk chunks (the xarray default)
"""
ds = xr.open_dataset(path)
if dask:
if 'efe_GB' in ds.data_vars:
chunk_sizes = ds['efe_GB']._variable._encoding['chunksizes']
dims = ds['efe_GB']._variable.dims
else:
raise Exception('Could not figure out base chunk sizes, no efe_GB')
for dim in ds.dims:
if dim not in dims:
for var in ds.data_vars.values():
if dim in var.dims:
idx = var._variable.dims.index(dim)
dims += (dim,)
chunk_sizes += (var._variable._encoding['chunksizes'][idx],)
break
not_in_dims = set(ds.dims) - set(dims)
if len(not_in_dims) != 0:
print('{!s} not in dims, but are dims of dataset'.format(not_in_dims))
ds.close()
# Re-open dataset with on-disk chunksizes
if one_chunk:
chunks = {dim: length for dim, length in ds.dims.items()}
else:
chunks = dict(zip(dims, chunk_sizes))
ds_kwargs = {
'chunks': chunks,
#'cache': True
#'lock': lock
}
ds = xr.open_dataset(path,
**ds_kwargs)
else:
ds_kwargs = {}
return ds, ds_kwargs
def gcd(x, y):
"""Euclidian algorithm to find Greatest Common Devisor of two numbers"""
while(y):
x, y = y, x % y
return x
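if __name__ == '__main__':
    # Quick illustration, not part of the pipeline: gcd is used by
    # get_dims_chunks below to reconcile unequal dask chunk sizes.
    print(gcd(12, 18))  # -> 6
    print(gcd(7, 5))    # -> 1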
def get_dims_chunks(var):
""" Get the current dask chunks of a given variable """
if var.chunks is not None:
if isinstance(var.chunks, dict):
# xarray-style
sizes = var.chunks
chunksizes = [sizes[dim][0] if sizes[dim][:-1] == sizes[dim][1:] else None
for dim in var.dims]
if isinstance(var.chunks, tuple):
# dask-style
chunksizes = []
for sizes in var.chunks:
if sizes[1:] == sizes[:-1]: #If all are equal
chunksizes.append(sizes[0])
elif np.unique(sizes).size == 2:
chunksizes.append(gcd(np.unique(sizes)[0], np.unique(sizes)[1]))
else:
                        # Fall back to the GCD of all distinct chunk sizes
                        chunksizes.append(reduce(gcd, np.unique(sizes)))
if None in chunksizes:
raise Exception('Unequal size for one of the chunks in {!s}'.format(var.chunks))
else:
print('No chunks for {!s}'.format(var.name))
chunksizes = None
return chunksizes
@profile
def calculate_grow_vars(ds):
""" Calculate maxiumum growth-rate based variables """
kthetarhos = ds['kthetarhos']
bound_idx = len(kthetarhos[kthetarhos <= 2])
gam = ds['gam_GB']
gam = gam.max('numsols')
gam_leq = gam.isel(kthetarhos=slice(None, bound_idx))
gam_leq = gam_leq.max('kthetarhos')
gam_leq.name = 'gam_leq_GB'
if kthetarhos.size > bound_idx:
gam_great = gam.isel(kthetarhos=slice(bound_idx + 1, None))
gam_great = gam_great.max('kthetarhos')
gam_great.name = 'gam_great_GB'
else:
gam_great = None
return gam_leq, gam_great
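if __name__ == '__main__':
    # Hypothetical toy dataset to illustrate calculate_grow_vars: four
    # kthetarhos points (two at or below 2, two above) and two numsols.
    # gam_leq reduces over the low-k part, gam_great over the high-k part.
    _demo = xr.Dataset(
        {'gam_GB': (('kthetarhos', 'numsols', 'dimx'),
                    np.random.rand(4, 2, 3))},
        coords={'kthetarhos': [0.5, 1.5, 5.0, 10.0]},
    )
    _gam_leq, _gam_great = calculate_grow_vars(_demo)
    print(_gam_leq.dims, _gam_great.dims)  # ('dimx',) ('dimx',)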
@profile
def merge_gam_leq_great(ds, ds_kwargs=None, rootdir='.', use_disk_cache=False, starttime=None):
""" Calculate and cache (or load from cache) gam_leq and gam_great
As advised by xarray http://xarray.pydata.org/en/stable/dask.html#optimization-tips
save gam_leq (which is an intermediate result) to disk.
Args:
ds: xarray.Dataset containing gam_GB
Kwargs:
ds_kwargs: xarray.Dataset kwargs passed to xr.open_dataset. [Default: None]
rootdir: Directory where the cache will be saved/is saved
use_disk_cache: Just load an already cached dataset [Default: False]
starttime: Time the script was started. All debug timetraces will be
relative to this point. [Default: current time]
Returns:
ds: The xarray.Dataset with gam_leq and gam_great merged in
"""
if ds_kwargs is None:
ds_kwargs = {}
if starttime is None:
starttime = time.time()
gam_cache_dir = os.path.join(rootdir, GAM_LEQ_GB_TMP_PATH)
if not use_disk_cache:
gam_leq, gam_great = calculate_grow_vars(ds)
chunksizes = get_dims_chunks(gam_leq)
        # Preserve chunks/encoding from gam_GB
encoding = {}
for key in ['zlib', 'shuffle', 'complevel', 'dtype', ]:
encoding[key] = ds['gam_GB'].encoding[key]
if chunksizes is not None:
encoding['chunksizes'] = chunksizes
# We assume this fits in RAM, so load before writing to get some extra speed
gam_leq.load()
gam_leq.to_netcdf(gam_cache_dir, encoding={gam_leq.name: encoding})
if gam_great is not None:
gam_great.load()
gam_great.to_netcdf(gam_cache_dir, encoding={gam_great.name: encoding}, mode='a')
    # Now open the cache with the same args as the original dataset; since we
# aggregated over kthetarhos and numsols, remove them from the chunk list
kwargs = copy.deepcopy(ds_kwargs)
if chunksizes is not None:
chunks = kwargs.pop('chunks')
for key in list(chunks.keys()):
if key not in gam_leq.dims:
chunks.pop(key)
else:
chunks = None
# Finally, open and merge the cache
ds_gam = xr.open_dataset(os.path.join(rootdir, GAM_LEQ_GB_TMP_PATH),
chunks=chunks,
**kwargs)
ds = ds.merge(ds_gam.data_vars)
return ds
def compute_and_save_var(ds, new_ds_path, varname, chunks=None, starttime=None):
""" Save a variable from the given dataset in a new dataset
Args:
ds: xarray.Dataset containing gam_GB
new_ds_path: The path of the target new dataset (string)
varname: Name of the variable to save. Should be in ds.
        chunks: A dict with the on-disk chunk size per dimension.
Kwargs:
starttime: Time the script was started. All debug timetraces will be
relative to this point. [Default: current time]
"""
if starttime is None:
starttime = time.time()
#var = ds[varname]
encoding = {varname: {'zlib': True}}
if chunks is not None:
encoding[varname]['chunksizes'] = [chunks[dim] for dim in ds[varname].dims]
#var.load()
ds[varname].to_netcdf(new_ds_path, 'a',
encoding=encoding)
notify_task_done(varname + ' saved', starttime)
@profile
def compute_and_save(ds, new_ds_path, chunks=None, starttime=None):
""" Sequentially load all data_vars in RAM and write to new dataset
This function forcibly loads variables in RAM, also triggering any
lazy-loaded dask computations. We thus assume the result of each of
these calculations fits in RAM. This is done because as of xarray
'0.10.4', aligning on-disk chunks and dask chunks is still work in
progress.
Args:
ds: Dataset to write to new dataset
        new_ds_path: Absolute path of the netCDF4 file that will
be generated
Kwargs:
chunks: Dict with the dimension: chunksize for the new ds file
starttime: Time the script was started. All debug timetraces will be
relative to this point. [Default: current time]
"""
if starttime is None:
starttime = time.time()
new_ds = xr.Dataset()
new_ds.attrs = ds.attrs
for coord in ds.coords:
new_ds.coords[coord] = ds[coord]
new_ds.to_netcdf(new_ds_path)
notify_task_done('Coords saving', starttime)
data_vars = list(ds.data_vars)
calced_dims = ['gam_leq_GB', 'gam_great_GB', 'TEM', 'ITG', 'absambi']
for spec, mode in product(['e', 'i'], ['ITG', 'TEM']):
flux = 'pf'
calced_dims.append(flux + spec + mode + '_GB')
for calced_dim in reversed(calced_dims):
if calced_dim in ds:
data_vars.insert(0, data_vars.pop(data_vars.index(calced_dim)))
for ii, varname in enumerate(data_vars):
print('starting {:2d}/{:2d}: {!s}'.format(ii + 1, len(data_vars), varname))
compute_and_save_var(ds, new_ds_path, varname, chunks, starttime=starttime)
@profile
def save_prepared_ds(ds, prepared_ds_path, starttime=None, ds_kwargs=None):
""" Save a prepared dataset to disk
    Will use dask to write if the dataset has 'chunks', and will do some
profiling as well. Otherwise, just write with xarray.
"""
if starttime is None:
starttime = time.time()
if ds_kwargs is None:
ds_kwargs = {}
if 'chunks' in ds_kwargs:
with Profiler() as prof, ResourceProfiler(dt=0.25) as rprof, CacheProfiler() as cprof:
compute_and_save(ds, prepared_ds_path, chunks=ds_kwargs['chunks'], starttime=starttime)
notify_task_done('prep_megarun', starttime)
visualize([prof, rprof, cprof], file_path='profile_prep.html', show=False)
else:
compute_and_save(ds, prepared_ds_path, starttime=starttime)
@profile
def create_input_cache(ds, cachedir):
""" Create on-disk cache of the unfolded hypercube dims
This function uses the native `xarray.Dataset.to_dataframe()` function
to unfold the dims in (2D) table format, the classical `pandas.DataFrame`.
As this operation uses a lot of RAM, we cache the index one-by-one to
disk, and later glue it together using `input_hdf5_from_cache`. The
on-disk format is parquet.
    Args:
        ds: The dataset of which the dims/indices will be unfolded
        cachedir: Path where the cache folder should be made. WARNING!
            OVERWRITES EXISTING FOLDERS!
"""
# Convert to MultiIndexed DataFrame. Use efe_GB as dummy variable to get the right index
input_names = list(ds[dummy_var].dims)
dtype = ds[dummy_var].dtype
input_df = ds['dimx'].to_dataframe()
#input_df.drop(input_df.columns[0], axis=1, inplace=True)
# Create empty cache dir
if os.path.exists(cachedir):
shutil.rmtree(cachedir)
os.mkdir(cachedir)
# Now unfold the MultiIndex one-by-one
num_levels = len(input_df.index.levels)
for ii in range(num_levels):
input_df.reset_index(level=0, inplace=True)
varname = input_df.columns[0]
print('starting {:2d}/{:2d}: {!s}'.format(ii + 1, num_levels, varname))
df = input_df[['dimx', varname]].copy(True)
del input_df[varname]
df.reset_index(inplace=True, drop=True)
df = df.astype(dtype, copy=False)
cachefile = os.path.join(cachedir, varname)
ddf = dd.from_array(df.values, chunksize=len(df) // 10, columns=df.columns)
ddf.to_parquet(cachefile + '.parquet', write_index=False)
del df, ddf
gc.collect()
@profile
def input_hdf5_from_cache(store_name, cachedir, columns=None, mode='w', compress=True):
""" Create HDF5 file using cache from `create_input_cache`
The contents of cachedir are read into memory, then they are
concatenated and saved to a single HDF5 file
    Args:
        store_name: Name of the HDF5 store/file that will be written to
        cachedir: Path that will be scanned for cache files
Kwargs:
mode: Mode to be passed to `to_hdf`. Overwrite by
default [Default: 'w']
compress: Compress the on-disk HDF5 [Default: True]
"""
if compress:
panda_kwargs = dict(complib='zlib', complevel=1)
files = [os.path.join(cachedir, name) for name in os.listdir(cachedir)]
ddfs = []
for name in files:
ddf = dd.read_parquet(name)
ddf['dimx'] = ddf['dimx'].astype('int64')
ddf = ddf.set_index('dimx')
ddfs.append(ddf)
input_ddf = dd.concat(ddfs, axis=1)
#input_ddf['dimx'] = 1
#input_ddf['dimx'] = input_ddf['dimx'].cumsum() - 1
#input_ddf = input_ddf.set_index('dimx', drop=True)
input_ddf = input_ddf.loc[:, columns]
input_ddf.to_hdf(store_name, 'input', mode=mode, **panda_kwargs)
@profile
def data_hdf5_from_ds(ds, store_name, compress=True):
""" Add data_vars from ds to HDF5 file one-by-one
    Args:
ds: The dataset from which to write the data_vars
store_name: Name of the HDF5 store/file that will be written to
Kwargs:
compress: Compress the on-disk HDF5 [Default: True]
"""
if compress:
panda_kwargs = dict(complib='zlib', complevel=1)
for ii, varname in enumerate(ds.data_vars):
print('starting {:2d}/{:2d}: {!s}'.format(ii + 1, len(ds.data_vars), varname))
#ddf = ds[[varname]].to_dask_dataframe()
#da = ds.variables[varname].data
df = ds[['dimx', varname]].to_dataframe()
df.reset_index(inplace=True, drop=True)
df = df.set_index('dimx')
df.sort_index(inplace=True)
        df.to_hdf(store_name, sep_prefix + varname, format='table', **panda_kwargs)
#da.to_hdf5(store_name, '/output/' + varname, compression=compress)
def save_attrs(attrs, store_name):
""" Save a dictionary to the 'constants' field in the speciefied HDF5 store"""
store = pd.HDFStore(store_name)
store['constants'] = pd.Series(attrs)
store.close()
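if __name__ == '__main__':
    # Minimal usage sketch with a hypothetical file name; assumes the PyTables
    # backend for pandas HDF5 I/O is installed.
    _demo_store = 'demo_constants.h5'
    save_attrs({'Zeff': 1.0, 'Nustar': 1e-3}, _demo_store)
    with pd.HDFStore(_demo_store) as _store:
        print(_store['constants'])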
|
mit
|
vorwerkc/pymatgen
|
pymatgen/core/structure.py
|
3
|
165217
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes used to define a non-periodic molecule and a
periodic structure.
"""
import collections
import functools
import itertools
import json
import math
import os
import random
import re
import warnings
from abc import ABCMeta, abstractmethod
from fnmatch import fnmatch
from typing import Dict, Iterable, Iterator, List, Optional, Sequence, Set, Tuple, Union, Callable
try:
import ruamel.yaml as yaml
except ImportError:
try:
import ruamel_yaml as yaml # type: ignore # noqa
except ImportError:
import yaml # type: ignore # noqa
import numpy as np
from monty.dev import deprecated
from monty.io import zopen
from monty.json import MSONable
from tabulate import tabulate
from pymatgen.core.bonds import CovalentBond, get_bond_length
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice, get_points_in_spheres
from pymatgen.core.operations import SymmOp
from pymatgen.core.periodic_table import DummySpecies, Element, Species, get_el_sp
from pymatgen.core.sites import PeriodicSite, Site
from pymatgen.core.units import Length, Mass
from pymatgen.util.coord import all_distances, get_angle, lattice_points_in_supercell
from pymatgen.util.typing import ArrayLike, CompositionLike, SpeciesLike
class Neighbor(Site):
"""
Simple Site subclass to contain a neighboring atom that skips all the unnecessary checks for speed. Can be
used as a fixed-length tuple of size 3 to retain backwards compatibility with past use cases.
(site, nn_distance, index).
In future, usage should be to call attributes, e.g., Neighbor.index, Neighbor.distance, etc.
"""
def __init__(
self,
species: Composition,
coords: np.ndarray,
properties: dict = None,
nn_distance: float = 0.0,
index: int = 0,
):
"""
:param species: Same as Site
:param coords: Same as Site, but must be fractional.
:param properties: Same as Site
:param nn_distance: Distance to some other Site.
:param index: Index within structure.
"""
self.coords = coords
self._species = species
self.properties = properties or {}
self.nn_distance = nn_distance
self.index = index
def __len__(self):
"""
Make neighbor Tuple-like to retain backwards compatibility.
"""
return 3
def __getitem__(self, i: int): # type: ignore
"""
Make neighbor Tuple-like to retain backwards compatibility.
:param i:
:return:
"""
return (self, self.nn_distance, self.index)[i]
class PeriodicNeighbor(PeriodicSite):
"""
Simple PeriodicSite subclass to contain a neighboring atom that skips all
the unnecessary checks for speed. Can be used as a fixed-length tuple of
size 4 to retain backwards compatibility with past use cases.
(site, distance, index, image).
In future, usage should be to call attributes, e.g., PeriodicNeighbor.index,
PeriodicNeighbor.distance, etc.
"""
def __init__(
self,
species: Composition,
coords: np.ndarray,
lattice: Lattice,
properties: dict = None,
nn_distance: float = 0.0,
index: int = 0,
image: tuple = (0, 0, 0),
):
"""
:param species: Same as PeriodicSite
:param coords: Same as PeriodicSite, but must be fractional.
:param lattice: Same as PeriodicSite
:param properties: Same as PeriodicSite
:param nn_distance: Distance to some other Site.
:param index: Index within structure.
:param image: PeriodicImage
"""
self._lattice = lattice
self._frac_coords = coords
self._species = species
self.properties = properties or {}
self.nn_distance = nn_distance
self.index = index
self.image = image
@property # type: ignore
def coords(self) -> np.ndarray: # type: ignore
"""
:return: Cartesian coords.
"""
return self._lattice.get_cartesian_coords(self._frac_coords)
def __len__(self):
"""
Make neighbor Tuple-like to retain backwards compatibility.
"""
return 4
def __getitem__(self, i: int): # type: ignore
"""
Make neighbor Tuple-like to retain backwards compatibility.
:param i:
:return:
"""
return (self, self.nn_distance, self.index, self.image)[i]
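if __name__ == "__main__":
    # Hypothetical quick check, not part of pymatgen itself: Neighbor keeps the
    # old 3-tuple behaviour, so integer indexing mirrors attribute access.
    _nbr = Neighbor(Composition("Fe"), np.array([0.0, 0.0, 0.0]),
                    nn_distance=2.5, index=3)
    print(len(_nbr))                    # 3
    print(_nbr[1], _nbr.nn_distance)    # 2.5 2.5
    print(_nbr[2], _nbr.index)          # 3 3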
class SiteCollection(collections.abc.Sequence, metaclass=ABCMeta):
"""
Basic SiteCollection. Essentially a sequence of Sites or PeriodicSites.
This serves as a base class for Molecule (a collection of Site, i.e., no
periodicity) and Structure (a collection of PeriodicSites, i.e.,
periodicity). Not meant to be instantiated directly.
"""
# Tolerance in Angstrom for determining if sites are too close.
DISTANCE_TOLERANCE = 0.5
@property
@abstractmethod
def sites(self):
"""
Returns a tuple of sites.
"""
@abstractmethod
def get_distance(self, i: int, j: int) -> float:
"""
Returns distance between sites at index i and j.
Args:
i: Index of first site
j: Index of second site
Returns:
Distance between sites at index i and index j.
"""
@property
def distance_matrix(self) -> np.ndarray:
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this is overwritten to return the nearest image
distance.
"""
return all_distances(self.cart_coords, self.cart_coords)
@property
def species(self) -> List[Composition]:
"""
Only works for ordered structures.
Disordered structures will raise an AttributeError.
Returns:
([Species]) List of species at each site of the structure.
"""
return [site.specie for site in self]
@property
def species_and_occu(self) -> List[Composition]:
"""
List of species and occupancies at each site of the structure.
"""
return [site.species for site in self]
@property
def ntypesp(self) -> int:
"""Number of types of atoms."""
return len(self.types_of_species)
@property
def types_of_species(self) -> Tuple[Union[Element, Species, DummySpecies]]:
"""
List of types of specie.
"""
# Cannot use set since we want a deterministic algorithm.
types = [] # type: List[Union[Element, Species, DummySpecies]]
for site in self:
for sp, v in site.species.items():
if v != 0:
types.append(sp)
return tuple(sorted(set(types))) # type: ignore
@property
def types_of_specie(self) -> Tuple[Union[Element, Species, DummySpecies]]:
"""
Specie->Species rename. Maintained for backwards compatibility.
"""
return self.types_of_species
def group_by_types(self) -> Iterator[Union[Site, PeriodicSite]]:
"""Iterate over species grouped by type"""
for t in self.types_of_species:
for site in self:
if site.specie == t:
yield site
def indices_from_symbol(self, symbol: str) -> Tuple[int, ...]:
"""
Returns a tuple with the sequential indices of the sites
that contain an element with the given chemical symbol.
"""
return tuple((i for i, specie in enumerate(self.species) if specie.symbol == symbol))
@property
def symbol_set(self) -> Tuple[str]:
"""
Tuple with the set of chemical symbols.
Note that len(symbol_set) == len(types_of_specie)
"""
return tuple(sorted(specie.symbol for specie in self.types_of_species)) # type: ignore
@property # type: ignore
def atomic_numbers(self) -> Tuple[int]:
"""List of atomic numbers."""
return tuple(site.specie.Z for site in self) # type: ignore
@property
def site_properties(self) -> Dict[str, List]:
"""
Returns the site properties as a dict of sequences. E.g.,
{"magmom": (5,-5), "charge": (-4,4)}.
"""
props = {} # type: Dict[str, List]
prop_keys = set() # type: Set[str]
for site in self:
prop_keys.update(site.properties.keys())
for k in prop_keys:
props[k] = [site.properties.get(k, None) for site in self]
return props
def __contains__(self, site):
return site in self.sites
def __iter__(self):
return self.sites.__iter__()
def __getitem__(self, ind):
return self.sites[ind]
def __len__(self):
return len(self.sites)
def __hash__(self):
# for now, just use the composition hash code.
return self.composition.__hash__()
@property
def num_sites(self) -> int:
"""
Number of sites.
"""
return len(self)
@property
def cart_coords(self):
"""
Returns a np.array of the cartesian coordinates of sites in the
structure.
"""
return np.array([site.coords for site in self])
@property
def formula(self) -> str:
"""
(str) Returns the formula.
"""
return self.composition.formula
@property
def composition(self) -> Composition:
"""
(Composition) Returns the composition
"""
elmap = collections.defaultdict(float) # type: Dict[Species, float]
for site in self:
for species, occu in site.species.items():
elmap[species] += occu
return Composition(elmap)
@property
def charge(self) -> float:
"""
Returns the net charge of the structure based on oxidation states. If
Elements are found, a charge of 0 is assumed.
"""
charge = 0
for site in self:
for specie, amt in site.species.items():
charge += getattr(specie, "oxi_state", 0) * amt
return charge
@property
def is_ordered(self) -> bool:
"""
Checks if structure is ordered, meaning no partial occupancies in any
of the sites.
"""
return all((site.is_ordered for site in self))
def get_angle(self, i: int, j: int, k: int) -> float:
"""
Returns angle specified by three sites.
Args:
i: Index of first site.
j: Index of second site.
k: Index of third site.
Returns:
Angle in degrees.
"""
v1 = self[i].coords - self[j].coords
v2 = self[k].coords - self[j].coords
return get_angle(v1, v2, units="degrees")
def get_dihedral(self, i: int, j: int, k: int, l: int) -> float:
"""
Returns dihedral angle specified by four sites.
Args:
i: Index of first site
j: Index of second site
k: Index of third site
l: Index of fourth site
Returns:
Dihedral angle in degrees.
"""
v1 = self[k].coords - self[l].coords
v2 = self[j].coords - self[k].coords
v3 = self[i].coords - self[j].coords
v23 = np.cross(v2, v3)
v12 = np.cross(v1, v2)
return math.degrees(math.atan2(np.linalg.norm(v2) * np.dot(v1, v23), np.dot(v12, v23)))
def is_valid(self, tol: float = DISTANCE_TOLERANCE) -> bool:
"""
True if SiteCollection does not contain atoms that are too close
together. Note that the distance definition is based on type of
SiteCollection. Cartesian distances are used for non-periodic
Molecules, while PBC is taken into account for periodic structures.
Args:
tol (float): Distance tolerance. Default is 0.5A.
Returns:
(bool) True if SiteCollection does not contain atoms that are too
close together.
"""
if len(self.sites) == 1:
return True
all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]
return bool(np.min(all_dists) > tol)
@abstractmethod
def to(self, fmt: str = None, filename: str = None):
"""
Generates well-known string representations of SiteCollections (e.g.,
molecules / structures). Should return a string type or write to a file.
"""
@classmethod
@abstractmethod
def from_str(cls, input_string: str, fmt: str):
"""
Reads in SiteCollection from a string.
"""
@classmethod
@abstractmethod
def from_file(cls, filename: str):
"""
Reads in SiteCollection from a filename.
"""
def add_site_property(self, property_name: str, values: List):
"""
Adds a property to a site.
Args:
property_name (str): The name of the property to add.
values (list): A sequence of values. Must be same length as
number of sites.
"""
if len(values) != len(self.sites):
raise ValueError("Values must be same length as sites.")
for site, val in zip(self.sites, values):
site.properties[property_name] = val
def remove_site_property(self, property_name):
"""
        Removes a property from all sites.
        Args:
            property_name (str): The name of the property to remove.
"""
for site in self.sites:
del site.properties[property_name]
def replace_species(self, species_mapping: Dict[SpeciesLike, SpeciesLike]):
"""
Swap species.
Args:
species_mapping (dict): dict of species to swap. Species can be
elements too. E.g., {Element("Li"): Element("Na")} performs
a Li for Na substitution. The second species can be a
sp_and_occu dict. For example, a site with 0.5 Si that is
                passed the mapping {Element('Si'): {Element('Ge'): 0.75,
Element('C'):0.25} } will have .375 Ge and .125 C.
"""
sp_mapping = {get_el_sp(k): v for k, v in species_mapping.items()}
sp_to_replace = set(sp_mapping.keys())
sp_in_structure = set(self.composition.keys())
if not sp_in_structure.issuperset(sp_to_replace):
warnings.warn(
"Some species to be substituted are not present in "
"structure. Pls check your input. Species to be "
"substituted = %s; Species in structure = %s" % (sp_to_replace, sp_in_structure)
)
for site in self.sites:
if sp_to_replace.intersection(site.species):
c = Composition()
for sp, amt in site.species.items():
new_sp = sp_mapping.get(sp, sp)
try:
c += Composition(new_sp) * amt
except Exception:
c += {new_sp: amt}
site.species = c
def add_oxidation_state_by_element(self, oxidation_states: Dict[str, float]):
"""
Add oxidation states.
Args:
oxidation_states (dict): Dict of oxidation states.
E.g., {"Li":1, "Fe":2, "P":5, "O":-2}
"""
try:
for site in self.sites:
new_sp = {}
for el, occu in site.species.items():
sym = el.symbol
new_sp[Species(sym, oxidation_states[sym])] = occu
site.species = Composition(new_sp)
except KeyError:
raise ValueError("Oxidation state of all elements must be " "specified in the dictionary.")
def add_oxidation_state_by_site(self, oxidation_states: List[float]):
"""
Add oxidation states to a structure by site.
Args:
oxidation_states (list): List of oxidation states.
E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]
"""
if len(oxidation_states) != len(self.sites):
raise ValueError("Oxidation states of all sites must be " "specified.")
for site, ox in zip(self.sites, oxidation_states):
new_sp = {}
for el, occu in site.species.items():
sym = el.symbol
new_sp[Species(sym, ox)] = occu
site.species = Composition(new_sp)
def remove_oxidation_states(self):
"""
Removes oxidation states from a structure.
"""
for site in self.sites:
new_sp = collections.defaultdict(float)
for el, occu in site.species.items():
sym = el.symbol
new_sp[Element(sym)] += occu
site.species = Composition(new_sp)
def add_oxidation_state_by_guess(self, **kwargs):
"""
Decorates the structure with oxidation state, guessing
using Composition.oxi_state_guesses()
Args:
**kwargs: parameters to pass into oxi_state_guesses()
"""
oxid_guess = self.composition.oxi_state_guesses(**kwargs)
oxid_guess = oxid_guess or [{e.symbol: 0 for e in self.composition}]
self.add_oxidation_state_by_element(oxid_guess[0])
def add_spin_by_element(self, spins: Dict[str, float]):
"""
Add spin states to a structure.
Args:
spins (dict): Dict of spins associated with elements or species,
e.g. {"Ni":+5} or {"Ni2+":5}
"""
for site in self.sites:
new_sp = {}
for sp, occu in site.species.items():
sym = sp.symbol
oxi_state = getattr(sp, "oxi_state", None)
new_sp[
Species(
sym,
oxidation_state=oxi_state,
properties={"spin": spins.get(str(sp), spins.get(sym, None))},
)
] = occu
site.species = Composition(new_sp)
def add_spin_by_site(self, spins: List[float]):
"""
Add spin states to a structure by site.
Args:
spins (list): List of spins
E.g., [+5, -5, 0, 0]
"""
if len(spins) != len(self.sites):
raise ValueError("Spin of all sites must be " "specified in the dictionary.")
for site, spin in zip(self.sites, spins):
new_sp = {}
for sp, occu in site.species.items():
sym = sp.symbol
oxi_state = getattr(sp, "oxi_state", None)
new_sp[Species(sym, oxidation_state=oxi_state, properties={"spin": spin})] = occu
site.species = Composition(new_sp)
def remove_spin(self):
"""
Removes spin states from a structure.
"""
for site in self.sites:
new_sp = collections.defaultdict(float)
for sp, occu in site.species.items():
oxi_state = getattr(sp, "oxi_state", None)
new_sp[Species(sp.symbol, oxidation_state=oxi_state)] += occu
site.species = new_sp
def extract_cluster(self, target_sites: List[Site], **kwargs):
r"""
Extracts a cluster of atoms based on bond lengths
Args:
target_sites ([Site]): List of initial sites to nucleate cluster.
**kwargs: kwargs passed through to CovalentBond.is_bonded.
Returns:
[Site/PeriodicSite] Cluster of atoms.
"""
cluster = list(target_sites)
others = [site for site in self if site not in cluster]
size = 0
while len(cluster) > size:
size = len(cluster)
new_others = []
for site in others:
for site2 in cluster:
if CovalentBond.is_bonded(site, site2, **kwargs):
cluster.append(site)
break
else:
new_others.append(site)
others = new_others
return cluster
class IStructure(SiteCollection, MSONable):
"""
Basic immutable Structure object with periodicity. Essentially a sequence
of PeriodicSites having a common lattice. IStructure is made to be
(somewhat) immutable so that they can function as keys in a dict. To make
modifications, use the standard Structure object instead. Structure
extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a
structure is equivalent to going through the sites in sequence.
"""
def __init__(
self,
lattice: Union[ArrayLike, Lattice],
species: Sequence[CompositionLike],
coords: Sequence[ArrayLike],
charge: float = None,
validate_proximity: bool = False,
to_unit_cell: bool = False,
coords_are_cartesian: bool = False,
site_properties: dict = None,
):
"""
Create a periodic structure.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
charge (int): overall charge of the structure. Defaults to behavior
in SiteCollection where total charge is the sum of the oxidation
states.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to map all sites into the unit cell,
i.e., fractional coords between 0 and 1. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
"""
if len(species) != len(coords):
raise StructureError(
"The list of atomic species must be of the" " same length as the list of fractional" " coordinates."
)
if isinstance(lattice, Lattice):
self._lattice = lattice
else:
self._lattice = Lattice(lattice)
sites = []
for i, sp in enumerate(species):
prop = None
if site_properties:
prop = {k: v[i] for k, v in site_properties.items()}
sites.append(
PeriodicSite(
sp,
coords[i],
self._lattice,
to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
properties=prop,
)
)
self._sites: Tuple[PeriodicSite, ...] = tuple(sites)
if validate_proximity and not self.is_valid():
raise StructureError(("Structure contains sites that are ", "less than 0.01 Angstrom apart!"))
self._charge = charge
@classmethod
def from_sites(
cls,
sites: List[PeriodicSite],
charge: float = None,
validate_proximity: bool = False,
to_unit_cell: bool = False,
):
"""
Convenience constructor to make a Structure from a list of sites.
Args:
sites: Sequence of PeriodicSites. Sites must have the same
lattice.
charge: Charge of structure.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to translate sites into the unit
cell.
Returns:
(Structure) Note that missing properties are set as None.
"""
if len(sites) < 1:
raise ValueError("You need at least one site to construct a %s" % cls)
prop_keys = [] # type: List[str]
props = {}
lattice = sites[0].lattice
for i, site in enumerate(sites):
if site.lattice != lattice:
raise ValueError("Sites must belong to the same lattice")
for k, v in site.properties.items():
if k not in prop_keys:
prop_keys.append(k)
props[k] = [None] * len(sites)
props[k][i] = v
for k, v in props.items():
if any((vv is None for vv in v)):
warnings.warn("Not all sites have property %s. Missing values " "are set to None." % k)
return cls(
lattice,
[site.species for site in sites],
[site.frac_coords for site in sites],
charge=charge,
site_properties=props,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell,
)
@classmethod
def from_spacegroup(
cls,
sg: str,
lattice: Union[List, np.ndarray, Lattice],
species: Sequence[Union[str, Element, Species, DummySpecies, Composition]],
coords: Sequence[Sequence[float]],
site_properties: Dict[str, Sequence] = None,
coords_are_cartesian: bool = False,
tol: float = 1e-5,
) -> Union["IStructure", "Structure"]:
"""
Generate a structure using a spacegroup. Note that only symmetrically
distinct species and coords should be provided. All equivalent sites
are generated from the spacegroup operations.
Args:
sg (str/int): The spacegroup. If a string, it will be interpreted
as one of the notations supported by
pymatgen.symmetry.groups.Spacegroup. E.g., "R-3c" or "Fm-3m".
If an int, it will be interpreted as an international number.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.symmetry.groups import SpaceGroup
try:
i = int(sg)
sgp = SpaceGroup.from_int_number(i)
except ValueError:
sgp = SpaceGroup(sg)
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not sgp.is_compatible(latt):
raise ValueError(
"Supplied lattice with parameters %s is incompatible with "
"supplied spacegroup %s!" % (latt.parameters, sgp.symbol)
)
if len(species) != len(coords):
raise ValueError(
"Supplied species and coords lengths (%d vs %d) are " "different!" % (len(species), len(coords))
)
frac_coords = (
np.array(coords, dtype=np.float_) if not coords_are_cartesian else latt.get_fractional_coords(coords)
)
props = {} if site_properties is None else site_properties
all_sp = [] # type: List[Union[str, Element, Species, DummySpecies, Composition]]
all_coords = [] # type: List[List[float]]
all_site_properties = collections.defaultdict(list) # type: Dict[str, List]
for i, (sp, c) in enumerate(zip(species, frac_coords)):
cc = sgp.get_orbit(c, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
for k, v in props.items():
all_site_properties[k].extend([v[i]] * len(cc))
return cls(latt, all_sp, all_coords, site_properties=all_site_properties)
@classmethod
def from_magnetic_spacegroup(
cls,
msg: Union[str, "MagneticSpaceGroup"], # type: ignore # noqa: F821
lattice: Union[List, np.ndarray, Lattice],
species: Sequence[Union[str, Element, Species, DummySpecies, Composition]],
coords: Sequence[Sequence[float]],
site_properties: Dict[str, Sequence],
coords_are_cartesian: bool = False,
tol: float = 1e-5,
) -> Union["IStructure", "Structure"]:
"""
Generate a structure using a magnetic spacegroup. Note that only
        symmetrically distinct species, coords and magmoms should be provided.
All equivalent sites are generated from the spacegroup operations.
Args:
msg (str/list/:class:`pymatgen.symmetry.maggroups.MagneticSpaceGroup`):
The magnetic spacegroup.
If a string, it will be interpreted as one of the notations
supported by MagneticSymmetryGroup, e.g., "R-3'c" or "Fm'-3'm".
If a list of two ints, it will be interpreted as the number of
the spacegroup in its Belov, Neronova and Smirnova (BNS) setting.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Unlike Structure.from_spacegroup(),
this argument is mandatory, since magnetic moment information
has to be included. Note that the *direction* of the supplied
magnetic moment relative to the crystal is important, even if
the resulting structure is used for collinear calculations.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
if "magmom" not in site_properties:
raise ValueError("Magnetic moments have to be defined.")
magmoms = [Magmom(m) for m in site_properties["magmom"]]
if not isinstance(msg, MagneticSpaceGroup):
msg = MagneticSpaceGroup(msg) # type: ignore
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not msg.is_compatible(latt):
raise ValueError(
"Supplied lattice with parameters %s is incompatible with "
"supplied spacegroup %s!" % (latt.parameters, msg.sg_symbol)
)
if len(species) != len(coords):
raise ValueError(
"Supplied species and coords lengths (%d vs %d) are " "different!" % (len(species), len(coords))
)
if len(species) != len(magmoms):
raise ValueError(
"Supplied species and magmom lengths (%d vs %d) are " "different!" % (len(species), len(magmoms))
)
frac_coords = coords if not coords_are_cartesian else latt.get_fractional_coords(coords)
all_sp = [] # type: List[Union[str, Element, Species, DummySpecies, Composition]]
all_coords = [] # type: List[List[float]]
all_magmoms = [] # type: List[float]
all_site_properties = collections.defaultdict(list) # type: Dict[str, List]
for i, (sp, c, m) in enumerate(zip(species, frac_coords, magmoms)):
cc, mm = msg.get_orbit(c, m, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
all_magmoms.extend(mm)
for k, v in site_properties.items():
if k != "magmom":
all_site_properties[k].extend([v[i]] * len(cc))
all_site_properties["magmom"] = all_magmoms
return cls(latt, all_sp, all_coords, site_properties=all_site_properties)
@property
def charge(self) -> float:
"""
Overall charge of the structure
"""
if self._charge is None:
return super().charge
return self._charge
@property
def distance_matrix(self) -> np.ndarray:
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this should return the nearest image distance.
"""
return self.lattice.get_all_distances(self.frac_coords, self.frac_coords)
@property
def sites(self) -> Tuple[PeriodicSite, ...]:
"""
Returns an iterator for the sites in the Structure.
"""
return self._sites
@property
def lattice(self) -> Lattice:
"""
Lattice of the structure.
"""
return self._lattice
@property
def density(self) -> float:
"""
Returns the density in units of g/cc
"""
m = Mass(self.composition.weight, "amu")
return m.to("g") / (self.volume * Length(1, "ang").to("cm") ** 3)
def get_space_group_info(self, symprec=1e-2, angle_tolerance=5.0) -> Tuple[str, int]:
"""
Convenience method to quickly get the spacegroup of a structure.
Args:
symprec (float): Same definition as in SpacegroupAnalyzer.
Defaults to 1e-2.
angle_tolerance (float): Same definition as in SpacegroupAnalyzer.
Defaults to 5 degrees.
Returns:
spacegroup_symbol, international_number
"""
# Import within method needed to avoid cyclic dependency.
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
a = SpacegroupAnalyzer(self, symprec=symprec, angle_tolerance=angle_tolerance)
return a.get_space_group_symbol(), a.get_space_group_number()
def matches(self, other, anonymous=False, **kwargs):
"""
Check whether this structure is similar to another structure.
Basically a convenience method to call structure matching.
Args:
other (IStructure/Structure): Another structure.
**kwargs: Same **kwargs as in
:class:`pymatgen.analysis.structure_matcher.StructureMatcher`.
Returns:
            (bool) True if the structures are similar under some affine
transformation.
"""
from pymatgen.analysis.structure_matcher import StructureMatcher
m = StructureMatcher(**kwargs)
if not anonymous:
return m.fit(self, other)
return m.fit_anonymous(self, other)
def __eq__(self, other):
if other is self:
return True
if other is None:
return False
if len(self) != len(other):
return False
if self.lattice != other.lattice:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# For now, just use the composition hash code.
return self.composition.__hash__()
def __mul__(self, scaling_matrix):
"""
        Makes a supercell, allowing sites outside the unit cell.
Args:
scaling_matrix: A scaling matrix for transforming the lattice
vectors. Has to be all integers. Several options are possible:
                a. A full 3x3 scaling matrix defining the linear combination of
the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
1]] generates a new structure with lattice vectors a' =
2a + b, b' = 3b, c' = c where a, b, and c are the lattice
vectors of the original structure.
                b. A sequence of three scaling factors. E.g., [2, 1, 1]
specifies that the supercell should have dimensions 2a x b x
c.
c. A number, which simply scales all lattice vectors by the
same factor.
Returns:
Supercell structure. Note that a Structure is always returned,
even if the input structure is a subclass of Structure. This is
to avoid different arguments signatures from causing problems. If
you prefer a subclass to return its own type, you need to override
this method in the subclass.
"""
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
new_lattice = Lattice(np.dot(scale_matrix, self._lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
for site in self:
for v in c_lat:
s = PeriodicSite(
site.species,
site.coords + v,
new_lattice,
properties=site.properties,
coords_are_cartesian=True,
to_unit_cell=False,
skip_checks=True,
)
new_sites.append(s)
new_charge = self._charge * np.linalg.det(scale_matrix) if self._charge else None
return Structure.from_sites(new_sites, charge=new_charge)
def __rmul__(self, scaling_matrix):
"""
        Similar to __mul__ to preserve commutativity.
"""
return self.__mul__(scaling_matrix)
@property
def frac_coords(self):
"""
Fractional coordinates as a Nx3 numpy array.
"""
return np.array([site.frac_coords for site in self._sites])
@property
def volume(self):
"""
Returns the volume of the structure.
"""
return self._lattice.volume
def get_distance(self, i: int, j: int, jimage=None) -> float:
"""
Get distance between site i and j assuming periodic boundary
        conditions. If jimage of atom j is not specified, the image of atom j
        nearest to atom i is selected and that distance is returned. If jimage
        of atom j is specified, the distance between atom i and that particular
        periodic image of atom j is returned.
Args:
i (int): Index of first site
j (int): Index of second site
jimage: Number of lattice translations in each lattice direction.
Default is None for nearest image.
Returns:
distance
"""
return self[i].distance(self[j], jimage)
def get_sites_in_sphere(
self,
pt: ArrayLike,
r: float,
include_index: bool = False,
include_image: bool = False,
) -> List[PeriodicNeighbor]:
"""
Find all sites within a sphere from the point, including a site (if any)
sitting on the point itself. This includes sites in other periodic
images.
Algorithm:
1. place sphere of radius r in crystal and determine minimum supercell
           (parallelepiped) which would contain a sphere of radius r. For this
we need the projection of a_1 on a unit vector perpendicular
to a_2 & a_3 (i.e. the unit vector in the direction b_1) to
determine how many a_1"s it will take to contain the sphere.
Nxmax = r * length_of_b_1 / (2 Pi)
2. keep points falling within r.
Args:
pt (3x1 array): cartesian coordinates of center of sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
            include_image (bool): Whether the supercell image
is included in the returned data
Returns:
[:class:`pymatgen.core.structure.PeriodicNeighbor`]
"""
site_fcoords = np.mod(self.frac_coords, 1)
neighbors = [] # type: List[PeriodicNeighbor]
for fcoord, dist, i, img in self._lattice.get_points_in_sphere(site_fcoords, pt, r):
nnsite = PeriodicNeighbor(
self[i].species,
fcoord,
self._lattice,
properties=self[i].properties,
nn_distance=dist,
image=img, # type: ignore
index=i,
)
neighbors.append(nnsite)
return neighbors
def get_neighbors(
self,
site: PeriodicSite,
r: float,
include_index: bool = False,
include_image: bool = False,
) -> List[PeriodicNeighbor]:
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Which is the center of the sphere.
r (float): Radius of sphere.
include_index (bool): Deprecated. Now, the non-supercell site index
is always included in the returned data.
include_image (bool): Deprecated. Now the supercell image
is always included in the returned data.
Returns:
[:class:`pymatgen.core.structure.PeriodicNeighbor`]
"""
return self.get_all_neighbors(r, include_index=include_index, include_image=include_image, sites=[site])[0]
@deprecated(get_neighbors, "This is retained purely for checking purposes.")
def get_neighbors_old(self, site, r, include_index=False, include_image=False):
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Which is the center of the sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
            include_image (bool): Whether the supercell image
is included in the returned data
Returns:
[:class:`pymatgen.core.structure.PeriodicNeighbor`]
"""
nn = self.get_sites_in_sphere(site.coords, r, include_index=include_index, include_image=include_image)
return [d for d in nn if site != d[0]]
def _get_neighbor_list_py(
self,
r: float,
sites: List[PeriodicSite] = None,
numerical_tol: float = 1e-8,
exclude_self: bool = True,
) -> Tuple[np.ndarray, ...]:
"""
A python version of getting neighbor_list. The returned values are a tuple of
numpy arrays (center_indices, points_indices, offset_vectors, distances).
Atom `center_indices[i]` has neighbor atom `points_indices[i]` that is
translated by `offset_vectors[i]` lattice vectors, and the distance is
`distances[i]`.
Args:
r (float): Radius of sphere
sites (list of Sites or None): sites for getting all neighbors,
default is None, which means neighbors will be obtained for all
sites. This is useful in the situation where you are interested
only in one subspecies type, and makes it a lot faster.
numerical_tol (float): This is a numerical tolerance for distances.
                Sites which are < numerical_tol are determined to be coincident
                with the site. Sites which are r + numerical_tol away are deemed
to be within r from the site. The default of 1e-8 should be
ok in most instances.
exclude_self (bool): whether to exclude atom neighboring with itself within
numerical tolerance distance, default to True
Returns: (center_indices, points_indices, offset_vectors, distances)
"""
neighbors = self.get_all_neighbors_py(
r=r, include_index=True, include_image=True, sites=sites, numerical_tol=1e-8
)
center_indices = []
points_indices = []
offsets = []
distances = []
for i, nns in enumerate(neighbors):
if len(nns) > 0:
for n in nns:
if exclude_self and (i == n.index) and (n.nn_distance <= numerical_tol):
continue
center_indices.append(i)
points_indices.append(n.index)
offsets.append(n.image)
distances.append(n.nn_distance)
return tuple(
(
np.array(center_indices),
np.array(points_indices),
np.array(offsets),
np.array(distances),
)
)
def get_neighbor_list(
self,
r: float,
sites: Sequence[PeriodicSite] = None,
numerical_tol: float = 1e-8,
exclude_self: bool = True,
) -> Tuple[np.ndarray, ...]:
"""
Get neighbor lists using numpy array representations without constructing
Neighbor objects. If the cython extension is installed, this method will
be orders of magnitude faster than `get_all_neighbors_old` and 2-3x faster
than `get_all_neighbors`.
The returned values are a tuple of numpy arrays
(center_indices, points_indices, offset_vectors, distances).
Atom `center_indices[i]` has neighbor atom `points_indices[i]` that is
translated by `offset_vectors[i]` lattice vectors, and the distance is
`distances[i]`.
Args:
r (float): Radius of sphere
sites (list of Sites or None): sites for getting all neighbors,
default is None, which means neighbors will be obtained for all
sites. This is useful in the situation where you are interested
only in one subspecies type, and makes it a lot faster.
numerical_tol (float): This is a numerical tolerance for distances.
                Sites which are < numerical_tol are determined to be coincident
                with the site. Sites which are r + numerical_tol away are deemed
to be within r from the site. The default of 1e-8 should be
ok in most instances.
exclude_self (bool): whether to exclude atom neighboring with itself within
numerical tolerance distance, default to True
Returns: (center_indices, points_indices, offset_vectors, distances)
"""
try:
from pymatgen.optimization.neighbors import (
find_points_in_spheres, # type: ignore
)
except ImportError:
return self._get_neighbor_list_py(r, sites, exclude_self=exclude_self) # type: ignore
else:
if sites is None:
sites = self.sites
site_coords = np.array([site.coords for site in sites], dtype=float)
cart_coords = np.ascontiguousarray(np.array(self.cart_coords), dtype=float)
lattice_matrix = np.ascontiguousarray(np.array(self.lattice.matrix), dtype=float)
r = float(r)
center_indices, points_indices, images, distances = find_points_in_spheres(
cart_coords,
site_coords,
r=r,
pbc=np.array([1, 1, 1], dtype=int),
lattice=lattice_matrix,
tol=numerical_tol,
)
cond = np.array([True] * len(center_indices))
if exclude_self:
self_pair = (center_indices == points_indices) & (distances <= numerical_tol)
cond = ~self_pair
return tuple(
(
center_indices[cond],
points_indices[cond],
images[cond],
distances[cond],
)
)
def get_all_neighbors(
self,
r: float,
include_index: bool = False,
include_image: bool = False,
sites: Sequence[PeriodicSite] = None,
numerical_tol: float = 1e-8,
) -> List[List[PeriodicNeighbor]]:
"""
Get neighbors for each atom in the unit cell, out to a distance r
Returns a list of list of neighbors for each site in structure.
Use this method if you are planning on looping over all sites in the
crystal. If you only want neighbors for a particular site, use the
        method get_neighbors as it may not have to build such a large supercell.
        However, if you are looping over all sites in the crystal, this method
is more efficient since it only performs one pass over a large enough
supercell to contain all possible atoms out to a distance r.
The return type is a [(site, dist) ...] since most of the time,
subsequent processing requires the distance.
A note about periodic images: Before computing the neighbors, this
operation translates all atoms to within the unit cell (having
fractional coordinates within [0,1)). This means that the "image" of a
site does not correspond to how much it has been translated from its
current position, but rather to which image of the unit cell it resides in.
Args:
r (float): Radius of sphere.
include_index (bool): Deprecated. Now, the non-supercell site index
is always included in the returned data.
include_image (bool): Deprecated. Now the supercell image
is always included in the returned data.
sites (list of Sites or None): sites for getting all neighbors,
default is None, which means neighbors will be obtained for all
sites. This is useful in the situation where you are interested
only in one subspecies type, and makes it a lot faster.
numerical_tol (float): This is a numerical tolerance for distances.
Sites which are < numerical_tol are determined to be coincident
with the site. Sites which are r + numerical_tol away are deemed
to be within r from the site. The default of 1e-8 should be
ok in most instances.
Returns:
[[:class:`pymatgen.core.structure.PeriodicNeighbor`], ..]
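Example (illustrative sketch; ``struct`` stands for any existing Structure
and the 3.0 Angstrom cutoff is an arbitrary choice):
    all_nn = struct.get_all_neighbors(r=3.0)
    for i, nns in enumerate(all_nn):
        for nn in nns:
            # each nn is a PeriodicNeighbor; nn.index is the site index
            # in the structure and nn.nn_distance the distance to site i.
            print(i, nn.index, nn.nn_distance)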
"""
if sites is None:
sites = self.sites
center_indices, points_indices, images, distances = self.get_neighbor_list(
r=r, sites=sites, numerical_tol=numerical_tol
)
if len(points_indices) < 1:
return [[]] * len(sites)
f_coords = self.frac_coords[points_indices] + images
neighbor_dict: Dict[int, List] = collections.defaultdict(list)
lattice = self.lattice
atol = Site.position_atol
all_sites = self.sites
for cindex, pindex, image, f_coord, d in zip(center_indices, points_indices, images, f_coords, distances):
psite = all_sites[pindex]
csite = sites[cindex]
if (
d > numerical_tol
or
# This simply compares the psite and csite. The reason why manual comparison is done is
# for speed. This does not check the lattice since they are always equal. Also, the or construct
# returns True immediately once one of the conditions is satisfied.
psite.species != csite.species
or (not np.allclose(psite.coords, csite.coords, atol=atol))
or (not psite.properties == csite.properties)
):
neighbor_dict[cindex].append(
PeriodicNeighbor(
species=psite.species,
coords=f_coord,
lattice=lattice,
properties=psite.properties,
nn_distance=d,
index=pindex,
image=tuple(image),
)
)
neighbors: List[List[PeriodicNeighbor]] = []
for i in range(len(sites)):
neighbors.append(neighbor_dict[i])
return neighbors
def get_all_neighbors_py(
self,
r: float,
include_index: bool = False,
include_image: bool = False,
sites: Sequence[PeriodicSite] = None,
numerical_tol: float = 1e-8,
) -> List[List[PeriodicNeighbor]]:
"""
Get neighbors for each atom in the unit cell, out to a distance r
Returns a list of list of neighbors for each site in structure.
Use this method if you are planning on looping over all sites in the
crystal. If you only want neighbors for a particular site, use the
method get_neighbors as it may not have to build such a large supercell.
However, if you are looping over all sites in the crystal, this method
is more efficient since it only performs one pass over a large enough
supercell to contain all possible atoms out to a distance r.
The return type is a [(site, dist) ...] since most of the time,
subsequent processing requires the distance.
A note about periodic images: Before computing the neighbors, this
operation translates all atoms to within the unit cell (having
fractional coordinates within [0,1)). This means that the "image" of a
site does not correspond to how much it has been translated from its
current position, but rather to which image of the unit cell it resides in.
Args:
r (float): Radius of sphere.
include_index (bool): Deprecated. Now, the non-supercell site index
is always included in the returned data.
include_image (bool): Deprecated. Now the supercell image
is always included in the returned data.
sites (list of Sites or None): sites for getting all neighbors,
default is None, which means neighbors will be obtained for all
sites. This is useful in the situation where you are interested
only in one subspecies type, and makes it a lot faster.
numerical_tol (float): This is a numerical tolerance for distances.
Sites which are < numerical_tol are determined to be coincident
with the site. Sites which are r + numerical_tol away are deemed
to be within r from the site. The default of 1e-8 should be
ok in most instances.
Returns:
[[:class:`pymatgen.core.structure.PeriodicNeighbor`],...]
"""
if sites is None:
sites = self.sites
site_coords = np.array([site.coords for site in sites])
point_neighbors = get_points_in_spheres(
self.cart_coords,
site_coords,
r=r,
pbc=True,
numerical_tol=numerical_tol,
lattice=self.lattice,
)
neighbors: List[List[PeriodicNeighbor]] = []
for point_neighbor, site in zip(point_neighbors, sites):
nns: List[PeriodicNeighbor] = []
if len(point_neighbor) < 1:
neighbors.append([])
continue
for n in point_neighbor:
coord, d, index, image = n
if (d > numerical_tol) or (self[index] != site):
neighbor = PeriodicNeighbor(
species=self[index].species,
coords=coord,
lattice=self.lattice,
properties=self[index].properties,
nn_distance=d,
index=index,
image=tuple(image),
)
nns.append(neighbor)
neighbors.append(nns)
return neighbors
@deprecated(get_all_neighbors, "This is retained purely for checking purposes.")
def get_all_neighbors_old(self, r, include_index=False, include_image=False, include_site=True):
"""
Get neighbors for each atom in the unit cell, out to a distance r
Returns a list of list of neighbors for each site in structure.
Use this method if you are planning on looping over all sites in the
crystal. If you only want neighbors for a particular site, use the
method get_neighbors as it may not have to build such a large supercell.
However, if you are looping over all sites in the crystal, this method
is more efficient since it only performs one pass over a large enough
supercell to contain all possible atoms out to a distance r.
The return type is a [(site, dist) ...] since most of the time,
subsequent processing requires the distance.
A note about periodic images: Before computing the neighbors, this
operation translates all atoms to within the unit cell (having
fractional coordinates within [0,1)). This means that the "image" of a
site does not correspond to how much it has been translated from its
current position, but rather to which image of the unit cell it resides in.
Args:
r (float): Radius of sphere.
include_index (bool): Whether to include the non-supercell site
in the returned data
include_image (bool): Whether to include the supercell image
in the returned data
include_site (bool): Whether to include the site in the returned
data. Defaults to True.
Returns:
[:class:`pymatgen.core.structure.PeriodicNeighbor`]
"""
# Use same algorithm as get_sites_in_sphere to determine supercell but
# loop over all atoms in crystal
recp_len = np.array(self.lattice.reciprocal_lattice.abc)
maxr = np.ceil((r + 0.15) * recp_len / (2 * math.pi))
nmin = np.floor(np.min(self.frac_coords, axis=0)) - maxr
nmax = np.ceil(np.max(self.frac_coords, axis=0)) + maxr
all_ranges = [np.arange(x, y) for x, y in zip(nmin, nmax)]
latt = self._lattice
matrix = latt.matrix
neighbors = [list() for _ in range(len(self._sites))]
all_fcoords = np.mod(self.frac_coords, 1)
coords_in_cell = np.dot(all_fcoords, matrix)
site_coords = self.cart_coords
indices = np.arange(len(self))
for image in itertools.product(*all_ranges):
coords = np.dot(image, matrix) + coords_in_cell
all_dists = all_distances(coords, site_coords)
all_within_r = np.bitwise_and(all_dists <= r, all_dists > 1e-8)
for (j, d, within_r) in zip(indices, all_dists, all_within_r):
if include_site:
nnsite = PeriodicSite(
self[j].species,
coords[j],
latt,
properties=self[j].properties,
coords_are_cartesian=True,
skip_checks=True,
)
for i in indices[within_r]:
item = []
if include_site:
item.append(nnsite)
item.append(d[i])
if include_index:
item.append(j)
# Add the image, if requested
if include_image:
item.append(image)
neighbors[i].append(item)
return neighbors
def get_neighbors_in_shell(
self, origin: ArrayLike, r: float, dr: float, include_index: bool = False, include_image: bool = False
) -> List[PeriodicNeighbor]:
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
include_index (bool): Deprecated. Now, the non-supercell site index
is always included in the returned data.
include_image (bool): Deprecated. Now the supercell image
is always included in the returned data.
Returns:
[NearestNeighbor] where Nearest Neighbor is a named tuple containing
(site, distance, index, image).
"""
outer = self.get_sites_in_sphere(origin, r + dr, include_index=include_index, include_image=include_image)
inner = r - dr
return [t for t in outer if t.nn_distance > inner]
def get_sorted_structure(
self, key: Optional[Callable] = None, reverse: bool = False
) -> Union["IStructure", "Structure"]:
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
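Example (illustrative sketch; ``struct`` is any existing Structure and
sorting by the z coordinate is just one possible key function):
    by_electroneg = struct.get_sorted_structure()
    by_height = struct.get_sorted_structure(key=lambda site: site.coords[2])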
"""
sites = sorted(self, key=key, reverse=reverse)
return self.__class__.from_sites(sites, charge=self._charge)
def get_reduced_structure(self, reduction_algo: str = "niggli") -> Union["IStructure", "Structure"]:
"""
Get a reduced structure.
Args:
reduction_algo (str): The lattice reduction algorithm to use.
Currently supported options are "niggli" or "LLL".
"""
if reduction_algo == "niggli":
reduced_latt = self._lattice.get_niggli_reduced_lattice()
elif reduction_algo == "LLL":
reduced_latt = self._lattice.get_lll_reduced_lattice()
else:
raise ValueError("Invalid reduction algo : {}".format(reduction_algo))
if reduced_latt != self.lattice:
return self.__class__( # type: ignore
reduced_latt,
self.species_and_occu,
self.cart_coords,
coords_are_cartesian=True,
to_unit_cell=True,
site_properties=self.site_properties,
charge=self._charge,
)
return self.copy()
def copy(self, site_properties=None, sanitize=False):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Args:
site_properties (dict): Properties to add or override. The
properties are specified in the same way as the constructor,
i.e., as a dict of the form {property: [values]}. The
properties should be in the order of the *original* structure
if you are performing sanitization.
sanitize (bool): If True, this method will return a sanitized
structure. Sanitization performs a few things: (i) The sites are
sorted by electronegativity, (ii) an LLL lattice reduction is
carried out to obtain a relatively orthogonalized cell,
(iii) all fractional coords for sites are mapped into the
unit cell.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
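Example (illustrative sketch; ``struct`` is assumed to be an existing
two-site Structure, so the magmom list below has two placeholder values):
    plain_copy = struct.copy()
    decorated = struct.copy(site_properties={"magmom": [5, -5]},
                            sanitize=True)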
"""
props = self.site_properties
if site_properties:
props.update(site_properties)
if not sanitize:
return self.__class__(
self._lattice,
self.species_and_occu,
self.frac_coords,
charge=self._charge,
site_properties=props,
)
reduced_latt = self._lattice.get_lll_reduced_lattice()
new_sites = []
for i, site in enumerate(self):
frac_coords = reduced_latt.get_fractional_coords(site.coords)
site_props = {}
for p in props:
site_props[p] = props[p][i]
new_sites.append(
PeriodicSite(
site.species,
frac_coords,
reduced_latt,
to_unit_cell=True,
properties=site_props,
skip_checks=True,
)
)
new_sites = sorted(new_sites)
return self.__class__.from_sites(new_sites, charge=self._charge)
def interpolate(
self,
end_structure: Union["IStructure", "Structure"],
nimages: Union[int, Iterable] = 10,
interpolate_lattices: bool = False,
pbc: bool = True,
autosort_tol: float = 0,
) -> List[Union["IStructure", "Structure"]]:
"""
Interpolate between this structure and end_structure. Useful for
construction of NEB inputs.
Args:
end_structure (Structure): structure to interpolate between this
structure and end.
nimages (int,list): No. of interpolation images or a list of
interpolation images. Defaults to 10 images.
interpolate_lattices (bool): Whether to interpolate the lattices.
Interpolates the lengths and angles (rather than the matrix)
so orientation may be affected.
pbc (bool): Whether to use periodic boundary conditions to find
the shortest path between endpoints.
autosort_tol (float): A distance tolerance in angstrom in
which to automatically sort end_structure to match to the
closest points in this particular structure. This is usually
what you want in a NEB calculation. 0 implies no sorting.
Otherwise, a 0.5 value usually works pretty well.
Returns:
List of interpolated structures. The starting and ending
structures included as the first and last structures respectively.
A total of (nimages + 1) structures are returned.
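Example (illustrative sketch; ``start`` and ``end`` are assumed to be two
structures of equal length, e.g. NEB endpoints, and nimages=5 is an
arbitrary choice):
    path = start.interpolate(end, nimages=5, autosort_tol=0.5)
    # path contains 6 structures: the two endpoints plus 4 intermediate
    # images evenly spaced between them.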
"""
# Check length of structures
if len(self) != len(end_structure):
raise ValueError("Structures have different lengths!")
if not (interpolate_lattices or self.lattice == end_structure.lattice):
raise ValueError("Structures with different lattices!")
if not isinstance(nimages, collections.abc.Iterable):
images = np.arange(nimages + 1) / nimages
else:
images = nimages
# Check that both structures have the same species
for i, site in enumerate(self):
if site.species != end_structure[i].species:
raise ValueError(
"Different species!\nStructure 1:\n" + str(self) + "\nStructure 2\n" + str(end_structure)
)
start_coords = np.array(self.frac_coords)
end_coords = np.array(end_structure.frac_coords)
if autosort_tol:
dist_matrix = self.lattice.get_all_distances(start_coords, end_coords)
site_mappings = collections.defaultdict(list) # type: Dict[int, List[int]]
unmapped_start_ind = []
for i, row in enumerate(dist_matrix):
ind = np.where(row < autosort_tol)[0]
if len(ind) == 1:
site_mappings[i].append(ind[0])
else:
unmapped_start_ind.append(i)
if len(unmapped_start_ind) > 1:
raise ValueError(
"Unable to reliably match structures "
"with auto_sort_tol = %f. unmapped indices "
"= %s" % (autosort_tol, unmapped_start_ind)
)
sorted_end_coords = np.zeros_like(end_coords)
matched = []
for i, j in site_mappings.items():
if len(j) > 1:
raise ValueError(
"Unable to reliably match structures "
"with auto_sort_tol = %f. More than one "
"site match!" % autosort_tol
)
sorted_end_coords[i] = end_coords[j[0]]
matched.append(j[0])
if len(unmapped_start_ind) == 1:
i = unmapped_start_ind[0]
j = list(set(range(len(start_coords))).difference(matched))[0] # type: ignore
sorted_end_coords[i] = end_coords[j]
end_coords = sorted_end_coords
vec = end_coords - start_coords
if pbc:
vec -= np.round(vec)
sp = self.species_and_occu
structs = []
if interpolate_lattices:
# interpolate lattice matrices using polar decomposition
from scipy.linalg import polar
# u is unitary (rotation), p is stretch
u, p = polar(np.dot(end_structure.lattice.matrix.T, np.linalg.inv(self.lattice.matrix.T)))
lvec = p - np.identity(3)
lstart = self.lattice.matrix.T
for x in images:
if interpolate_lattices:
l_a = np.dot(np.identity(3) + x * lvec, lstart).T
lat = Lattice(l_a)
else:
lat = self.lattice
fcoords = start_coords + x * vec
structs.append(self.__class__(lat, sp, fcoords, site_properties=self.site_properties)) # type: ignore
return structs
def get_miller_index_from_site_indexes(self, site_ids, round_dp=4, verbose=True):
"""
Get the Miller index of a plane from a set of sites indexes.
A minimum of 3 sites are required. If more than 3 sites are given
the best plane that minimises the distance to all points will be
calculated.
Args:
site_ids (list of int): A list of site indexes to consider. A
minimum of three site indexes are required. If more than three
sites are provided, the best plane that minimises the distance
to all sites will be calculated.
round_dp (int, optional): The number of decimal places to round the
miller index to.
verbose (bool, optional): Whether to print warnings.
Returns:
(tuple): The Miller index.
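Example (illustrative sketch; ``slab`` is assumed to be a Structure whose
sites 0, 1 and 2 lie in the plane of interest):
    hkl = slab.get_miller_index_from_site_indexes([0, 1, 2])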
"""
return self.lattice.get_miller_index_from_coords(
self.frac_coords[site_ids],
coords_are_cartesian=False,
round_dp=round_dp,
verbose=verbose,
)
def get_primitive_structure(
self, tolerance: float = 0.25, use_site_props: bool = False, constrain_latt: Union[List, Dict] = None
):
"""
This finds a smaller unit cell than the input. Sometimes it doesn't
find the smallest possible one, so this method is recursively called
until it is unable to find a smaller cell.
NOTE: if the tolerance is greater than 1/2 the minimum inter-site
distance in the primitive cell, the algorithm will reject this lattice.
Args:
tolerance (float), Angstroms: Tolerance for each coordinate of a
particular site. For example, [0.1, 0, 0.1] in cartesian
coordinates will be considered to be on the same coordinates
as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.
use_site_props (bool): Whether to account for site properties in
differentiating sites.
constrain_latt (list/dict): List of lattice parameters we want to
preserve, e.g. ["alpha", "c"] or dict with the lattice
parameter names as keys and values we want the parameters to
be e.g. {"alpha": 90, "c": 2.5}.
Returns:
The most primitive structure found.
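Example (illustrative sketch; ``conventional`` is any Structure given in a
non-primitive setting):
    prim = conventional.get_primitive_structure()
    prim_same_c = conventional.get_primitive_structure(
        constrain_latt={"c": conventional.lattice.c})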
"""
if constrain_latt is None:
constrain_latt = []
def site_label(site):
if not use_site_props:
return site.species_string
d = [site.species_string]
for k in sorted(site.properties.keys()):
d.append(k + "=" + str(site.properties[k]))
return ", ".join(d)
# group sites by species string
sites = sorted(self._sites, key=site_label)
grouped_sites = [list(a[1]) for a in itertools.groupby(sites, key=site_label)]
grouped_fcoords = [np.array([s.frac_coords for s in g]) for g in grouped_sites]
# min_vecs are approximate periodicities of the cell. The exact
# periodicities from the supercell matrices are checked against these
# first
min_fcoords = min(grouped_fcoords, key=lambda x: len(x))
min_vecs = min_fcoords - min_fcoords[0]
# fractional tolerance in the supercell
super_ftol = np.divide(tolerance, self.lattice.abc)
super_ftol_2 = super_ftol * 2
def pbc_coord_intersection(fc1, fc2, tol):
"""
Returns the fractional coords in fc1 that have coordinates
within tolerance to some coordinate in fc2
"""
d = fc1[:, None, :] - fc2[None, :, :]
d -= np.round(d)
np.abs(d, d)
return fc1[np.any(np.all(d < tol, axis=-1), axis=-1)]
# here we reduce the number of min_vecs by enforcing that every
# vector in min_vecs approximately maps each site onto a similar site.
# The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no
# reduction.
# This reduction is O(n^3) so usually is an improvement. Using double
# the tolerance because both vectors are approximate
for g in sorted(grouped_fcoords, key=lambda x: len(x)):
for f in g:
min_vecs = pbc_coord_intersection(min_vecs, g - f, super_ftol_2)
def get_hnf(fu):
"""
Returns all possible distinct supercell matrices given a
number of formula units in the supercell. Batches the matrices
by the values in the diagonal (for less numpy overhead).
Computational complexity is O(n^3), and difficult to improve.
Might be able to do something smart with checking combinations of a
and b first, though unlikely to reduce to O(n^2).
"""
def factors(n):
for i in range(1, n + 1):
if n % i == 0:
yield i
for det in factors(fu):
if det == 1:
continue
for a in factors(det):
for e in factors(det // a):
g = det // a // e
yield det, np.array(
[
[[a, b, c], [0, e, f], [0, 0, g]]
for b, c, f in itertools.product(range(a), range(a), range(e))
]
)
# we can't let sites match to their neighbors in the supercell
grouped_non_nbrs = []
for gfcoords in grouped_fcoords:
fdist = gfcoords[None, :, :] - gfcoords[:, None, :]
fdist -= np.round(fdist)
np.abs(fdist, fdist)
non_nbrs = np.any(fdist > 2 * super_ftol[None, None, :], axis=-1)
# since we want sites to match to themselves
np.fill_diagonal(non_nbrs, True)
grouped_non_nbrs.append(non_nbrs)
num_fu = functools.reduce(math.gcd, map(len, grouped_sites))
for size, ms in get_hnf(num_fu):
inv_ms = np.linalg.inv(ms)
# find sets of lattice vectors that are present in min_vecs
dist = inv_ms[:, :, None, :] - min_vecs[None, None, :, :]
dist -= np.round(dist)
np.abs(dist, dist)
is_close = np.all(dist < super_ftol, axis=-1)
any_close = np.any(is_close, axis=-1)
inds = np.all(any_close, axis=-1)
for inv_m, m in zip(inv_ms[inds], ms[inds]):
new_m = np.dot(inv_m, self.lattice.matrix)
ftol = np.divide(tolerance, np.sqrt(np.sum(new_m ** 2, axis=1)))
valid = True
new_coords = []
new_sp = []
new_props = collections.defaultdict(list)
for gsites, gfcoords, non_nbrs in zip(grouped_sites, grouped_fcoords, grouped_non_nbrs):
all_frac = np.dot(gfcoords, m)
# calculate grouping of equivalent sites, represented by
# adjacency matrix
fdist = all_frac[None, :, :] - all_frac[:, None, :]
fdist = np.abs(fdist - np.round(fdist))
close_in_prim = np.all(fdist < ftol[None, None, :], axis=-1)
groups = np.logical_and(close_in_prim, non_nbrs)
# check that groups are correct
if not np.all(np.sum(groups, axis=0) == size):
valid = False
break
# check that groups are all cliques
for g in groups:
if not np.all(groups[g][:, g]):
valid = False
break
if not valid:
break
# add the new sites, averaging positions
added = np.zeros(len(gsites))
new_fcoords = all_frac % 1
for i, group in enumerate(groups):
if not added[i]:
added[group] = True
inds = np.where(group)[0]
coords = new_fcoords[inds[0]]
for n, j in enumerate(inds[1:]):
offset = new_fcoords[j] - coords
coords += (offset - np.round(offset)) / (n + 2)
new_sp.append(gsites[inds[0]].species)
for k in gsites[inds[0]].properties:
new_props[k].append(gsites[inds[0]].properties[k])
new_coords.append(coords)
if valid:
inv_m = np.linalg.inv(m)
new_l = Lattice(np.dot(inv_m, self.lattice.matrix))
s = Structure(
new_l,
new_sp,
new_coords,
site_properties=new_props,
coords_are_cartesian=False,
)
# Default behavior
p = s.get_primitive_structure(
tolerance=tolerance,
use_site_props=use_site_props,
constrain_latt=constrain_latt,
).get_reduced_structure()
if not constrain_latt:
return p
# Only return primitive structures that
# satisfy the restriction condition
p_latt, s_latt = p.lattice, self.lattice
if isinstance(constrain_latt, list):
if all(getattr(p_latt, p) == getattr(s_latt, p) for p in constrain_latt):
return p
elif isinstance(constrain_latt, dict):
if all(getattr(p_latt, p) == constrain_latt[p] for p in constrain_latt.keys()): # type: ignore
return p
return self.copy()
def __repr__(self):
outs = ["Structure Summary", repr(self.lattice)]
if self._charge:
if self._charge >= 0:
outs.append("Overall Charge: +{}".format(self._charge))
else:
outs.append("Overall Charge: -{}".format(self._charge))
for s in self:
outs.append(repr(s))
return "\n".join(outs)
def __str__(self):
outs = [
"Full Formula ({s})".format(s=self.composition.formula),
"Reduced Formula: {}".format(self.composition.reduced_formula),
]
def to_s(x):
return "%0.6f" % x
outs.append("abc : " + " ".join([to_s(i).rjust(10) for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i).rjust(10) for i in self.lattice.angles]))
if self._charge:
if self._charge >= 0:
outs.append("Overall Charge: +{}".format(self._charge))
else:
outs.append("Overall Charge: -{}".format(self._charge))
outs.append("Sites ({i})".format(i=len(self)))
data = []
props = self.site_properties
keys = sorted(props.keys())
for i, site in enumerate(self):
row = [str(i), site.species_string]
row.extend([to_s(j) for j in site.frac_coords])
for k in keys:
row.append(props[k][i])
data.append(row)
outs.append(
tabulate(
data,
headers=["#", "SP", "a", "b", "c"] + keys,
)
)
return "\n".join(outs)
def get_orderings(self, mode: str = "enum", **kwargs) -> List["Structure"]:
r"""
Returns list of orderings for a disordered structure. If structure
does not contain disorder, the default structure is returned.
Args:
mode (str): Either "enum" or "sqs". If enum,
the enumlib will be used to return all distinct
orderings. If sqs, mcsqs will be used to return
an sqs structure.
kwargs: kwargs passed to either
pymatgen.command_line.enumlib_caller.EnumlibAdaptor
or pymatgen.command_line.mcsqs_caller.run_mcsqs.
For run_mcsqs, a default cluster search of 2 cluster interactions
with 1NN distance and 3 cluster interactions with 2NN distance
is set.
Returns:
List[Structure]
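Example (illustrative sketch; ``disordered`` is a Structure with partial
occupancies, and enumlib must be installed for the "enum" mode to work):
    orderings = disordered.get_orderings(mode="enum")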
"""
if self.is_ordered:
return [self]
if mode.startswith("enum"):
from pymatgen.command_line.enumlib_caller import EnumlibAdaptor
adaptor = EnumlibAdaptor(self, **kwargs)
adaptor.run()
return adaptor.structures
if mode == "sqs":
from pymatgen.command_line.mcsqs_caller import run_mcsqs
if "clusters" not in kwargs:
disordered_sites = [site for site in self if not site.is_ordered]
subset_structure = Structure.from_sites(disordered_sites)
dist_matrix = subset_structure.distance_matrix
dists = sorted(set(dist_matrix.ravel()))
unique_dists = []
for i in range(1, len(dists)):
if dists[i] - dists[i - 1] > 0.1:
unique_dists.append(dists[i])
clusters = {(i + 2): d + 0.01 for i, d in enumerate(unique_dists) if i < 2}
kwargs["clusters"] = clusters
return [run_mcsqs(self, **kwargs).bestsqs]
raise ValueError("Invalid mode: {}. Supported modes are 'enum' and 'sqs'.".format(mode))
def as_dict(self, verbosity=1, fmt=None, **kwargs):
"""
Dict representation of Structure.
Args:
verbosity (int): Verbosity level. Default of 1 includes both
direct and cartesian coordinates for all sites, lattice
parameters, etc. Useful for reading and for insertion into a
database. Set to 0 for an extremely lightweight version
that only includes sufficient information to reconstruct the
object.
fmt (str): Specifies a format for the dict. Defaults to None,
which is the default format used in pymatgen. Other options
include "abivars".
**kwargs: Allow passing of other kwargs needed for certain
formats, e.g., "abivars".
Returns:
JSON serializable dict representation.
"""
if fmt == "abivars":
    # Return a dictionary with the ABINIT variables.
from pymatgen.io.abinit.abiobjects import structure_to_abivars
return structure_to_abivars(self, **kwargs)
latt_dict = self._lattice.as_dict(verbosity=verbosity)
del latt_dict["@module"]
del latt_dict["@class"]
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self._charge,
"lattice": latt_dict,
"sites": [],
}
for site in self:
site_dict = site.as_dict(verbosity=verbosity)
del site_dict["lattice"]
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d
def as_dataframe(self):
"""
Returns a Pandas dataframe of the sites. Structure level attributes are stored in DataFrame.attrs. Example:
Species a b c x y z magmom
0 (Si) 0.0 0.0 0.000000e+00 0.0 0.000000e+00 0.000000e+00 5
1 (Si) 0.0 0.0 1.000000e-07 0.0 -2.217138e-07 3.135509e-07 -5
"""
data = []
site_properties = self.site_properties
prop_keys = list(site_properties.keys())
for site in self:
row = [site.species] + list(site.frac_coords) + list(site.coords)
for k in prop_keys:
row.append(site.properties.get(k))
data.append(row)
import pandas as pd
df = pd.DataFrame(data, columns=["Species", "a", "b", "c", "x", "y", "z"] + prop_keys)
df.attrs["Reduced Formula"] = self.composition.reduced_formula
df.attrs["Lattice"] = self.lattice
return df
@classmethod
def from_dict(cls, d, fmt=None):
"""
Reconstitute a Structure object from a dict representation of Structure
created using as_dict().
Args:
d (dict): Dict representation of structure.
Returns:
Structure object
"""
if fmt == "abivars":
from pymatgen.io.abinit.abiobjects import structure_from_abivars
return structure_from_abivars(cls=cls, **d)
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
charge = d.get("charge", None)
return cls.from_sites(sites, charge=charge)
def to(self, fmt: str = None, filename=None, **kwargs) -> Optional[str]:
r"""
Outputs the structure to a file or string.
Args:
fmt (str): Format to output to. Defaults to JSON unless filename
is provided. If fmt is specified, it overrides whatever the
filename is. Options include "cif", "mcif", "poscar", "cssr", "json",
"xsf", "mcsqs", "prismatic" and "yaml". Case-insensitive.
filename (str): If provided, output will be written to a file. If
fmt is not specified, the format is determined from the
filename. Default is None, i.e., string output.
**kwargs: Kwargs passthru to relevant methods. E.g., This allows
the passing of parameters like symprec to the
CifWriter.__init__ method for generation of symmetric cifs.
Returns:
(str) if filename is None. None otherwise.
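Example (illustrative sketch; ``struct`` is any existing Structure and the
file names are hypothetical):
    cif_string = struct.to(fmt="cif")      # CIF returned as a string
    struct.to(filename="POSCAR")           # format inferred from file name
    struct.to(fmt="json", filename="struct.json")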
"""
filename = filename or ""
fmt = "" if fmt is None else fmt.lower()
fname = os.path.basename(filename)
if fmt == "cif" or fnmatch(fname.lower(), "*.cif*"):
from pymatgen.io.cif import CifWriter
writer = CifWriter(self, **kwargs)
elif fmt == "mcif" or fnmatch(fname.lower(), "*.mcif*"):
from pymatgen.io.cif import CifWriter
writer = CifWriter(self, write_magmoms=True, **kwargs)
elif fmt == "poscar" or fnmatch(fname, "*POSCAR*"):
from pymatgen.io.vasp import Poscar
writer = Poscar(self, **kwargs)
elif fmt == "cssr" or fnmatch(fname.lower(), "*.cssr*"):
from pymatgen.io.cssr import Cssr
writer = Cssr(self) # type: ignore
elif fmt == "json" or fnmatch(fname.lower(), "*.json"):
s = json.dumps(self.as_dict())
if filename:
with zopen(filename, "wt") as f:
f.write("%s" % s)
return s
elif fmt == "xsf" or fnmatch(fname.lower(), "*.xsf*"):
from pymatgen.io.xcrysden import XSF
s = XSF(self).to_string()
if filename:
with zopen(filename, "wt", encoding="utf8") as f:
f.write(s)
return s
elif (
fmt == "mcsqs" or fnmatch(fname, "*rndstr.in*") or fnmatch(fname, "*lat.in*") or fnmatch(fname, "*bestsqs*")
):
from pymatgen.io.atat import Mcsqs
s = Mcsqs(self).to_string()
if filename:
with zopen(filename, "wt", encoding="ascii") as f:
f.write(s)
return s
elif fmt == "prismatic" or fnmatch(fname, "*prismatic*"):
from pymatgen.io.prismatic import Prismatic
s = Prismatic(self).to_string()
return s
elif fmt == "yaml" or fnmatch(fname, "*.yaml*") or fnmatch(fname, "*.yml*"):
if filename:
with zopen(filename, "wt") as f:
yaml.safe_dump(self.as_dict(), f)
return None
return yaml.safe_dump(self.as_dict())
else:
raise ValueError("Invalid format: `%s`" % str(fmt))
if filename:
writer.write_file(filename)
return None
return writer.__str__()
@classmethod
def from_str(cls, input_string: str, fmt: str, primitive=False, sort=False, merge_tol=0.0):
"""
Reads a structure from a string.
Args:
input_string (str): String to parse.
fmt (str): A format specification.
primitive (bool): Whether to find a primitive cell. Defaults to
False.
sort (bool): Whether to sort the sites in accordance to the default
ordering criteria, i.e., electronegativity.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
IStructure / Structure
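Example (illustrative sketch; a JSON round trip is shown because it needs
no external files, and ``struct`` is any existing Structure):
    json_string = struct.to(fmt="json")
    restored = Structure.from_str(json_string, fmt="json")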
"""
from pymatgen.io.atat import Mcsqs
from pymatgen.io.cif import CifParser
from pymatgen.io.cssr import Cssr
from pymatgen.io.vasp import Poscar
from pymatgen.io.xcrysden import XSF
fmt = fmt.lower()
if fmt == "cif":
parser = CifParser.from_string(input_string)
s = parser.get_structures(primitive=primitive)[0]
elif fmt == "poscar":
s = Poscar.from_string(input_string, False, read_velocities=False).structure
elif fmt == "cssr":
cssr = Cssr.from_string(input_string)
s = cssr.structure
elif fmt == "json":
d = json.loads(input_string)
s = Structure.from_dict(d)
elif fmt == "yaml":
d = yaml.safe_load(input_string)
s = Structure.from_dict(d)
elif fmt == "xsf":
s = XSF.from_string(input_string).structure
elif fmt == "mcsqs":
s = Mcsqs.structure_from_string(input_string)
else:
raise ValueError("Unrecognized format `%s`!" % fmt)
if sort:
s = s.get_sorted_structure()
if merge_tol:
s.merge_sites(merge_tol)
return cls.from_sites(s)
@classmethod
def from_file(cls, filename, primitive=False, sort=False, merge_tol=0.0):
"""
Reads a structure from a file. For example, anything ending in
a "cif" is assumed to be a Crystallographic Information Format file.
Supported formats include CIF, POSCAR/CONTCAR, CHGCAR, LOCPOT,
vasprun.xml, CSSR, Netcdf and pymatgen's JSON serialized structures.
Args:
filename (str): The filename to read from.
primitive (bool): Whether to convert to a primitive cell
Only available for cifs. Defaults to False.
sort (bool): Whether to sort sites. Default to False.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
Structure.
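Example (illustrative sketch; the file names are hypothetical and only
illustrate how the format is inferred from the name):
    s1 = Structure.from_file("my_structure.cif")
    s2 = Structure.from_file("POSCAR")
    s3 = Structure.from_file("vasprun.xml")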
"""
filename = str(filename)
if filename.endswith(".nc"):
# Read Structure from a netcdf file.
from pymatgen.io.abinit.netcdf import structure_from_ncdata
s = structure_from_ncdata(filename, cls=cls)
if sort:
s = s.get_sorted_structure()
return s
from pymatgen.io.exciting import ExcitingInput
from pymatgen.io.lmto import LMTOCtrl
from pymatgen.io.vasp import Chgcar, Vasprun
fname = os.path.basename(filename)
with zopen(filename, "rt") as f:
contents = f.read()
if fnmatch(fname.lower(), "*.cif*") or fnmatch(fname.lower(), "*.mcif*"):
return cls.from_str(contents, fmt="cif", primitive=primitive, sort=sort, merge_tol=merge_tol)
if fnmatch(fname, "*POSCAR*") or fnmatch(fname, "*CONTCAR*") or fnmatch(fname, "*.vasp"):
s = cls.from_str(
contents,
fmt="poscar",
primitive=primitive,
sort=sort,
merge_tol=merge_tol,
)
elif fnmatch(fname, "CHGCAR*") or fnmatch(fname, "LOCPOT*"):
s = Chgcar.from_file(filename).structure
elif fnmatch(fname, "vasprun*.xml*"):
s = Vasprun(filename).final_structure
elif fnmatch(fname.lower(), "*.cssr*"):
return cls.from_str(
contents,
fmt="cssr",
primitive=primitive,
sort=sort,
merge_tol=merge_tol,
)
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
return cls.from_str(
contents,
fmt="json",
primitive=primitive,
sort=sort,
merge_tol=merge_tol,
)
elif fnmatch(fname, "*.yaml*"):
return cls.from_str(
contents,
fmt="yaml",
primitive=primitive,
sort=sort,
merge_tol=merge_tol,
)
elif fnmatch(fname, "*.xsf"):
return cls.from_str(contents, fmt="xsf", primitive=primitive, sort=sort, merge_tol=merge_tol)
elif fnmatch(fname, "input*.xml"):
return ExcitingInput.from_file(fname).structure
elif fnmatch(fname, "*rndstr.in*") or fnmatch(fname, "*lat.in*") or fnmatch(fname, "*bestsqs*"):
return cls.from_str(
contents,
fmt="mcsqs",
primitive=primitive,
sort=sort,
merge_tol=merge_tol,
)
elif fnmatch(fname, "CTRL*"):
return LMTOCtrl.from_file(filename=filename).structure
else:
raise ValueError("Unrecognized file extension!")
if sort:
s = s.get_sorted_structure()
if merge_tol:
s.merge_sites(merge_tol)
s.__class__ = cls
return s
class IMolecule(SiteCollection, MSONable):
"""
Basic immutable Molecule object without periodicity. Essentially a
sequence of sites. IMolecule is made to be immutable so that it can
function as a key in a dict. For a mutable molecule,
use the :class:`Molecule` class.
Molecule extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a molecule is
equivalent to going through the sites in sequence.
"""
def __init__(
self,
species: Sequence[CompositionLike],
coords: Sequence[ArrayLike],
charge: float = 0.0,
spin_multiplicity: float = None,
validate_proximity: bool = False,
site_properties: dict = None,
):
"""
Creates a Molecule.
Args:
species: list of atomic species. Possible kinds of input include a
list of dicts of elements/species and occupancies, a list of
elements/species specified as actual Element/Species objects, strings
("Fe", "Fe2+") or atomic numbers (1, 56).
coords (3x1 array): list of cartesian coordinates of each species.
charge (float): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
and fractional_coords. Defaults to None for no properties.
"""
if len(species) != len(coords):
raise StructureError(
    "The list of atomic species must be of the same length as the list of coordinates."
)
sites = []
for i, _ in enumerate(species):
prop = None
if site_properties:
prop = {k: v[i] for k, v in site_properties.items()}
sites.append(Site(species[i], coords[i], properties=prop))
self._sites = tuple(sites)
if validate_proximity and not self.is_valid():
raise StructureError("Molecule contains sites that are less than 0.01 Angstrom apart!")
self._charge = charge
nelectrons = 0.0
for site in sites:
for sp, amt in site.species.items():
if not isinstance(sp, DummySpecies):
nelectrons += sp.Z * amt # type: ignore
nelectrons -= charge
self._nelectrons = nelectrons
if spin_multiplicity:
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of %d and spin multiplicity of %d is"
" not possible for this molecule" % (self._charge, spin_multiplicity)
)
self._spin_multiplicity = spin_multiplicity
else:
self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
@property
def charge(self) -> float:
"""
Charge of molecule
"""
return self._charge
@property
def spin_multiplicity(self) -> float:
"""
Spin multiplicity of molecule.
"""
return self._spin_multiplicity
@property
def nelectrons(self) -> float:
"""
Number of electrons in the molecule.
"""
return self._nelectrons
@property
def center_of_mass(self) -> np.ndarray:
"""
Center of mass of molecule.
"""
center = np.zeros(3)
total_weight = 0
for site in self:
wt = site.species.weight
center += site.coords * wt
total_weight += wt
return center / total_weight
@property
def sites(self) -> Tuple[Site, ...]:
"""
Returns a tuple of sites in the Molecule.
"""
return self._sites
@classmethod
def from_sites(
cls, sites: Sequence[Site], charge: float = 0, spin_multiplicity: float = None, validate_proximity: bool = False
) -> Union["IMolecule", "Molecule"]:
"""
Convenience constructor to make a Molecule from a list of sites.
Args:
sites ([Site]): Sequence of Sites.
charge (int): Charge of molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity. Defaults to None,
in which case it is determined automatically.
validate_proximity (bool): Whether to check if any sites are too
close to one another. Defaults to False.
"""
props = collections.defaultdict(list)
for site in sites:
for k, v in site.properties.items():
props[k].append(v)
return cls(
[site.species for site in sites],
[site.coords for site in sites],
charge=charge,
spin_multiplicity=spin_multiplicity,
validate_proximity=validate_proximity,
site_properties=props,
)
def break_bond(self, ind1: int, ind2: int, tol: float = 0.2) -> Tuple[Union["IMolecule", "Molecule"], ...]:
"""
Returns two molecules based on breaking the bond between atoms at index
ind1 and ind2.
Args:
ind1 (int): Index of first site.
ind2 (int): Index of second site.
tol (float): Relative tolerance to test. Basically, the code
checks if the distance between the sites is less than (1 +
tol) * typical bond distances. Defaults to 0.2, i.e.,
20% longer.
Returns:
Two Molecule objects representing the two clusters formed from
breaking the bond.
"""
clusters = [[self._sites[ind1]], [self._sites[ind2]]]
sites = [site for i, site in enumerate(self._sites) if i not in (ind1, ind2)]
def belongs_to_cluster(site, cluster):
for test_site in cluster:
if CovalentBond.is_bonded(site, test_site, tol=tol):
return True
return False
while len(sites) > 0:
unmatched = []
for site in sites:
for cluster in clusters:
if belongs_to_cluster(site, cluster):
cluster.append(site)
break
else:
unmatched.append(site)
if len(unmatched) == len(sites):
raise ValueError("Not all sites are matched!")
sites = unmatched
return tuple(self.__class__.from_sites(cluster) for cluster in clusters)
def get_covalent_bonds(self, tol: float = 0.2) -> List[CovalentBond]:
"""
Determines the covalent bonds in a molecule.
Args:
tol (float): The tol to determine bonds in a structure. See
CovalentBond.is_bonded.
Returns:
List of bonds
"""
bonds = []
for site1, site2 in itertools.combinations(self._sites, 2):
if CovalentBond.is_bonded(site1, site2, tol):
bonds.append(CovalentBond(site1, site2))
return bonds
def __eq__(self, other):
if other is None:
return False
if len(self) != len(other):
return False
if self.charge != other.charge:
return False
if self.spin_multiplicity != other.spin_multiplicity:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# For now, just use the composition hash code.
return self.composition.__hash__()
def __repr__(self):
outs = ["Molecule Summary"]
for s in self:
outs.append(s.__repr__())
return "\n".join(outs)
def __str__(self):
outs = [
"Full Formula (%s)" % self.composition.formula,
"Reduced Formula: " + self.composition.reduced_formula,
"Charge = %s, Spin Mult = %s" % (self._charge, self._spin_multiplicity),
"Sites (%d)" % len(self),
]
for i, site in enumerate(self):
outs.append(
" ".join(
[
str(i),
site.species_string,
" ".join([("%0.6f" % j).rjust(12) for j in site.coords]),
]
)
)
return "\n".join(outs)
def as_dict(self):
"""
Json-serializable dict representation of Molecule
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self._charge,
"spin_multiplicity": self._spin_multiplicity,
"sites": [],
}
for site in self:
site_dict = site.as_dict()
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d
@classmethod
def from_dict(cls, d) -> Union["IMolecule", "Molecule"]:
"""
Reconstitute a Molecule object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of Molecule.
Returns:
Molecule object
"""
sites = [Site.from_dict(sd) for sd in d["sites"]]
charge = d.get("charge", 0)
spin_multiplicity = d.get("spin_multiplicity")
return cls.from_sites(sites, charge=charge, spin_multiplicity=spin_multiplicity)
def get_distance(self, i: int, j: int) -> float:
"""
Get distance between site i and j.
Args:
i (int): Index of first site
j (int): Index of second site
Returns:
Distance between the two sites.
"""
return self[i].distance(self[j])
def get_sites_in_sphere(self, pt: ArrayLike, r: float) -> List[Neighbor]:
"""
Find all sites within a sphere from a point.
Args:
pt (3x1 array): Cartesian coordinates of center of sphere
r (float): Radius of sphere.
Returns:
[:class:`pymatgen.core.structure.Neighbor`]
"""
neighbors = []
for i, site in enumerate(self._sites):
dist = site.distance_from_point(pt)
if dist <= r:
neighbors.append(Neighbor(site.species, site.coords, site.properties, dist, i))
return neighbors
def get_neighbors(self, site: Site, r: float) -> List[Neighbor]:
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Site at the center of the sphere.
r (float): Radius of sphere.
Returns:
[:class:`pymatgen.core.structure.Neighbor`]
"""
nns = self.get_sites_in_sphere(site.coords, r)
return [nn for nn in nns if nn != site]
def get_neighbors_in_shell(self, origin: ArrayLike, r: float, dr: float) -> List[Neighbor]:
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
Returns:
[:class:`pymatgen.core.structure.Neighbor`]
"""
outer = self.get_sites_in_sphere(origin, r + dr)
inner = r - dr
return [nn for nn in outer if nn.nn_distance > inner]
def get_boxed_structure(
self,
a: float,
b: float,
c: float,
images: ArrayLike = (1, 1, 1),
random_rotation: bool = False,
min_dist: float = 1.0,
cls=None,
offset: ArrayLike = None,
no_cross: bool = False,
reorder: bool = True,
) -> Union["IStructure", "Structure"]:
"""
Creates a Structure from a Molecule by putting the Molecule in the
center of an orthorhombic box. Useful for creating a Structure for
calculating molecules using periodic codes.
Args:
a (float): a-lattice parameter.
b (float): b-lattice parameter.
c (float): c-lattice parameter.
images: No. of boxed images in each direction. Defaults to
(1, 1, 1), meaning single molecule with 1 lattice parameter
in each direction.
random_rotation (bool): Whether to apply a random rotation to
each molecule. This jumbles all the molecules so that they
are not exact images of each other.
min_dist (float): The minimum distance that atoms should be from
each other. This is only used if random_rotation is True.
The randomized rotations are searched such that no two atoms
are less than min_dist from each other.
cls: The Structure class to instantiate (defaults to pymatgen
structure)
offset: Translation to offset molecule from center of mass coords
no_cross: Whether to forbid molecule coords from extending beyond
boundary of box.
reorder: Whether to reorder the sites to be in electronegativity
order.
Returns:
Structure containing molecule in a box.
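Example (illustrative sketch; ``mol`` is any existing Molecule and the
15 Angstrom box is an arbitrary size that must exceed the molecule's
extent in every direction):
    boxed = mol.get_boxed_structure(15, 15, 15)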
"""
if offset is None:
offset = np.array([0, 0, 0])
coords = np.array(self.cart_coords)
x_range = max(coords[:, 0]) - min(coords[:, 0])
y_range = max(coords[:, 1]) - min(coords[:, 1])
z_range = max(coords[:, 2]) - min(coords[:, 2])
if a <= x_range or b <= y_range or c <= z_range:
raise ValueError("Box is not big enough to contain Molecule.")
lattice = Lattice.from_parameters(a * images[0], b * images[1], c * images[2], 90, 90, 90) # type: ignore
nimages = images[0] * images[1] * images[2] # type: ignore
all_coords: List[ArrayLike] = []
centered_coords = self.cart_coords - self.center_of_mass + offset
for i, j, k in itertools.product(
list(range(images[0])), list(range(images[1])), list(range(images[2])) # type: ignore
):
box_center = [(i + 0.5) * a, (j + 0.5) * b, (k + 0.5) * c] # type: ignore
if random_rotation:
while True:
op = SymmOp.from_origin_axis_angle(
(0, 0, 0),
axis=np.random.rand(3),
angle=random.uniform(-180, 180),
)
m = op.rotation_matrix
new_coords = np.dot(m, centered_coords.T).T + box_center
if no_cross:
x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
raise ValueError("Molecule crosses boundary of box.")
if len(all_coords) == 0:
break
distances = lattice.get_all_distances(
lattice.get_fractional_coords(new_coords),
lattice.get_fractional_coords(all_coords),
)
if np.amin(distances) > min_dist:
break
else:
new_coords = centered_coords + box_center
if no_cross:
x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
raise ValueError("Molecule crosses boundary of box.")
all_coords.extend(new_coords)
sprops = {k: v * nimages for k, v in self.site_properties.items()} # type: ignore
if cls is None:
cls = Structure
if reorder:
return cls(
lattice,
self.species * nimages, # type: ignore
all_coords,
coords_are_cartesian=True,
site_properties=sprops,
).get_sorted_structure()
return cls(
lattice,
self.species * nimages, # type: ignore
all_coords,
coords_are_cartesian=True,
site_properties=sprops,
)
def get_centered_molecule(self) -> Union["IMolecule", "Molecule"]:
"""
Returns a Molecule centered at the center of mass.
Returns:
Molecule centered with center of mass at origin.
"""
center = self.center_of_mass
new_coords = np.array(self.cart_coords) - center
return self.__class__(
self.species_and_occu,
new_coords,
charge=self._charge,
spin_multiplicity=self._spin_multiplicity,
site_properties=self.site_properties,
)
def to(self, fmt=None, filename=None):
"""
Outputs the molecule to a file or string.
Args:
fmt (str): Format to output to. Defaults to JSON unless filename
is provided. If fmt is specified, it overrides whatever the
filename is. Options include "xyz", "gjf", "g03", "json". If
you have OpenBabel installed, any of the formats supported by
OpenBabel. Non-case sensitive.
filename (str): If provided, output will be written to a file. If
fmt is not specified, the format is determined from the
filename. Default is None, i.e., string output.
Returns:
(str) if filename is None. None otherwise.
"""
from pymatgen.io.babel import BabelMolAdaptor
from pymatgen.io.gaussian import GaussianInput
from pymatgen.io.xyz import XYZ
fmt = "" if fmt is None else fmt.lower()
fname = os.path.basename(filename or "")
if fmt == "xyz" or fnmatch(fname.lower(), "*.xyz*"):
writer = XYZ(self)
elif any(fmt == r or fnmatch(fname.lower(), "*.{}*".format(r)) for r in ["gjf", "g03", "g09", "com", "inp"]):
writer = GaussianInput(self)
elif fmt == "json" or fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
if filename:
with zopen(filename, "wt", encoding="utf8") as f:
return json.dump(self.as_dict(), f)
else:
return json.dumps(self.as_dict())
elif fmt == "yaml" or fnmatch(fname, "*.yaml*"):
if filename:
with zopen(fname, "wt", encoding="utf8") as f:
return yaml.safe_dump(self.as_dict(), f)
else:
return yaml.safe_dump(self.as_dict())
else:
m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)", fname.lower())
if (not fmt) and m:
fmt = m.group(1)
writer = BabelMolAdaptor(self)
return writer.write_file(filename, file_format=fmt)
if filename:
writer.write_file(filename)
return str(writer)
@classmethod
def from_str(cls, input_string: str, fmt: str):
"""
Reads the molecule from a string.
Args:
input_string (str): String to parse.
fmt (str): Format of the input string. Options include "xyz",
"gjf", "g03", "g09", "com", "inp", "json" and "yaml". If you have
OpenBabel installed, any of the formats supported by OpenBabel.
Case-insensitive.
Returns:
IMolecule or Molecule.
"""
from pymatgen.io.gaussian import GaussianInput
from pymatgen.io.xyz import XYZ
if fmt.lower() == "xyz":
m = XYZ.from_string(input_string).molecule
elif fmt in ["gjf", "g03", "g09", "com", "inp"]:
m = GaussianInput.from_string(input_string).molecule
elif fmt == "json":
d = json.loads(input_string)
return cls.from_dict(d)
elif fmt == "yaml":
d = yaml.safe_load(input_string)
return cls.from_dict(d)
else:
from pymatgen.io.babel import BabelMolAdaptor
m = BabelMolAdaptor.from_string(input_string, file_format=fmt).pymatgen_mol
return cls.from_sites(m)
@classmethod
def from_file(cls, filename):
"""
Reads a molecule from a file. Supported formats include xyz,
gaussian input (gjf|g03|g09|com|inp), Gaussian output (.out|.lis|.log)
and pymatgen's JSON serialized molecules. Using openbabel,
many more extensions are supported but requires openbabel to be
installed.
Args:
filename (str): The filename to read from.
Returns:
Molecule
"""
filename = str(filename)
from pymatgen.io.gaussian import GaussianOutput
with zopen(filename) as f:
contents = f.read()
fname = filename.lower()
if fnmatch(fname, "*.xyz*"):
return cls.from_str(contents, fmt="xyz")
if any(fnmatch(fname.lower(), "*.{}*".format(r)) for r in ["gjf", "g03", "g09", "com", "inp"]):
return cls.from_str(contents, fmt="g09")
if any(fnmatch(fname.lower(), "*.{}*".format(r)) for r in ["out", "lis", "log"]):
return GaussianOutput(filename).final_structure
if fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
return cls.from_str(contents, fmt="json")
if fnmatch(fname, "*.yaml*"):
return cls.from_str(contents, fmt="yaml")
from pymatgen.io.babel import BabelMolAdaptor
m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)", filename.lower())
if m:
new = BabelMolAdaptor.from_file(filename, m.group(1)).pymatgen_mol
new.__class__ = cls
return new
raise ValueError("Cannot determine file type.")
class Structure(IStructure, collections.abc.MutableSequence):
"""
Mutable version of structure.
"""
__hash__ = None # type: ignore
def __init__(
self,
lattice: Union[ArrayLike, Lattice],
species: Sequence[CompositionLike],
coords: Sequence[ArrayLike],
charge: float = None,
validate_proximity: bool = False,
to_unit_cell: bool = False,
coords_are_cartesian: bool = False,
site_properties: dict = None,
):
"""
Create a periodic structure.
Args:
lattice: The lattice, either as a pymatgen.core.lattice.Lattice or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species: List of species on each site. Can take in flexible input,
including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
charge (int): overall charge of the structure. Defaults to behavior
in SiteCollection where total charge is the sum of the oxidation
states.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to map all sites into the unit cell,
i.e., fractional coords between 0 and 1. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
"""
super().__init__(
lattice,
species,
coords,
charge=charge,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties,
)
self._sites: List[PeriodicSite] = list(self._sites) # type: ignore
def __setitem__( # type: ignore
self, i: Union[int, slice, Sequence[int], SpeciesLike], site: Union[SpeciesLike, PeriodicSite, Sequence]
):
"""
Modify a site in the structure.
Args:
i (int, [int], slice, Species-like): Indices to change. You can
specify these as an int, a list of int, or a species-like
string.
site (PeriodicSite/Species/Sequence): Three options exist. You
can provide a PeriodicSite directly (lattice will be
checked). Or more conveniently, you can provide a
specie-like object or a tuple of up to length 3.
Examples:
s[0] = "Fe"
s[0] = Element("Fe")
Both replace the species only.
s[0] = "Fe", [0.5, 0.5, 0.5]
Replaces site and *fractional* coordinates. Any properties
are inherited from current site.
s[0] = "Fe", [0.5, 0.5, 0.5], {"spin": 2}
Replaces site and *fractional* coordinates and properties.
s[(0, 2, 3)] = "Fe"
Replaces sites 0, 2 and 3 with Fe.
s[0::2] = "Fe"
Replaces all even index sites with Fe.
s["Mn"] = "Fe"
Replaces all Mn in the structure with Fe. This is
a short form for the more complex replace_species.
s["Mn"] = "Fe0.5Co0.5"
Replaces all Mn in the structure with Fe: 0.5, Co: 0.5, i.e.,
creates a disordered structure!
"""
if isinstance(i, int):
indices = [i]
elif isinstance(i, (str, Element, Species)):
self.replace_species({i: site}) # type: ignore
return
elif isinstance(i, slice):
to_mod = self[i]
indices = [ii for ii, s in enumerate(self._sites) if s in to_mod]
else:
indices = list(i)
for ii in indices:
if isinstance(site, PeriodicSite):
if site.lattice != self._lattice:
raise ValueError("PeriodicSite added must have same lattice " "as Structure!")
if len(indices) != 1:
raise ValueError("Site assignment makes sense only for single int indices!")
self._sites[ii] = site # type: ignore
else:
if isinstance(site, str) or (not isinstance(site, collections.abc.Sequence)):
self._sites[ii].species = site # type: ignore
else:
self._sites[ii].species = site[0] # type: ignore
if len(site) > 1:
self._sites[ii].frac_coords = site[1] # type: ignore
if len(site) > 2:
self._sites[ii].properties = site[2] # type: ignore
def __delitem__(self, i):
"""
Deletes a site from the Structure.
"""
self._sites.__delitem__(i)
@property
def lattice(self) -> Lattice:
"""
:return: Lattice associated with structure.
"""
return self._lattice
@lattice.setter
def lattice(self, lattice: Union[ArrayLike, Lattice]):
if not isinstance(lattice, Lattice):
lattice = Lattice(lattice)
self._lattice = lattice
for site in self._sites:
site.lattice = lattice
def append( # type: ignore
self,
species: CompositionLike,
coords: ArrayLike,
coords_are_cartesian: bool = False,
validate_proximity: bool = False,
properties: dict = None,
):
"""
Append a site to the structure.
Args:
species: Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): Properties of the site.
Returns:
New structure with inserted site.
"""
return self.insert(
len(self),
species,
coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=properties,
)
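    # Illustrative usage sketch (comments only) for a hypothetical Structure s:
    # append() adds a site at the end of the site list and treats coords as
    # fractional unless coords_are_cartesian=True.
    # >>> s.append("O", [0.5, 0.5, 0.5])                             # fractional
    # >>> s.append("O", [1.5, 1.5, 1.5], coords_are_cartesian=True)  # cartesian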
def insert( # type: ignore
self,
i: int,
species: CompositionLike,
coords: ArrayLike,
coords_are_cartesian: bool = False,
validate_proximity: bool = False,
properties: dict = None,
):
"""
Insert a site to the structure.
Args:
i (int): Index to insert site
species (species-like): Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): Properties associated with the site.
Returns:
New structure with inserted site.
"""
if not coords_are_cartesian:
new_site = PeriodicSite(species, coords, self._lattice, properties=properties)
else:
frac_coords = self._lattice.get_fractional_coords(coords)
new_site = PeriodicSite(species, frac_coords, self._lattice, properties=properties)
if validate_proximity:
for site in self:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing " "site!")
self._sites.insert(i, new_site)
def replace(
self,
i: int,
species: CompositionLike,
coords: ArrayLike = None,
coords_are_cartesian: bool = False,
properties: dict = None,
):
"""
Replace a single site. Takes either a species or a dict of species and
occupations.
Args:
i (int): Index of the site in the _sites list.
species (species-like): Species of replacement site
coords (3x1 array): Coordinates of replacement site. If None,
the current coordinates are assumed.
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
properties (dict): Properties associated with the site.
"""
if coords is None:
frac_coords = self[i].frac_coords
elif coords_are_cartesian:
frac_coords = self._lattice.get_fractional_coords(coords)
else:
frac_coords = coords # type: ignore
new_site = PeriodicSite(species, frac_coords, self._lattice, properties=properties)
self._sites[i] = new_site
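    # Illustrative usage sketch (comments only) for a hypothetical Structure s:
    # replace() keeps the current coordinates when coords is None.
    # >>> s.replace(0, "Mn")                               # swap species only
    # >>> s.replace(0, "Mn", coords=[0.25, 0.25, 0.25])    # new fractional coords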
def substitute(self, index: int, func_group: Union["IMolecule", "Molecule", str], bond_order: int = 1):
"""
Substitute atom at index with a functional group.
Args:
index (int): Index of atom to substitute.
func_group: Substituent molecule. There are two options:
1. Providing an actual Molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
bond_order (int): A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
"""
# Find the nearest neighbor that is not a terminal atom.
all_non_terminal_nn = []
for nn, dist, _, _ in self.get_neighbors(self[index], 3):
# Check that the nn has neighbors within a sensible distance but
# is not the site being substituted.
for inn, dist2, _, _ in self.get_neighbors(nn, 3):
if inn != self[index] and dist2 < 1.2 * get_bond_length(nn.specie, inn.specie):
all_non_terminal_nn.append((nn, dist))
break
if len(all_non_terminal_nn) == 0:
raise RuntimeError("Can't find a non-terminal neighbor to attach" " functional group to.")
non_terminal_nn = min(all_non_terminal_nn, key=lambda d: d[1])[0]
# Set the origin point to be the coordinates of the nearest
# non-terminal neighbor.
origin = non_terminal_nn.coords
# Pass value of functional group--either from user-defined or from
# functional.json
if not isinstance(func_group, Molecule):
# Check to see whether the functional group is in database.
if func_group not in FunctionalGroups:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
fgroup = FunctionalGroups[func_group]
else:
fgroup = func_group
# If a bond length can be found, modify func_grp so that the X-group
# bond length is equal to the bond length.
try:
bl = get_bond_length(non_terminal_nn.specie, fgroup[1].specie, bond_order=bond_order)
# Catches for case of incompatibility between Element(s) and Species(s)
except TypeError:
bl = None
if bl is not None:
fgroup = fgroup.copy()
vec = fgroup[0].coords - fgroup[1].coords
vec /= np.linalg.norm(vec)
fgroup[0] = "X", fgroup[1].coords + float(bl) * vec
# Align X to the origin.
x = fgroup[0]
fgroup.translate_sites(list(range(len(fgroup))), origin - x.coords)
# Find angle between the attaching bond and the bond to be replaced.
v1 = fgroup[1].coords - origin
v2 = self[index].coords - origin
angle = get_angle(v1, v2)
if 1 < abs(angle % 180) < 179:
# For angles which are not 0 or 180, we perform a rotation about
# the origin along an axis perpendicular to both bonds to align
# bonds.
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(origin, axis, angle)
fgroup.apply_operation(op)
elif abs(abs(angle) - 180) < 1:
# We have a 180 degree angle. Simply do an inversion about the
# origin
for i, fg in enumerate(fgroup):
fgroup[i] = (fg.species, origin - (fg.coords - origin))
# Remove the atom to be replaced, and add the rest of the functional
# group.
del self[index]
for site in fgroup[1:]:
s_new = PeriodicSite(site.species, site.coords, self.lattice, coords_are_cartesian=True)
self._sites.append(s_new)
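    # Illustrative usage sketch (comments only) for a hypothetical Structure s:
    # the string form looks up a template in func_groups.json; "methyl" below is
    # a hypothetical key -- check FunctionalGroups for the names actually shipped.
    # An explicit X-CH3 Molecule (here called x_ch3, hypothetical) also works.
    # >>> s.substitute(1, "methyl")
    # >>> s.substitute(1, x_ch3, bond_order=1)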
def remove_species(self, species: Sequence[SpeciesLike]):
"""
Remove all occurrences of several species from a structure.
Args:
species: Sequence of species to remove, e.g., ["Li", "Na"].
"""
new_sites = []
species = [get_el_sp(s) for s in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species.items() if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(
PeriodicSite(
new_sp_occu,
site.frac_coords,
self._lattice,
properties=site.properties,
)
)
self._sites = new_sites
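    # Illustrative usage sketch (comments only) for a hypothetical Structure s:
    # species are normalised via get_el_sp, so strings, Element or Species
    # objects are all accepted.
    # >>> s.remove_species(["Li", "Na"])   # drops every Li/Na occupancy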
def remove_sites(self, indices: Sequence[int]):
"""
        Delete sites at the specified indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [s for i, s in enumerate(self._sites) if i not in indices]
def apply_operation(self, symmop: SymmOp, fractional: bool = False):
"""
Apply a symmetry operation to the structure and return the new
structure. The lattice is operated by the rotation matrix only.
Coords are operated in full and then transformed to the new lattice.
Args:
symmop (SymmOp): Symmetry operation to apply.
fractional (bool): Whether the symmetry operation is applied in
fractional space. Defaults to False, i.e., symmetry operation
is applied in cartesian coordinates.
"""
if not fractional:
self._lattice = Lattice([symmop.apply_rotation_only(row) for row in self._lattice.matrix])
def operate_site(site):
new_cart = symmop.operate(site.coords)
new_frac = self._lattice.get_fractional_coords(new_cart)
return PeriodicSite(
site.species,
new_frac,
self._lattice,
properties=site.properties,
skip_checks=True,
)
else:
new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)
self._lattice = Lattice(new_latt)
def operate_site(site):
return PeriodicSite(
site.species,
symmop.operate(site.frac_coords),
self._lattice,
properties=site.properties,
skip_checks=True,
)
self._sites = [operate_site(s) for s in self._sites]
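    # Illustrative usage sketch (comments only) for a hypothetical Structure s:
    # with fractional=False the lattice matrix is rotated and cartesian coords
    # are transformed; SymmOp.inversion() comes from pymatgen.core.operations.
    # >>> from pymatgen.core.operations import SymmOp
    # >>> s.apply_operation(SymmOp.inversion())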
def apply_strain(self, strain: ArrayLike):
"""
Apply a strain to the lattice.
Args:
strain (float or list): Amount of strain to apply. Can be a float,
or a sequence of 3 numbers. E.g., 0.01 means all lattice
vectors are increased by 1%. This is equivalent to calling
modify_lattice with a lattice with lattice parameters that
are 1% larger.
"""
s = (1 + np.array(strain)) * np.eye(3)
self.lattice = Lattice(np.dot(self._lattice.matrix.T, s).T)
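    # Illustrative usage sketch (comments only) for a hypothetical Structure s:
    # a scalar strains all three lattice vectors, a length-3 sequence strains
    # them anisotropically.
    # >>> s.apply_strain(0.01)               # +1% on a, b and c
    # >>> s.apply_strain([0.01, 0.0, 0.0])   # +1% on a only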
def sort(self, key: Callable = None, reverse: bool = False):
"""
Sort a structure in place. The parameters have the same meaning as in
list.sort. By default, sites are sorted by the electronegativity of
the species. The difference between this method and
get_sorted_structure (which also works in IStructure) is that the
latter returns a new Structure, while this just sorts the Structure
in place.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
self._sites.sort(key=key, reverse=reverse)
def translate_sites(
self, indices: Union[int, Sequence[int]], vector: ArrayLike, frac_coords: bool = True, to_unit_cell: bool = True
):
"""
Translate specific sites by some vector, keeping the sites within the
unit cell.
Args:
indices: Integer or List of site indices on which to perform the
translation.
vector: Translation vector for sites.
frac_coords (bool): Whether the vector corresponds to fractional or
cartesian coordinates.
to_unit_cell (bool): Whether new sites are transformed to unit
cell
"""
if not isinstance(indices, collections.abc.Iterable):
indices = [indices]
for i in indices:
site = self._sites[i]
if frac_coords:
fcoords = site.frac_coords + vector
else:
fcoords = self._lattice.get_fractional_coords(site.coords + vector)
if to_unit_cell:
fcoords = np.mod(fcoords, 1)
self._sites[i].frac_coords = fcoords
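    # Illustrative usage sketch (comments only) for a hypothetical Structure s:
    # the vector is fractional by default; pass frac_coords=False for a
    # cartesian displacement in angstroms.
    # >>> s.translate_sites([0, 1], [0.0, 0.0, 0.1])
    # >>> s.translate_sites(0, [0.5, 0.0, 0.0], frac_coords=False)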
def rotate_sites(
self,
indices: List[int] = None,
theta: float = 0.0,
axis: ArrayLike = None,
anchor: ArrayLike = None,
to_unit_cell: bool = True,
):
"""
Rotate specific sites by some angle around vector at anchor.
Args:
indices (list): List of site indices on which to perform the
translation.
theta (float): Angle in radians
axis (3x1 array): Rotation axis vector.
anchor (3x1 array): Point of rotation.
to_unit_cell (bool): Whether new sites are transformed to unit
cell
"""
from numpy import cross, eye
from numpy.linalg import norm
from scipy.linalg import expm
if indices is None:
indices = list(range(len(self)))
if axis is None:
axis = [0, 0, 1]
if anchor is None:
anchor = [0, 0, 0]
anchor = np.array(anchor)
axis = np.array(axis)
theta %= 2 * np.pi
rm = expm(cross(eye(3), axis / norm(axis)) * theta)
for i in indices:
site = self._sites[i]
coords = ((np.dot(rm, np.array(site.coords - anchor).T)).T + anchor).ravel()
new_site = PeriodicSite(
site.species,
coords,
self._lattice,
to_unit_cell=to_unit_cell,
coords_are_cartesian=True,
properties=site.properties,
skip_checks=True,
)
self._sites[i] = new_site
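    # Illustrative usage sketch (comments only) for a hypothetical Structure s:
    # rotate the first two sites by 90 degrees about the c axis, anchored at
    # the origin.
    # >>> import numpy as np
    # >>> s.rotate_sites([0, 1], theta=np.pi / 2, axis=[0, 0, 1], anchor=[0, 0, 0])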
def perturb(self, distance: float, min_distance: float = None):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
min_distance (None, int, or float): if None, all displacements will
be equal amplitude. If int or float, perturb each site a
distance drawn from the uniform distribution between
'min_distance' and 'distance'.
"""
def get_rand_vec():
# deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
dist = distance
if isinstance(min_distance, (float, int)):
dist = np.random.uniform(min_distance, dist)
return vector / vnorm * dist if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec(), frac_coords=False)
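    # Illustrative usage sketch (comments only) for a hypothetical Structure s:
    # every site is displaced by 0.1 A in a random direction; with min_distance
    # the amplitude is drawn uniformly from [min_distance, distance].
    # >>> s.perturb(0.1)
    # >>> s.perturb(0.1, min_distance=0.05)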
def make_supercell(self, scaling_matrix: ArrayLike, to_unit_cell: bool = True):
"""
Create a supercell.
Args:
scaling_matrix: A scaling matrix for transforming the lattice
vectors. Has to be all integers. Several options are possible:
                a. A full 3x3 scaling matrix defining the linear combination
                   of the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
1]] generates a new structure with lattice vectors a' =
2a + b, b' = 3b, c' = c where a, b, and c are the lattice
vectors of the original structure.
                b. A sequence of three scaling factors. E.g., [2, 1, 1]
specifies that the supercell should have dimensions 2a x b x
c.
c. A number, which simply scales all lattice vectors by the
same factor.
to_unit_cell: Whether or not to fall back sites into the unit cell
"""
s = self * scaling_matrix
if to_unit_cell:
for site in s:
site.to_unit_cell(in_place=True)
self._sites = s.sites
self._lattice = s.lattice
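    # Illustrative usage sketch (comments only) for a hypothetical Structure s:
    # all three forms of scaling_matrix described in the docstring are accepted.
    # >>> s.make_supercell([[2, 1, 0], [0, 3, 0], [0, 0, 1]])  # full 3x3 matrix
    # >>> s.make_supercell([2, 1, 1])                          # per-axis factors
    # >>> s.make_supercell(2)                                  # isotropic 2x2x2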
def scale_lattice(self, volume: float):
"""
Performs a scaling of the lattice vectors so that length proportions
and angles are preserved.
Args:
volume (float): New volume of the unit cell in A^3.
"""
self.lattice = self._lattice.scale(volume)
def merge_sites(self, tol: float = 0.01, mode: str = "sum"):
"""
Merges sites (adding occupancies) within tol of each other.
Removes site properties.
Args:
tol (float): Tolerance for distance to merge sites.
mode (str): Three modes supported. "delete" means duplicate sites are
deleted. "sum" means the occupancies are summed for the sites.
"average" means that the site is deleted but the properties are averaged
Only first letter is considered.
"""
mode = mode.lower()[0]
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import squareform
d = self.distance_matrix
np.fill_diagonal(d, 0)
clusters = fcluster(linkage(squareform((d + d.T) / 2)), tol, "distance")
sites = []
for c in np.unique(clusters):
inds = np.where(clusters == c)[0]
species = self[inds[0]].species
coords = self[inds[0]].frac_coords
props = self[inds[0]].properties
for n, i in enumerate(inds[1:]):
sp = self[i].species
if mode == "s":
species += sp
offset = self[i].frac_coords - coords
coords = coords + ((offset - np.round(offset)) / (n + 2)).astype(coords.dtype)
for key in props.keys():
if props[key] is not None and self[i].properties[key] != props[key]:
if mode == "a" and isinstance(props[key], float):
# update a running total
props[key] = props[key] * (n + 1) / (n + 2) + self[i].properties[key] / (n + 2)
else:
props[key] = None
warnings.warn(
"Sites with different site property %s are merged. " "So property is set to none" % key
)
sites.append(PeriodicSite(species, coords, self.lattice, properties=props))
self._sites = sites
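    # Illustrative usage sketch (comments only) for a hypothetical Structure s:
    # only the first letter of mode is inspected, so "sum", "delete" and
    # "average" may be abbreviated.
    # >>> s.merge_sites(tol=0.05, mode="delete")   # drop near-duplicates
    # >>> s.merge_sites(tol=0.05, mode="sum")      # add up occupancies instead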
def set_charge(self, new_charge: float = 0.0):
"""
Sets the overall structure charge
Args:
new_charge (float): new charge to set
"""
self._charge = new_charge
class Molecule(IMolecule, collections.abc.MutableSequence):
"""
Mutable Molecule. It has all the methods in IMolecule, but in addition,
it allows a user to perform edits on the molecule.
"""
__hash__ = None # type: ignore
def __init__(
self,
species: Sequence[SpeciesLike],
coords: Sequence[ArrayLike],
charge: float = 0.0,
spin_multiplicity: float = None,
validate_proximity: bool = False,
site_properties: dict = None,
):
"""
        Creates a mutable Molecule.
Args:
species: list of atomic species. Possible kinds of input include a
list of dict of elements/species and occupancies, a List of
elements/specie specified as actual Element/Species, Strings
("Fe", "Fe2+") or atomic numbers (1,56).
coords (3x1 array): list of cartesian coordinates of each species.
charge (float): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
                and coords. Defaults to None for no properties.
"""
super().__init__(
species,
coords,
charge=charge,
spin_multiplicity=spin_multiplicity,
validate_proximity=validate_proximity,
site_properties=site_properties,
)
self._sites: List[Site] = list(self._sites) # type: ignore
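    # Illustrative construction sketch (comments only): cartesian coordinates in
    # angstroms; charge and spin multiplicity default to a neutral guess based
    # on the electron count. Assumes the standard pymatgen import path.
    # >>> from pymatgen.core import Molecule
    # >>> co = Molecule(["C", "O"], [[0.0, 0.0, 0.0], [0.0, 0.0, 1.13]])
    # >>> cation = Molecule(["C", "O"], co.cart_coords, charge=1)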
def __setitem__( # type: ignore
self, i: Union[int, slice, Sequence[int], SpeciesLike], site: Union[SpeciesLike, Site, Sequence]
):
"""
Modify a site in the molecule.
Args:
i (int, [int], slice, Species-like): Indices to change. You can
specify these as an int, a list of int, or a species-like
string.
            site (Site/Species/Sequence): Three options exist. You can
provide a Site directly, or for convenience, you can provide
simply a Species-like string/object, or finally a (Species,
coords) sequence, e.g., ("Fe", [0.5, 0.5, 0.5]).
"""
if isinstance(i, int):
indices = [i]
elif isinstance(i, (str, Element, Species)):
self.replace_species({i: site}) # type: ignore
return
elif isinstance(i, slice):
to_mod = self[i]
indices = [ii for ii, s in enumerate(self._sites) if s in to_mod]
else:
indices = list(i)
for ii in indices:
if isinstance(site, Site):
self._sites[ii] = site
else:
if isinstance(site, str) or (not isinstance(site, collections.abc.Sequence)):
self._sites[ii].species = site # type: ignore
else:
self._sites[ii].species = site[0] # type: ignore
if len(site) > 1:
self._sites[ii].coords = site[1] # type: ignore
if len(site) > 2:
self._sites[ii].properties = site[2] # type: ignore
def __delitem__(self, i):
"""
        Deletes a site from the molecule.
"""
self._sites.__delitem__(i)
def append( # type: ignore
self,
species: CompositionLike,
coords: ArrayLike,
validate_proximity: bool = False,
properties: dict = None,
):
"""
Appends a site to the molecule.
Args:
species: Species of inserted site
coords: Coordinates of inserted site
validate_proximity (bool): Whether to check if inserted site is
                too close to an existing site. Defaults to False.
properties (dict): A dict of properties for the Site.
Returns:
New molecule with inserted site.
"""
return self.insert(
len(self),
species,
coords,
validate_proximity=validate_proximity,
properties=properties,
)
def set_charge_and_spin(self, charge: float, spin_multiplicity: Optional[float] = None):
"""
Set the charge and spin multiplicity.
Args:
charge (int): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
"""
self._charge = charge
nelectrons = 0.0
for site in self._sites:
for sp, amt in site.species.items():
if not isinstance(sp, DummySpecies):
nelectrons += sp.Z * amt
nelectrons -= charge
self._nelectrons = nelectrons
if spin_multiplicity:
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(self._charge, spin_multiplicity)
)
self._spin_multiplicity = spin_multiplicity
else:
self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
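    # Illustrative usage sketch (comments only) for a hypothetical Molecule mol:
    # an inconsistent charge/multiplicity pair raises ValueError; otherwise the
    # multiplicity is guessed from the parity of the electron count.
    # >>> mol.set_charge_and_spin(-1)                      # guess multiplicity
    # >>> mol.set_charge_and_spin(0, spin_multiplicity=3)  # e.g. triplet O2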
def insert( # type: ignore
self,
i: int,
species: CompositionLike,
coords: ArrayLike,
validate_proximity: bool = False,
properties: dict = None,
):
"""
Insert a site to the molecule.
Args:
i (int): Index to insert site
species: species of inserted site
coords (3x1 array): coordinates of inserted site
validate_proximity (bool): Whether to check if inserted site is
                too close to an existing site. Defaults to False.
properties (dict): Dict of properties for the Site.
Returns:
New molecule with inserted site.
"""
new_site = Site(species, coords, properties=properties)
if validate_proximity:
for site in self:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing " "site!")
self._sites.insert(i, new_site)
def remove_species(self, species: Sequence[SpeciesLike]):
"""
Remove all occurrences of a species from a molecule.
Args:
            species: Sequence of species to remove, e.g., ["Li", "Na"].
"""
new_sites = []
species = [get_el_sp(sp) for sp in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species.items() if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(Site(new_sp_occu, site.coords, properties=site.properties))
self._sites = new_sites
def remove_sites(self, indices: Sequence[int]):
"""
        Delete sites at the specified indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [self._sites[i] for i in range(len(self._sites)) if i not in indices]
def translate_sites(self, indices: Sequence[int] = None, vector: ArrayLike = None):
"""
        Translate specific sites by some vector.
Args:
indices (list): List of site indices on which to perform the
translation.
vector (3x1 array): Translation vector for sites.
"""
if indices is None:
indices = range(len(self))
if vector is None:
vector = [0, 0, 0]
for i in indices:
site = self._sites[i]
new_site = Site(site.species, site.coords + vector, properties=site.properties) # type: ignore
self._sites[i] = new_site
def rotate_sites(
self, indices: Sequence[int] = None, theta: float = 0.0, axis: ArrayLike = None, anchor: ArrayLike = None
):
"""
Rotate specific sites by some angle around vector at anchor.
Args:
indices (list): List of site indices on which to perform the
translation.
theta (float): Angle in radians
axis (3x1 array): Rotation axis vector.
anchor (3x1 array): Point of rotation.
"""
from numpy import cross, eye
from numpy.linalg import norm
from scipy.linalg import expm
if indices is None:
indices = range(len(self))
if axis is None:
axis = [0, 0, 1]
if anchor is None:
anchor = [0, 0, 0]
anchor = np.array(anchor)
axis = np.array(axis)
theta %= 2 * np.pi
rm = expm(cross(eye(3), axis / norm(axis)) * theta)
for i in indices:
site = self._sites[i]
s = ((np.dot(rm, (site.coords - anchor).T)).T + anchor).ravel()
new_site = Site(site.species, s, properties=site.properties)
self._sites[i] = new_site
def perturb(self, distance: float):
"""
        Performs a random perturbation of the sites in the molecule to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
"""
def get_rand_vec():
# deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec())
def apply_operation(self, symmop: SymmOp):
"""
Apply a symmetry operation to the molecule.
Args:
symmop (SymmOp): Symmetry operation to apply.
"""
def operate_site(site):
new_cart = symmop.operate(site.coords)
return Site(site.species, new_cart, properties=site.properties)
self._sites = [operate_site(s) for s in self._sites]
def copy(self):
"""
Convenience method to get a copy of the molecule.
Returns:
A copy of the Molecule.
"""
return self.__class__.from_sites(self)
def substitute(self, index: int, func_group: Union["IMolecule", "Molecule", str], bond_order: int = 1):
"""
Substitute atom at index with a functional group.
Args:
index (int): Index of atom to substitute.
            func_group: Substituent molecule. There are two options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
bond_order (int): A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
"""
# Find the nearest neighbor that is not a terminal atom.
all_non_terminal_nn = []
for nn in self.get_neighbors(self[index], 3):
# Check that the nn has neighbors within a sensible distance but
# is not the site being substituted.
for nn2 in self.get_neighbors(nn, 3):
if nn2 != self[index] and nn2.nn_distance < 1.2 * get_bond_length(nn.specie, nn2.specie):
all_non_terminal_nn.append(nn)
break
if len(all_non_terminal_nn) == 0:
raise RuntimeError("Can't find a non-terminal neighbor to attach" " functional group to.")
non_terminal_nn = min(all_non_terminal_nn, key=lambda nn: nn.nn_distance)
# Set the origin point to be the coordinates of the nearest
# non-terminal neighbor.
origin = non_terminal_nn.coords
# Pass value of functional group--either from user-defined or from
# functional.json
if isinstance(func_group, Molecule):
func_grp = func_group
else:
# Check to see whether the functional group is in database.
if func_group not in FunctionalGroups:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
func_grp = FunctionalGroups[func_group]
# If a bond length can be found, modify func_grp so that the X-group
# bond length is equal to the bond length.
bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie, bond_order=bond_order)
if bl is not None:
func_grp = func_grp.copy()
vec = func_grp[0].coords - func_grp[1].coords
vec /= np.linalg.norm(vec)
func_grp[0] = "X", func_grp[1].coords + float(bl) * vec
# Align X to the origin.
x = func_grp[0]
func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords)
# Find angle between the attaching bond and the bond to be replaced.
v1 = func_grp[1].coords - origin
v2 = self[index].coords - origin
angle = get_angle(v1, v2)
if 1 < abs(angle % 180) < 179:
# For angles which are not 0 or 180, we perform a rotation about
# the origin along an axis perpendicular to both bonds to align
# bonds.
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(origin, axis, angle)
func_grp.apply_operation(op)
elif abs(abs(angle) - 180) < 1:
# We have a 180 degree angle. Simply do an inversion about the
# origin
for i, fg in enumerate(func_grp):
func_grp[i] = (fg.species, origin - (fg.coords - origin))
# Remove the atom to be replaced, and add the rest of the functional
# group.
del self[index]
for site in func_grp[1:]:
self._sites.append(site)
class StructureError(Exception):
"""
Exception class for Structure.
Raised when the structure has problems, e.g., atoms that are too close.
"""
pass
with open(os.path.join(os.path.dirname(__file__), "func_groups.json"), "rt") as f:
FunctionalGroups = {k: Molecule(v["species"], v["coords"]) for k, v in json.load(f).items()}
|
mit
|
toobaz/pandas
|
pandas/tests/indexing/multiindex/test_loc.py
|
1
|
13075
|
import itertools
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.core.indexing import IndexingError
from pandas.util import testing as tm
@pytest.fixture
def single_level_multiindex():
"""single level MultiIndex"""
return MultiIndex(
levels=[["foo", "bar", "baz", "qux"]], codes=[[0, 1, 2, 3]], names=["first"]
)
@pytest.fixture
def frame_random_data_integer_multi_index():
levels = [[0, 1], [0, 1, 2]]
codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, codes=codes)
return DataFrame(np.random.randn(6, 2), index=index)
class TestMultiIndexLoc:
def test_loc_getitem_series(self):
# GH14730
# passing a series as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ["A", "B", "C"]])
x = Series(index=index, data=range(9), dtype=np.float64)
y = Series([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ["A", "B", "C"]]),
dtype=np.float64,
)
result = x.loc[y]
tm.assert_series_equal(result, expected)
result = x.loc[[1, 3]]
tm.assert_series_equal(result, expected)
# GH15424
y1 = Series([1, 3], index=[1, 2])
result = x.loc[y1]
tm.assert_series_equal(result, expected)
empty = Series(data=[], dtype=np.float64)
expected = Series(
[], index=MultiIndex(levels=index.levels, codes=[[], []], dtype=np.float64)
)
result = x.loc[empty]
tm.assert_series_equal(result, expected)
def test_loc_getitem_array(self):
# GH15434
# passing an array as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ["A", "B", "C"]])
x = Series(index=index, data=range(9), dtype=np.float64)
y = np.array([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ["A", "B", "C"]]),
dtype=np.float64,
)
result = x.loc[y]
tm.assert_series_equal(result, expected)
# empty array:
empty = np.array([])
expected = Series(
[], index=MultiIndex(levels=index.levels, codes=[[], []], dtype=np.float64)
)
result = x.loc[empty]
tm.assert_series_equal(result, expected)
# 0-dim array (scalar):
scalar = np.int64(1)
expected = Series(data=[0, 1, 2], index=["A", "B", "C"], dtype=np.float64)
result = x.loc[scalar]
tm.assert_series_equal(result, expected)
def test_loc_multiindex_labels(self):
df = DataFrame(
np.random.randn(3, 3),
columns=[["i", "i", "j"], ["A", "A", "B"]],
index=[["i", "i", "j"], ["X", "X", "Y"]],
)
# the first 2 rows
expected = df.iloc[[0, 1]].droplevel(0)
result = df.loc["i"]
tm.assert_frame_equal(result, expected)
# 2nd (last) column
expected = df.iloc[:, [2]].droplevel(0, axis=1)
result = df.loc[:, "j"]
tm.assert_frame_equal(result, expected)
# bottom right corner
expected = df.iloc[[2], [2]].droplevel(0).droplevel(0, axis=1)
result = df.loc["j"].loc[:, "j"]
tm.assert_frame_equal(result, expected)
# with a tuple
expected = df.iloc[[0, 1]]
result = df.loc[("i", "X")]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_ints(self):
df = DataFrame(
np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]],
)
expected = df.iloc[[0, 1]].droplevel(0)
result = df.loc[4]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_missing_label_raises(self):
df = DataFrame(
np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]],
)
with pytest.raises(KeyError, match=r"^2$"):
df.loc[2]
@pytest.mark.parametrize("key, pos", [([2, 4], [0, 1]), ([2], []), ([2, 3], [])])
def test_loc_multiindex_list_missing_label(self, key, pos):
# GH 27148 - lists with missing labels do not raise:
df = DataFrame(
np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]],
)
expected = df.iloc[pos]
result = df.loc[key]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_too_many_dims_raises(self):
# GH 14885
s = Series(
range(8),
index=MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
)
with pytest.raises(KeyError, match=r"^\('a', 'b'\)$"):
s.loc["a", "b"]
with pytest.raises(KeyError, match=r"^\('a', 'd', 'g'\)$"):
s.loc["a", "d", "g"]
with pytest.raises(IndexingError, match="Too many indexers"):
s.loc["a", "d", "g", "j"]
def test_loc_multiindex_indexer_none(self):
# GH6788
# multi-index indexer is None (meaning take all)
attributes = ["Attribute" + str(i) for i in range(1)]
attribute_values = ["Value" + str(i) for i in range(5)]
index = MultiIndex.from_product([attributes, attribute_values])
df = 0.1 * np.random.randn(10, 1 * 5) + 0.5
df = DataFrame(df, columns=index)
result = df[attributes]
tm.assert_frame_equal(result, df)
# GH 7349
# loc with a multi-index seems to be doing fallback
df = DataFrame(
np.arange(12).reshape(-1, 1),
index=MultiIndex.from_product([[1, 2, 3, 4], [1, 2, 3]]),
)
expected = df.loc[([1, 2],), :]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_incomplete(self):
# GH 7399
# incomplete indexers
s = Series(
np.arange(15, dtype="int64"),
MultiIndex.from_product([range(5), ["a", "b", "c"]]),
)
expected = s.loc[:, "a":"c"]
result = s.loc[0:4, "a":"c"]
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[:4, "a":"c"]
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[0:, "a":"c"]
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
# GH 7400
        # multiindex getitem with a list of indexers skips the wrong element
s = Series(
np.arange(15, dtype="int64"),
MultiIndex.from_product([range(5), ["a", "b", "c"]]),
)
expected = s.iloc[[6, 7, 8, 12, 13, 14]]
result = s.loc[2:4:2, "a":"c"]
tm.assert_series_equal(result, expected)
def test_get_loc_single_level(self, single_level_multiindex):
single_level = single_level_multiindex
s = Series(np.random.randn(len(single_level)), index=single_level)
for k in single_level.values:
s[k]
def test_loc_getitem_int_slice(self):
# GH 3053
# loc should treat integer slices like label slices
index = MultiIndex.from_tuples(
[t for t in itertools.product([6, 7, 8], ["a", "b"])]
)
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
expected = df
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples(
[t for t in itertools.product([10, 20, 30], ["a", "b"])]
)
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
expected = df.iloc[2:]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
expected = df.iloc[0:2]
expected.index = ["a", "b"]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
expected = df[10]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"indexer_type_1", (list, tuple, set, slice, np.ndarray, Series, Index)
)
@pytest.mark.parametrize(
"indexer_type_2", (list, tuple, set, slice, np.ndarray, Series, Index)
)
def test_loc_getitem_nested_indexer(self, indexer_type_1, indexer_type_2):
# GH #19686
# .loc should work with nested indexers which can be
# any list-like objects (see `pandas.api.types.is_list_like`) or slices
def convert_nested_indexer(indexer_type, keys):
if indexer_type == np.ndarray:
return np.array(keys)
if indexer_type == slice:
return slice(*keys)
return indexer_type(keys)
a = [10, 20, 30]
b = [1, 2, 3]
index = MultiIndex.from_product([a, b])
df = DataFrame(
np.arange(len(index), dtype="int64"), index=index, columns=["Data"]
)
keys = ([10, 20], [2, 3])
types = (indexer_type_1, indexer_type_2)
# check indexers with all the combinations of nested objects
# of all the valid types
indexer = tuple(
convert_nested_indexer(indexer_type, k)
for indexer_type, k in zip(types, keys)
)
result = df.loc[indexer, "Data"]
expected = Series(
[1, 2, 4, 5], name="Data", index=MultiIndex.from_product(keys)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"indexer, pos",
[
([], []), # empty ok
(["A"], slice(3)),
(["A", "D"], slice(3)),
(["D", "E"], []), # no values found - fine
(["D"], []), # same, with single item list: GH 27148
(pd.IndexSlice[:, ["foo"]], slice(2, None, 3)),
(pd.IndexSlice[:, ["foo", "bah"]], slice(2, None, 3)),
],
)
def test_loc_getitem_duplicates_multiindex_missing_indexers(indexer, pos):
# GH 7866
# multi-index slicing with missing indexers
idx = MultiIndex.from_product(
[["A", "B", "C"], ["foo", "bar", "baz"]], names=["one", "two"]
)
s = Series(np.arange(9, dtype="int64"), index=idx).sort_index()
expected = s.iloc[pos]
result = s.loc[indexer]
tm.assert_series_equal(result, expected)
def test_series_loc_getitem_fancy(multiindex_year_month_day_dataframe_random_data):
s = multiindex_year_month_day_dataframe_random_data["A"]
expected = s.reindex(s.index[49:51])
result = s.loc[[(2000, 3, 10), (2000, 3, 13)]]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("columns_indexer", [([], slice(None)), (["foo"], [])])
def test_loc_getitem_duplicates_multiindex_empty_indexer(columns_indexer):
# GH 8737
# empty indexer
multi_index = MultiIndex.from_product((["foo", "bar", "baz"], ["alpha", "beta"]))
df = DataFrame(np.random.randn(5, 6), index=range(5), columns=multi_index)
df = df.sort_index(level=0, axis=1)
expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0])
result = df.loc[:, columns_indexer]
tm.assert_frame_equal(result, expected)
def test_loc_getitem_duplicates_multiindex_non_scalar_type_object():
# regression from < 0.14.0
# GH 7914
df = DataFrame(
[[np.mean, np.median], ["mean", "median"]],
columns=MultiIndex.from_tuples([("functs", "mean"), ("functs", "median")]),
index=["function", "name"],
)
result = df.loc["function", ("functs", "mean")]
expected = np.mean
assert result == expected
def test_loc_getitem_tuple_plus_slice():
# GH 671
df = DataFrame(
{
"a": np.arange(10),
"b": np.arange(10),
"c": np.random.randn(10),
"d": np.random.randn(10),
}
).set_index(["a", "b"])
expected = df.loc[0, 0]
result = df.loc[(0, 0), :]
tm.assert_series_equal(result, expected)
def test_loc_getitem_int(frame_random_data_integer_multi_index):
df = frame_random_data_integer_multi_index
result = df.loc[1]
expected = df[-3:]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
def test_loc_getitem_int_raises_exception(frame_random_data_integer_multi_index):
df = frame_random_data_integer_multi_index
with pytest.raises(KeyError, match=r"^3$"):
df.loc[3]
def test_loc_getitem_lowerdim_corner(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
# test setup - check key not in dataframe
with pytest.raises(KeyError, match=r"^\('bar', 'three'\)$"):
df.loc[("bar", "three"), "B"]
# in theory should be inserting in a sorted space????
df.loc[("bar", "three"), "B"] = 0
expected = 0
result = df.sort_index().loc[("bar", "three"), "B"]
assert result == expected
|
bsd-3-clause
|
nmartensen/pandas
|
pandas/tests/frame/common.py
|
13
|
4708
|
import numpy as np
from pandas import compat
from pandas.util._decorators import cache_readonly
import pandas.util.testing as tm
import pandas as pd
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = pd.DataFrame(_seriesd)
_frame2 = pd.DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = pd.DataFrame(dict((k, v.astype(int))
for k, v in compat.iteritems(_seriesd)))
_tsframe = pd.DataFrame(_tsd)
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class TestData(object):
@cache_readonly
def frame(self):
return _frame.copy()
@cache_readonly
def frame2(self):
return _frame2.copy()
@cache_readonly
def intframe(self):
# force these all to int64 to avoid platform testing issues
return pd.DataFrame(dict([(c, s) for c, s in
compat.iteritems(_intframe)]),
dtype=np.int64)
@cache_readonly
def tsframe(self):
return _tsframe.copy()
@cache_readonly
def mixed_frame(self):
return _mixed_frame.copy()
@cache_readonly
def mixed_float(self):
return pd.DataFrame({'A': _frame['A'].copy().astype('float32'),
'B': _frame['B'].copy().astype('float32'),
'C': _frame['C'].copy().astype('float16'),
'D': _frame['D'].copy().astype('float64')})
@cache_readonly
def mixed_float2(self):
return pd.DataFrame({'A': _frame2['A'].copy().astype('float32'),
'B': _frame2['B'].copy().astype('float32'),
'C': _frame2['C'].copy().astype('float16'),
'D': _frame2['D'].copy().astype('float64')})
@cache_readonly
def mixed_int(self):
return pd.DataFrame({'A': _intframe['A'].copy().astype('int32'),
'B': np.ones(len(_intframe['B']), dtype='uint64'),
'C': _intframe['C'].copy().astype('uint8'),
'D': _intframe['D'].copy().astype('int64')})
@cache_readonly
def all_mixed(self):
return pd.DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'float32': np.array([1.] * 10, dtype='float32'),
'int32': np.array([1] * 10, dtype='int32')},
index=np.arange(10))
@cache_readonly
def tzframe(self):
result = pd.DataFrame({'A': pd.date_range('20130101', periods=3),
'B': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'C': pd.date_range('20130101', periods=3,
tz='CET')})
result.iloc[1, 1] = pd.NaT
result.iloc[1, 2] = pd.NaT
return result
@cache_readonly
def empty(self):
return pd.DataFrame({})
@cache_readonly
def ts1(self):
return tm.makeTimeSeries(nper=30)
@cache_readonly
def ts2(self):
return tm.makeTimeSeries(nper=30)[5:]
@cache_readonly
def simple(self):
arr = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
return pd.DataFrame(arr, columns=['one', 'two', 'three'],
index=['a', 'b', 'c'])
# self.ts3 = tm.makeTimeSeries()[-5:]
# self.ts4 = tm.makeTimeSeries()[1:-1]
def _check_mixed_float(df, dtype=None):
# float16 are most likely to be upcasted to float32
dtypes = dict(A='float32', B='float32', C='float16', D='float64')
if isinstance(dtype, compat.string_types):
dtypes = dict([(k, dtype) for k, v in dtypes.items()])
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get('A'):
assert(df.dtypes['A'] == dtypes['A'])
if dtypes.get('B'):
assert(df.dtypes['B'] == dtypes['B'])
if dtypes.get('C'):
assert(df.dtypes['C'] == dtypes['C'])
if dtypes.get('D'):
assert(df.dtypes['D'] == dtypes['D'])
def _check_mixed_int(df, dtype=None):
dtypes = dict(A='int32', B='uint64', C='uint8', D='int64')
if isinstance(dtype, compat.string_types):
dtypes = dict([(k, dtype) for k, v in dtypes.items()])
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get('A'):
assert(df.dtypes['A'] == dtypes['A'])
if dtypes.get('B'):
assert(df.dtypes['B'] == dtypes['B'])
if dtypes.get('C'):
assert(df.dtypes['C'] == dtypes['C'])
if dtypes.get('D'):
assert(df.dtypes['D'] == dtypes['D'])
|
bsd-3-clause
|
IGITUGraz/spore-nest-module
|
examples/pattern_matching_showcase/python/snn_utils/plotter/backends/tk.py
|
3
|
4031
|
from __future__ import absolute_import
try:
from tkinter import *
from tkinter.ttk import *
except ImportError:
    from Tkinter import *
from ttk import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import snn_utils.plotter as plotter
class FormFrame(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, *args, **kwargs)
self._row_count = 0
def add_row(self, label_text, widget_funcs):
Label(self, text=label_text, anchor=W).grid(row=self._row_count, column=0)
row = Frame(self)
row.grid(row=self._row_count, column=1, padx=5, pady=5)
widgets = []
for widget_func in widget_funcs:
widget = widget_func(row)
widget.pack(side=LEFT, expand=YES, fill=X)
widgets.append(widget)
self._row_count += 1
return widgets
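    # Illustrative usage sketch (comments only): each call adds one labelled row
    # and returns the widgets it created; "parent" and "entry" are hypothetical
    # names, mirroring how PlotFrameControl uses this class below.
    # >>> form = FormFrame(parent)
    # >>> (entry,) = form.add_row("Frequency:", [lambda p: Entry(p)])
    # >>> form.pack(fill=X)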
class TkPlotCanvas(FigureCanvasTkAgg):
def __init__(self, fig, *args, **kwargs):
FigureCanvasTkAgg.__init__(self, fig, *args, **kwargs)
self._fig = fig
def resize(self, event):
FigureCanvasTkAgg.resize(self, event)
self._fig.tight_layout()
class PlotFrame(plotter.PlotWindow, Frame):
def __init__(self, tk_parent, plot_builder, data_source, max_time_window=None):
plotter.PlotWindow.__init__(self, plot_builder, data_source, max_time_window, quick_draw=False)
Frame.__init__(self, tk_parent)
self._tk_canvas = TkPlotCanvas(self.get_figure(), master=tk_parent)
self._tk_canvas.get_tk_widget().pack(fill=BOTH, expand=1)
self._enabled_var = BooleanVar()
self._enabled_var.set(True)
self._plot_invalidated = True
def _create_figure(self):
return Figure()
def _draw(self):
self._tk_canvas.draw()
def draw(self):
if self._enabled_var.get() and self._plot_invalidated:
plotter.PlotWindow.draw(self)
# self._plot_invalidated = False
def invalidate(self):
self._plot_invalidated = True
def get_enabled_variable(self):
return self._enabled_var
class PlotFrameControl(Frame):
def __init__(self, parent, plot_frame):
Frame.__init__(self, parent)
self._plot_frame = plot_frame
self._sim_time_scale_track_max_threshold = 1.0
self._sim_time_scale = Scale(self, orient=HORIZONTAL, from_=0, to=0,
command=self._set_sim_time)
self._sim_time_scale.pack(fill=X)
control_frame = FormFrame(self)
self._sim_time_label, = control_frame.add_row("Sim. Time:", [lambda p: Label(p)])
self._update_frequency_entry, = control_frame.add_row("Frequency:", [lambda p: Entry(p)])
self._enabled_var = BooleanVar()
self._enabled_var.set(True)
self._auto_update_btn = Checkbutton(control_frame, text="Auto Update",
onvalue=True, offvalue=False, variable=self._enabled_var)
self._auto_update_btn.grid(row=0, rowspan=2, column=2, sticky=W + E + N + S)
self._plot_invalidated = True
control_frame.pack(fill=NONE, side=BOTTOM)
def _set_sim_time(self, sim_time):
sim_time = float(sim_time)
if self._data_source.get_max_time() - sim_time < self._sim_time_scale_track_max_threshold:
self._max_time_to_show = None
else:
self._max_time_to_show = sim_time
self._plot_invalidated = True
def draw(self):
if self._plot_invalidated or self._max_time_to_show is None:
plotter.PlotWindow.draw(self)
self._plot_invalidated = False
self._sim_time_label.config(text="{:.2f}s".format(self._data_source.get_max_time()))
self._sim_time_scale.config(from_=self._data_source.get_min_time(), to=self._data_source.get_max_time())
if self._max_time_to_show is None:
self._sim_time_scale.set(self._data_source.get_max_time())
|
gpl-2.0
|
vitaliykomarov/NEUCOGAR
|
nest/noradrenaline/nest-2.10.0/topology/doc/user_manual_scripts/connections.py
|
9
|
18038
|
# -*- coding: utf-8 -*-
#
# connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# create connectivity figures for topology manual
import nest
import nest.topology as tp
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
def beautify_layer(l, fig=plt.gcf(), xlabel=None, ylabel=None,
xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
"""Assume either x and ylims/ticks given or none"""
top = nest.GetStatus(l)[0]['topology']
ctr = top['center']
ext = top['extent']
    if xticks is None:
if 'rows' in top:
dx = float(ext[0]) / top['columns']
dy = float(ext[1]) / top['rows']
xticks = ctr[0]-ext[0]/2.+dx/2. + dx*np.arange(top['columns'])
yticks = ctr[1]-ext[1]/2.+dy/2. + dy*np.arange(top['rows'])
    if xlim is None:
xlim = [ctr[0]-ext[0]/2.-dx/2., ctr[0]+ext[0]/2.+dx/2.] # extra space so extent is visible
ylim = [ctr[1]-ext[1]/2.-dy/2., ctr[1]+ext[1]/2.+dy/2.]
else:
ext = [xlim[1]-xlim[0], ylim[1]-ylim[0]]
ax = fig.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal', 'box')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.grid(True)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return
def conn_figure(fig, layer, connd, targets=None, showmask=True, showkern=False,
xticks=range(-5,6),yticks=range(-5,6),
xlim=[-5.5,5.5],ylim=[-5.5,5.5]):
    if targets is None:
targets=((tp.FindCenterElement(layer),'red'),)
tp.PlotLayer(layer, fig=fig, nodesize=60)
for src, clr in targets:
if showmask:
mask = connd['mask']
else:
mask = None
if showkern:
kern = connd['kernel']
else:
kern = None
tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
src_size=250, tgt_color=clr, tgt_size=20,
kernel_color='green')
beautify_layer(layer, fig,
xlim=xlim,ylim=ylim,xticks=xticks,yticks=yticks,
xlabel='', ylabel='')
fig.gca().grid(False)
# -----------------------------------------------
# Simple connection
#{ conn1 #}
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
'elements': 'iaf_neuron'})
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.,-1.],
'upper_right': [ 2., 1.]}}}
tp.ConnectLayers(l, l, conndict)
#{ end #}
fig = plt.figure()
fig.add_subplot(121)
conn_figure(fig, l, conndict,
targets=((tp.FindCenterElement(l),'red'),
(tp.FindNearestElement(l, [4.,5.]),'yellow')))
# same another time, with periodic bcs
lpbc = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
'elements': 'iaf_neuron', 'edge_wrap': True})
tp.ConnectLayers(lpbc, lpbc, conndict)
fig.add_subplot(122)
conn_figure(fig, lpbc, conndict, showmask=False,
targets=((tp.FindCenterElement(lpbc),'red'),
(tp.FindNearestElement(lpbc, [4.,5.]),'yellow')))
plt.savefig('../user_manual_figures/conn1.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def free_mask_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
'elements': 'iaf_neuron'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2))
fig = plt.figure()
#{ conn2r #}
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.,-1.],
'upper_right': [ 2., 1.]}}}
#{ end #}
free_mask_fig(fig, 231, conndict)
#{ conn2ro #}
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.,-1.],
'upper_right': [ 2., 1.]},
'anchor': [-1.5, -1.5]}}
#{ end #}
free_mask_fig(fig, 234, conndict)
#{ conn2c #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0}}}
#{ end #}
free_mask_fig(fig, 232, conndict)
#{ conn2co #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0},
'anchor': [-2.0,0.0]}}
#{ end #}
free_mask_fig(fig, 235, conndict)
#{ conn2d #}
conndict = {'connection_type': 'divergent',
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.}}}
#{ end #}
free_mask_fig(fig, 233, conndict)
#{ conn2do #}
conndict = {'connection_type': 'divergent',
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.},
'anchor': [1.5,1.5]}}
#{ end #}
free_mask_fig(fig, 236, conndict)
plt.savefig('../user_manual_figures/conn2.png', bbox_inches='tight')
# -----------------------------------------------
# 3d masks
def conn_figure_3d(fig, layer, connd, targets=None, showmask=True, showkern=False,
xticks=range(-5,6),yticks=range(-5,6),
xlim=[-5.5,5.5],ylim=[-5.5,5.5]):
    if targets is None:
targets=((tp.FindCenterElement(layer),'red'),)
tp.PlotLayer(layer, fig=fig, nodesize=20, nodecolor=(.5,.5,1.))
for src, clr in targets:
if showmask:
mask = connd['mask']
else:
mask = None
if showkern:
kern = connd['kernel']
else:
kern = None
tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
src_size=250, tgt_color=clr, tgt_size=60,
kernel_color='green')
ax = fig.gca()
ax.set_aspect('equal', 'box')
plt.draw()
def free_mask_3d_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'layers': 11, 'extent': [11.,11.,11.],
'elements': 'iaf_neuron'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc,projection='3d')
conn_figure_3d(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2))
fig = plt.figure()
#{ conn_3d_a #}
conndict = {'connection_type': 'divergent',
'mask': {'box': {'lower_left' : [-2.,-1.,-1.],
'upper_right': [ 2., 1., 1.]}}}
#{ end #}
free_mask_3d_fig(fig, 121, conndict)
#{ conn_3d_b #}
conndict = {'connection_type': 'divergent',
'mask': {'spherical': {'radius': 2.5}}}
#{ end #}
free_mask_3d_fig(fig, 122, conndict)
plt.savefig('../user_manual_figures/conn_3d.png', bbox_inches='tight')
# -----------------------------------------------
# grid masks
def grid_mask_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
'elements': 'iaf_neuron'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2),
showmask=False)
fig = plt.figure()
#{ conn3 #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5}}}
#{ end #}
grid_mask_fig(fig, 131, conndict)
#{ conn3c #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5},
'anchor': {'row': 1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 132, conndict)
#{ conn3x #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5},
'anchor': {'row': -1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 133, conndict)
plt.savefig('../user_manual_figures/conn3.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def kernel_fig(fig, loc, cdict, showkern=True):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
'elements': 'iaf_neuron'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2),
showkern=showkern)
fig = plt.figure()
#{ conn4cp #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': 0.5}
#{ end #}
kernel_fig(fig, 231, conndict)
#{ conn4g #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.}}}
#{ end #}
kernel_fig(fig, 232, conndict)
#{ conn4gx #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}, 'anchor': [1.5,1.5]},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
'anchor': [1.5,1.5]}}}
#{ end #}
kernel_fig(fig, 233, conndict)
plt.draw()
#{ conn4cut #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
'cutoff': 0.5}}}
#{ end #}
kernel_fig(fig, 234, conndict)
#{ conn42d #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian2D': {'p_center': 1.0,
'sigma_x': 1., 'sigma_y': 3.}}}
#{ end #}
kernel_fig(fig, 235, conndict, showkern=False)
plt.savefig('../user_manual_figures/conn4.png', bbox_inches='tight')
# -----------------------------------------------
import numpy as np
def wd_fig(fig, loc, ldict, cdict, what, rpos=None,
xlim=[-1,51], ylim=[0,1], xticks=range(0,51,5),
yticks=np.arange(0.,1.1,0.2), clr='blue',
label=''):
nest.ResetKernel()
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict)
ax = fig.add_subplot(loc)
    if rpos is None:
rn = nest.GetLeaves(l)[0][:1] # first node
else:
rn = tp.FindNearestElement(l, rpos)
conns = nest.GetConnections(rn)
cstat = nest.GetStatus(conns)
vals = np.array([sd[what] for sd in cstat])
tgts = [sd['target'] for sd in cstat]
locs = np.array(tp.GetPosition(tgts))
ax.plot(locs[:,0], vals, 'o', mec='none', mfc=clr, label=label)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
fig = plt.figure()
#{ conn5lin #}
ldict = {'rows': 1, 'columns': 51,
'extent': [51.,1.], 'center': [25.,0.],
'elements': 'iaf_neuron'}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 311, ldict, cdict, 'weight', label='Weight')
wd_fig(fig, 311, ldict, cdict, 'delay' , label='Delay', clr='red')
fig.gca().legend()
lpdict = {'rows': 1, 'columns': 51, 'extent': [51.,1.], 'center': [25.,0.],
'elements': 'iaf_neuron', 'edge_wrap': True}
#{ conn5linpbc #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 312, lpdict, cdict, 'weight', label='Weight')
wd_fig(fig, 312, lpdict, cdict, 'delay' , label='Delay', clr='red')
fig.gca().legend()
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}}}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Linear',
rpos=[25.,0.], clr='orange')
#{ conn5exp #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'exponential': {'a': 1., 'tau': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Exponential',
rpos=[25.,0.])
#{ conn5gauss #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'gaussian': {'p_center': 1., 'sigma': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Gaussian', clr='green',rpos=[25.,0.])
#{ conn5uniform #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'uniform': {'min': 0.2, 'max': 0.8}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Uniform', clr='red',rpos=[25.,0.])
fig.gca().legend()
plt.savefig('../user_manual_figures/conn5.png', bbox_inches='tight')
# --------------------------------
def pn_fig(fig, loc, ldict, cdict,
xlim=[0.,.5], ylim=[0,3.5], xticks=range(0,51,5),
yticks=np.arange(0.,1.1,0.2), clr='blue',
label=''):
nest.ResetKernel()
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict)
ax = fig.add_subplot(loc)
rn = nest.GetLeaves(l)[0]
conns = nest.GetConnections(rn)
cstat = nest.GetStatus(conns)
srcs = [sd['source'] for sd in cstat]
tgts = [sd['target'] for sd in cstat]
dist = np.array(tp.Distance(srcs,tgts))
ax.hist(dist, bins=50, histtype='stepfilled',normed=True)
r=np.arange(0.,0.51,0.01)
plt.plot(r, 2*np.pi*r*(1-2*r)*12/np.pi,'r-',lw=3,zorder=-10)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
"""ax.set_xticks(xticks)
ax.set_yticks(yticks)"""
# ax.set_aspect(100, 'box')
ax.set_xlabel('Source-target distance d')
ax.set_ylabel('Connection probability pconn(d)')
fig = plt.figure()
#{ conn6 #}
pos = [[np.random.uniform(-1.,1.),np.random.uniform(-1.,1.)]
for j in range(1000)]
ldict = {'positions': pos, 'extent': [2.,2.],
'elements': 'iaf_neuron', 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 1.0}},
'kernel': {'linear': {'c': 1., 'a': -2., 'cutoff': 0.0}},
'number_of_connections': 50,
'allow_multapses': True, 'allow_autapses': False}
#{ end #}
pn_fig(fig, 111, ldict, cdict)
plt.savefig('../user_manual_figures/conn6.png', bbox_inches='tight')
# -----------------------------
#{ conn7 #}
nest.ResetKernel()
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 0.5}},
'kernel': 0.8,
'sources': {'model': 'pyr'},
'targets': {'model': 'in'}}
cdict_i2p = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-0.2,-0.2],
'upper_right':[0.2,0.2]}},
'sources': {'model': 'in'},
'targets': {'model': 'pyr'}}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
#{ conn8 #}
nest.ResetKernel()
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
nest.CopyModel('static_synapse', 'exc', {'weight': 2.0})
nest.CopyModel('static_synapse', 'inh', {'weight': -8.0})
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 0.5}},
'kernel': 0.8,
'sources': {'model': 'pyr'},
'targets': {'model': 'in'},
'synapse_model': 'exc'}
cdict_i2p = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-0.2,-0.2],
'upper_right':[0.2,0.2]}},
'sources': {'model': 'in'},
'targets': {'model': 'pyr'},
'synapse_model': 'inh'}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
#{ conn9 #}
nrns = tp.CreateLayer({'rows' : 20,
'columns' : 20,
'elements': 'iaf_neuron'})
stim = tp.CreateLayer({'rows' : 1,
'columns' : 1,
'elements': 'poisson_generator'})
cdict_stim = {'connection_type': 'divergent',
'mask' : {'circular': {'radius': 0.1},
'anchor': [0.2, 0.2]}}
tp.ConnectLayers(stim, nrns, cdict_stim)
#{ end #}
# ----------------------------
#{ conn10 #}
rec = tp.CreateLayer({'rows' : 1,
'columns' : 1,
'elements': 'spike_detector'})
cdict_rec = {'connection_type': 'convergent',
'mask' : {'circular': {'radius': 0.1},
'anchor': [-0.2, 0.2]}}
tp.ConnectLayers(nrns, rec, cdict_rec)
#{ end #}
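# ----------------------------
# Hedged illustration (a minimal sketch, not one of the manual's numbered
# snippets): inspect the connections created by the two snippets above,
# re-using only calls that already appear in this script (nest.GetLeaves,
# nest.GetConnections, nest.GetStatus, tp.GetPosition). The `target` keyword
# of nest.GetConnections is assumed to be available, as in NEST 2.x.
stim_src = nest.GetLeaves(stim)[0]  # the single poisson_generator
stim_tgts = [c['target'] for c in nest.GetStatus(nest.GetConnections(stim_src))]
print("poisson_generator drives %d neurons" % len(stim_tgts))
print("example target positions: %s" % (tp.GetPosition(stim_tgts[:3]),))
rec_tgt = nest.GetLeaves(rec)[0]  # the single spike_detector
rec_srcs = [c['source'] for c in
            nest.GetStatus(nest.GetConnections(target=rec_tgt))]
print("spike_detector records from %d neurons" % len(rec_srcs))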
|
gpl-2.0
|
xubenben/scikit-learn
|
sklearn/mixture/tests/test_dpgmm.py
|
261
|
4490
|
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
    # check that the verbose output is the same for the
    # flag values 1 and True
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
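def _log_normalize_sketch():
    # Hedged illustration, a minimal sketch not collected by the test runner:
    # log_normalize turns unnormalized log-weights into weights that sum to
    # one, which is what test_log_normalize above relies on. Only numpy and
    # log_normalize, both imported in this module, are used.
    log_w = np.log(np.array([0.2, 1.6, 0.02, 0.18]))
    w = log_normalize(log_w)
    assert np.allclose(w.sum(), 1.0)
    return w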
|
bsd-3-clause
|
YinongLong/scikit-learn
|
sklearn/decomposition/nmf.py
|
15
|
47073
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck
# Mathieu Blondel <[email protected]>
# Tom Dupre la Tour
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (Projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import division, print_function
from math import sqrt
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted, check_non_negative
from ..utils import deprecated
from ..exceptions import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
INTEGER_TYPES = (numbers.Integral, np.integer)
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
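def _sparseness_sketch():
    # Hedged illustration of Hoyer's sparseness measure defined above; a
    # minimal sketch, not part of the public API. A vector with a single
    # non-zero entry scores 1, while a constant vector scores 0.
    peaked = np.array([0., 0., 0., 1.])
    flat = np.ones(4)
    return _sparseness(peaked), _sparseness(flat)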
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _safe_compute_error(X, W, H):
"""Frobenius norm between X and WH, safe for sparse array"""
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(norm_X + norm_WH - 2. * cross_prod)
return error
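def _safe_compute_error_sketch():
    # Hedged sanity check, illustrative only: the sparse branch of
    # _safe_compute_error above should agree with the dense Frobenius norm.
    # Only numpy and scipy.sparse, already imported in this module, are used.
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 4))
    W = np.abs(rng.randn(6, 2))
    H = np.abs(rng.randn(2, 4))
    dense_err = _safe_compute_error(X, W, H)
    sparse_err = _safe_compute_error(sp.csr_matrix(X), W, H)
    assert np.allclose(dense_err, sparse_err)
    return dense_err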
def _check_string_param(sparseness, solver):
allowed_sparseness = (None, 'data', 'components')
if sparseness not in allowed_sparseness:
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, allowed_sparseness))
allowed_solver = ('pg', 'cd')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
eps : float
        Truncate all values less than this in output to zero.
random_state : int seed, RandomState instance, or None (default)
Random number generator seed control, used in 'nndsvdar' and
'random' modes.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
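def _initialize_nmf_sketch():
    # Hedged usage sketch for _initialize_nmf above, illustrative only: with
    # init='nndsvd' the returned factors are non-negative and have the
    # documented shapes (n_samples, n_components) and (n_components,
    # n_features).
    rng = np.random.RandomState(42)
    X = np.abs(rng.randn(10, 8))
    W, H = _initialize_nmf(X, n_components=3, init='nndsvd', random_state=0)
    assert W.shape == (10, 3) and H.shape == (3, 8)
    assert np.all(W >= 0) and np.all(H >= 0)
    return W, H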
def _nls_subproblem(V, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
V : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
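def _nls_subproblem_sketch():
    # Hedged usage sketch for _nls_subproblem above, illustrative only: solve
    # a tiny non-negative least-squares subproblem for H with W held fixed and
    # check that the returned solution fits V at least as well as the initial
    # guess.
    rng = np.random.RandomState(0)
    W = np.abs(rng.randn(20, 3))
    H_true = np.abs(rng.randn(3, 5))
    V = np.dot(W, H_true)
    H_init = np.abs(rng.randn(3, 5))
    H, grad, n_iter = _nls_subproblem(V, W, H_init.copy(), tol=1e-4,
                                      max_iter=200)
    assert norm(V - np.dot(W, H)) <= norm(V - np.dot(W, H_init))
    return H, n_iter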
def _update_projected_gradient_w(X, W, H, tolW, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = H.shape[0]
if sparseness is None:
Wt, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(beta) * np.ones((1,
n_components_))]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(eta) * np.eye(n_components_)]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return Wt.T, gradW.T, iterW
def _update_projected_gradient_h(X, W, H, tolH, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = W.shape[1]
if sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((n_components_, n_features))]),
safe_vstack([W,
np.sqrt(eta) * np.eye(n_components_)]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W, np.sqrt(beta) * np.ones((1, n_components_))]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return H, gradH, iterH
def _fit_projected_gradient(X, W, H, tol, max_iter,
nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Compute Non-negative Matrix Factorization (NMF) with Projected Gradient
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with Sparseness Constraints.
Journal of Machine Learning Research 2004.
"""
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition
# as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
W, gradW, iterW = _update_projected_gradient_w(X, W, H, tolW,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _update_projected_gradient_h(X, W, H, tolH,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
W, _, _ = _update_projected_gradient_w(X, W, H, tol, nls_max_iter,
alpha, l1_ratio, sparseness,
beta, eta)
return W, H, n_iter
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = fast_dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L2 regularization corresponds to increase of the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
# L1 regularization corresponds to decrease of each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, alpha=0.001,
l1_ratio=0., regularization=None, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
# L1 and L2 regularization
l1_H, l2_H, l1_W, l2_W = 0, 0, 0, 0
if regularization in ('both', 'components'):
alpha = float(alpha)
l1_H = l1_ratio * alpha
l2_H = (1. - l1_ratio) * alpha
if regularization in ('both', 'transformation'):
alpha = float(alpha)
l1_W = l1_ratio * alpha
l2_W = (1. - l1_ratio) * alpha
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, l1_W, l2_W,
shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, l1_H, l2_H,
shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False, nls_max_iter=2000,
sparseness=None, beta=1, eta=0.1):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
_check_string_param(sparseness, solver)
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, INTEGER_TYPES) or n_components <= 0:
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, INTEGER_TYPES) or max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive integer;"
" got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
if solver == 'pg':
warnings.warn("'pg' solver will be removed in release 0.19."
" Use 'cd' solver instead.", DeprecationWarning)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
max_iter,
nls_max_iter,
alpha, l1_ratio,
sparseness,
beta, eta)
else: # transform
W, H, n_iter = _update_projected_gradient_w(X, W, H,
tol, nls_max_iter,
alpha, l1_ratio,
sparseness, beta,
eta)
elif solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
max_iter,
alpha, l1_ratio,
regularization,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
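def _non_negative_factorization_sketch():
    # Hedged usage sketch for non_negative_factorization above, illustrative
    # only: factor a small non-negative matrix with the coordinate descent
    # solver and report the achieved Frobenius reconstruction error.
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(12, 6))
    W, H, n_iter = non_negative_factorization(X, n_components=4,
                                              init='nndsvd', solver='cd',
                                              tol=1e-4, max_iter=200,
                                              random_state=0)
    return _safe_compute_error(X, W, H), n_iter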
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, init=None, solver='cd',
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0, shuffle=False,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
if sparseness is not None:
warnings.warn("Controlling regularization through the sparseness,"
" beta and eta arguments is only available"
" for 'pg' solver, which will be removed"
" in release 0.19. Use another solver with L1 or L2"
" regularization instead.", DeprecationWarning)
self.nls_max_iter = nls_max_iter
self.sparseness = sparseness
self.beta = beta
self.eta = eta
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components,
init=self.init, update_H=True, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
if self.solver == 'pg':
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
self.reconstruction_err_ = _safe_compute_error(X, W, H)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Attributes
----------
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
self.n_iter_ = n_iter_
return W
def inverse_transform(self, W):
"""Transform data back to its original space.
Parameters
----------
W: {array-like, sparse matrix}, shape (n_samples, n_components)
Transformed data matrix
Returns
-------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix of original shape
.. versionadded:: 0.18
"""
check_is_fitted(self, 'n_components_')
return np.dot(W, self.components_)
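def _nmf_round_trip_sketch():
    # Hedged usage sketch for the NMF estimator above, illustrative only: fit
    # the transformer, transform the data and map it back with
    # inverse_transform; for an exactly low-rank input the relative
    # reconstruction error should be small.
    rng = np.random.RandomState(0)
    W_true = np.abs(rng.randn(30, 2))
    H_true = np.abs(rng.randn(2, 5))
    X = np.dot(W_true, H_true)
    model = NMF(n_components=2, init='nndsvd', random_state=0)
    W = model.fit_transform(X)
    X_back = model.inverse_transform(W)
    return norm(X - X_back) / norm(X)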
@deprecated("It will be removed in release 0.19. Use NMF instead."
"'pg' solver is still available until release 0.19.")
class ProjectedGradientNMF(NMF):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
super(ProjectedGradientNMF, self).__init__(
n_components=n_components, init=init, solver='pg', tol=tol,
max_iter=max_iter, random_state=random_state, alpha=alpha,
l1_ratio=l1_ratio, verbose=verbose, nls_max_iter=nls_max_iter,
sparseness=sparseness, beta=beta, eta=eta)
|
bsd-3-clause
|
RuanAragao/peach
|
tutorial/neural-networks/mapping-a-plane.py
|
6
|
2368
|
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/mapping-a-plane.py
# Using a neuron to map a plane
################################################################################
# Please, for more information on this demo, see the tutorial documentation.
# We import numpy, random and peach, as those are the libraries we will be
# using.
from numpy import *
import random
import peach as p
# Here, we create a FeedForward network with only one layer, with two inputs
# and one output. Since there is only one output, there is only one neuron in
# the layer. We use LMS as the learning algorithm, and the neuron must be
# biased. Notice that we use 0.02 as the learning rate for the algorithm.
nn = p.FeedForward((2, 1), lrule=p.LMS(0.02), bias=True)
# These lists will track the values of the synaptic weights and the error. We
# will use them later to plot the convergence, if the matplotlib module is
# available.
w0 = [ ]
w1 = [ ]
w2 = [ ]
elog = [ ]
# We start by setting the error to 1, so we can enter the loop:
error = 1
while abs(error) > 1e-7: # Max error is 1e-7
x1 = random.uniform(-10, 10) # Generating an example
x2 = random.uniform(-10, 10)
x = array([ x1, x2 ], dtype = float)
d = -1 - 3*x1 + 2*x2 # Plane equation
error = nn.feed(x, d)
w0.append(nn[0].weights[0][0]) # Tracking error and weights.
w1.append(nn[0].weights[0][1])
w2.append(nn[0].weights[0][2])
elog.append(d - nn(x)[0, 0])
print "After %d iterations, we get as synaptic weights:" % (len(w0),)
print nn[0].weights
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``mapping-a-plane.png``.
try:
from matplotlib import *
from matplotlib.pylab import *
vsize = 4
figure(1).set_size_inches(8, 4)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
a1.grid(True)
a1.plot(array(w0))
a1.plot(array(w1))
a1.plot(array(w2))
a1.plot(array(elog))
a1.set_ylim([-10, 10])
a1.legend([ "$w_0$", "$w_1$", "$w_2$", "$error$" ])
savefig("mapping-a-plane.png")
except ImportError:
pass
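# A short, hedged check (illustrative sketch): evaluate the trained network on
# a fresh point and compare it with the target plane d = -1 - 3*x1 + 2*x2.
# Only numpy (already imported above) and the `nn` object defined above are
# used, through the same call style as in the training loop.
x_test = array([ 2.0, -1.5 ])
d_test = -1 - 3*x_test[0] + 2*x_test[1]
y_test = nn(x_test)[0, 0]
print("Target on the test point: %f" % d_test)
print("Network output: %f" % y_test)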
|
lgpl-2.1
|
btabibian/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
121
|
4301
|
from itertools import product
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_equal)
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
def test_isomap_clone_bug():
# regression test for bug reported in #6062
model = manifold.Isomap()
for n_neighbors in [10, 15, 20]:
model.set_params(n_neighbors=n_neighbors)
model.fit(np.random.rand(50, 2))
assert_equal(model.nbrs_.n_neighbors,
n_neighbors)
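def _isomap_embedding_shape_sketch():
    # Hedged illustration, a minimal sketch not collected by the test runner:
    # fit_transform on a blob dataset returns an embedding with one row per
    # sample and n_components columns. Only imports already present in this
    # module are used.
    X, _ = datasets.make_blobs(random_state=0)
    iso = manifold.Isomap(n_neighbors=10, n_components=2)
    X_iso = iso.fit_transform(X)
    assert_equal(X_iso.shape, (X.shape[0], 2))
    return X_iso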
|
bsd-3-clause
|
RPGOne/Skynet
|
scikit-learn-0.18.1/sklearn/model_selection/tests/test_split.py
|
7
|
47406
|
"""Test the split module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from scipy import stats
from scipy.misc import comb
from itertools import combinations
from sklearn.utils.fixes import combinations_with_replacement
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils.mocking import MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import check_cv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
from sklearn.model_selection._split import _validate_shuffle_split
from sklearn.model_selection._split import _CVIterableWrapper
from sklearn.model_selection._split import _build_repr
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.svm import SVC
X = np.ones(10)
y = np.arange(10) // 2
P_sparse = coo_matrix(np.eye(5))
test_groups = (
np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
[1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'])
digits = load_digits()
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
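def _mock_classifier_cv_sketch():
    # Hedged illustration, a minimal sketch not collected by the test runner:
    # MockClassifier can be driven through cross_val_score with an explicit
    # KFold splitter; its score method ignores the data, so every fold should
    # return 1 / (1 + |a|). Only names already imported in this module are
    # used.
    clf = MockClassifier(a=1)
    scores = cross_val_score(clf, X, y, cv=KFold(n_splits=2))
    assert_array_equal(scores, 0.5 * np.ones(2))
    return scores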
@ignore_warnings
def test_cross_validator_with_default_params():
n_samples = 4
n_unique_groups = 4
n_splits = 2
p = 2
n_shuffle_splits = 10 # (the default value)
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
X_1d = np.array([1, 2, 3, 4])
y = np.array([1, 1, 2, 2])
groups = np.array([1, 2, 3, 4])
loo = LeaveOneOut()
lpo = LeavePOut(p)
kf = KFold(n_splits)
skf = StratifiedKFold(n_splits)
lolo = LeaveOneGroupOut()
lopo = LeavePGroupsOut(p)
ss = ShuffleSplit(random_state=0)
    ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = number of unique folds = 2
loo_repr = "LeaveOneOut()"
lpo_repr = "LeavePOut(p=2)"
kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
lolo_repr = "LeaveOneGroupOut()"
lopo_repr = "LeavePGroupsOut(n_groups=2)"
ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, test_size=0.1, "
"train_size=None)")
ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
n_unique_groups, comb(n_unique_groups, p),
n_shuffle_splits, 2]
for i, (cv, cv_repr) in enumerate(zip(
[loo, lpo, kf, skf, lolo, lopo, ss, ps],
[loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
ss_repr, ps_repr])):
# Test if get_n_splits works correctly
assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))
# Test if the cross-validator works as expected even if
# the data is 1d
np.testing.assert_equal(list(cv.split(X, y, groups)),
list(cv.split(X_1d, y, groups)))
# Test that train, test indices returned are integers
for train, test in cv.split(X, y, groups):
assert_equal(np.asarray(train).dtype.kind, 'i')
            assert_equal(np.asarray(test).dtype.kind, 'i')
# Test if the repr works without any errors
assert_equal(cv_repr, repr(cv))
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, X, y, groups, expected_n_splits=None):
n_samples = _num_samples(X)
    # Check that all the samples appear at least once in a test fold
if expected_n_splits is not None:
assert_equal(cv.get_n_splits(X, y, groups), expected_n_splits)
else:
expected_n_splits = cv.get_n_splits(X, y, groups)
collected_test_samples = set()
iterations = 0
for train, test in cv.split(X, y, groups):
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_splits)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
X1 = np.array([[1, 2], [3, 4], [5, 6]])
X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, next, KFold(4).split(X1))
# Check that a warning is raised if the least populated class has too few
# members.
y = np.array([3, 3, -1, -1, 3])
skf_3 = StratifiedKFold(3)
assert_warns_message(Warning, "The least populated class",
next, skf_3.split(X2, y))
    # Check that despite the warning the folds are still computed, even
    # though not all classes are necessarily represented on each side of
    # the split at each split
with warnings.catch_warnings():
warnings.simplefilter("ignore")
check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)
    # Check that an error is raised if any individual class has fewer
    # members than n_splits.
y = np.array([3, 3, -1, -1, 2])
assert_raises(ValueError, next, skf_3.split(X2, y))
# Error when number of folds is <= 1
assert_raises(ValueError, KFold, 0)
assert_raises(ValueError, KFold, 1)
error_string = ("k-fold cross-validation requires at least one"
" train/test split")
assert_raise_message(ValueError, error_string,
StratifiedKFold, 0)
assert_raise_message(ValueError, error_string,
StratifiedKFold, 1)
# When n_splits is not integer:
assert_raises(ValueError, KFold, 1.5)
assert_raises(ValueError, KFold, 2.0)
assert_raises(ValueError, StratifiedKFold, 1.5)
assert_raises(ValueError, StratifiedKFold, 2.0)
# When shuffle is not a bool:
assert_raises(TypeError, KFold, n_splits=4, shuffle=None)
def test_kfold_indices():
# Check all indices are returned in the test folds
X1 = np.ones(18)
kf = KFold(3)
check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
X2 = np.ones(17)
kf = KFold(3)
check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3)
# Check if get_n_splits returns the number of folds
assert_equal(5, KFold(5).get_n_splits(X2))
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
splits = KFold(2).split(X2[:-1])
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = KFold(2).split(X2)
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
X, y = np.ones(4), [1, 1, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
# Check if get_n_splits returns the number of folds
assert_equal(5, StratifiedKFold(5).get_n_splits(X, y))
# Make sure string labels are also supported
X = np.ones(7)
y1 = ['1', '1', '1', '0', '0', '0', '0']
y2 = [1, 1, 1, 0, 0, 0, 0]
np.testing.assert_equal(
list(StratifiedKFold(2).split(X, y1)),
list(StratifiedKFold(2).split(X, y2)))
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves class ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
X = np.ones(n_samples)
y = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in (False, True):
for train, test in StratifiedKFold(5, shuffle=shuffle).split(X, y):
assert_almost_equal(np.sum(y[train] == 4) / len(train), 0.10, 2)
assert_almost_equal(np.sum(y[train] == 0) / len(train), 0.89, 2)
assert_almost_equal(np.sum(y[train] == 1) / len(train), 0.01, 2)
assert_almost_equal(np.sum(y[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(y[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(y[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for i in range(11, 17):
kf = KFold(5).split(X=np.ones(i))
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only
    # when stratification is possible)
# Repeat with shuffling turned off and on
X = np.ones(17)
y = [0] * 3 + [1] * 14
for shuffle in (True, False):
cv = StratifiedKFold(3, shuffle=shuffle)
for i in range(11, 17):
skf = cv.split(X[:i], y[:i])
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_shuffle_kfold():
# Check the indices are shuffled properly
kf = KFold(3)
kf2 = KFold(3, shuffle=True, random_state=0)
kf3 = KFold(3, shuffle=True, random_state=1)
X = np.ones(300)
all_folds = np.zeros(300)
for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
kf.split(X), kf2.split(X), kf3.split(X)):
for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
# Assert that there is no complete overlap
assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))
# Set all test indices in successive iterations of kf2 to 1
all_folds[te2] = 1
# Check that all indices are returned in the different test folds
assert_equal(sum(all_folds), 300)
def test_shuffle_kfold_stratifiedkfold_reproducibility():
    # Check that when shuffle is True, multiple split calls produce the
    # same split when random_state is set
X = np.ones(15) # Divisible by 3
y = [0] * 7 + [1] * 8
X2 = np.ones(16) # Not divisible by 3
y2 = [0] * 8 + [1] * 8
kf = KFold(3, shuffle=True, random_state=0)
skf = StratifiedKFold(3, shuffle=True, random_state=0)
for cv in (kf, skf):
np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y)))
np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2)))
kf = KFold(3, shuffle=True)
skf = StratifiedKFold(3, shuffle=True)
for cv in (kf, skf):
for data in zip((X, X2), (y, y2)):
try:
np.testing.assert_equal(list(cv.split(*data)),
list(cv.split(*data)))
except AssertionError:
pass
else:
                raise AssertionError("The splits for data, %s, are the same "
                                     "even when random state is not set" % data)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
X_40 = np.ones(40)
y = [0] * 20 + [1] * 20
kf0 = StratifiedKFold(5, shuffle=True, random_state=0)
kf1 = StratifiedKFold(5, shuffle=True, random_state=1)
for (_, test0), (_, test1) in zip(kf0.split(X_40, y),
kf1.split(X_40, y)):
assert_not_equal(set(test0), set(test1))
check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.93) than the non-shuffling
    # variant (around 0.81).
X, y = digits.data[:600], digits.target[:600]
model = SVC(C=10, gamma=0.005)
n_splits = 3
cv = KFold(n_splits=n_splits, shuffle=False)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.92, mean_score)
assert_greater(mean_score, 0.80)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = KFold(n_splits, shuffle=True, random_state=0)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
cv = KFold(n_splits, shuffle=True, random_state=1)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = StratifiedKFold(n_splits)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.93, mean_score)
assert_greater(mean_score, 0.80)
def test_shuffle_split():
ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X)
ss2 = ShuffleSplit(test_size=2, random_state=0).split(X)
ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X)
for typ in six.integer_types:
ss4 = ShuffleSplit(test_size=typ(2), random_state=0).split(X)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
X = np.arange(7)
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.2).split(X, y))
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 3, 2).split(X, y))
X = np.arange(9)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.6, 8).split(X, y))
# Train size or test size too small
assert_raises(ValueError, next,
StratifiedShuffleSplit(train_size=2).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(test_size=2).split(X, y))
def test_stratified_shuffle_split_respects_test_size():
y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])
test_size = 5
train_size = 10
sss = StratifiedShuffleSplit(6, test_size=test_size, train_size=train_size,
random_state=0).split(np.ones(len(y)), y)
for train, test in sss:
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50),
np.concatenate([[i] * (100 + i) for i in range(11)]),
[1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'],
]
for y in ys:
sss = StratifiedShuffleSplit(6, test_size=0.33,
random_state=0).split(np.ones(len(y)), y)
y = np.asanyarray(y) # To make it indexable for y[train]
# this is how test-size is computed internally
# in _validate_shuffle_split
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Check that the folds preserve the class proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test that StratifiedShuffleSplit indices are drawn with an
    # equal chance
n_folds = 5
n_splits = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
prob = bf.pmf(count)
assert_true(prob > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
groups = np.array((n_samples // 2) * [0, 1])
splits = StratifiedShuffleSplit(n_splits=n_splits,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits_actual = 0
for train, test in splits.split(X=np.ones(n_samples), y=groups):
n_splits_actual += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits_actual, n_splits)
n_train, n_test = _validate_shuffle_split(
n_samples, test_size=1. / n_folds, train_size=1. - (1. / n_folds))
assert_equal(len(train), n_train)
assert_equal(len(test), n_test)
assert_equal(len(set(train).intersection(test)), 0)
group_counts = np.unique(groups)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(n_train + n_test, len(groups))
assert_equal(len(group_counts), 2)
ex_test_p = float(n_test) / n_samples
ex_train_p = float(n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
y = [0, 1, 2, 3] * 3 + [4, 5] * 5
X = np.ones_like(y)
sss = StratifiedShuffleSplit(n_splits=1,
test_size=0.5, random_state=0)
train, test = next(iter(sss.split(X=X, y=y)))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = PredefinedSplit(folds)
    # n_splits is simply the number of unique folds
assert_equal(len(np.unique(folds)), ps.get_n_splits())
for train_ind, test_ind in ps.split():
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_group_shuffle_split():
for groups_i in test_groups:
X = y = np.ones(len(groups_i))
n_splits = 6
test_size = 1./3
slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(slo.get_n_splits(X, y, groups=groups_i), n_splits)
l_unique = np.unique(groups_i)
l = np.asarray(groups_i)
for train, test in slo.split(X, y, groups=groups_i):
# First test: no train group is in the test set and vice versa
l_train_unique = np.unique(l[train])
l_test_unique = np.unique(l[test])
assert_false(np.any(np.in1d(l[train], l_test_unique)))
assert_false(np.any(np.in1d(l[test], l_train_unique)))
# Second test: train and test add up to all the data
assert_equal(l[train].size + l[test].size, l.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test:
# unique train and test groups are correct, +- 1 for rounding error
assert_true(abs(len(l_test_unique) -
round(test_size * len(l_unique))) <= 1)
assert_true(abs(len(l_train_unique) -
round((1.0 - test_size) * len(l_unique))) <= 1)
def test_leave_one_p_group_out():
logo = LeaveOneGroupOut()
lpgo_1 = LeavePGroupsOut(n_groups=1)
lpgo_2 = LeavePGroupsOut(n_groups=2)
# Make sure the repr works
assert_equal(repr(logo), 'LeaveOneGroupOut()')
assert_equal(repr(lpgo_1), 'LeavePGroupsOut(n_groups=1)')
assert_equal(repr(lpgo_2), 'LeavePGroupsOut(n_groups=2)')
assert_equal(repr(LeavePGroupsOut(n_groups=3)),
'LeavePGroupsOut(n_groups=3)')
for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1),
(lpgo_2, 2))):
for i, groups_i in enumerate(test_groups):
n_groups = len(np.unique(groups_i))
n_splits = (n_groups if p_groups_out == 1
else n_groups * (n_groups - 1) / 2)
X = y = np.ones(len(groups_i))
# Test that the length is correct
assert_equal(cv.get_n_splits(X, y, groups=groups_i), n_splits)
groups_arr = np.asarray(groups_i)
# Split using the original list / array / list of string groups_i
for train, test in cv.split(X, y, groups=groups_i):
# First test: no train group is in the test set and vice versa
assert_array_equal(np.intersect1d(groups_arr[train],
groups_arr[test]).tolist(),
[])
# Second test: train and test add up to all the data
assert_equal(len(train) + len(test), len(groups_i))
# Third test:
# The number of groups in test must be equal to p_groups_out
assert_true(np.unique(groups_arr[test]).shape[0], p_groups_out)
def test_leave_group_out_changing_groups():
# Check that LeaveOneGroupOut and LeavePGroupsOut work normally if
# the groups variable is changed before calling split
groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
X = np.ones(len(groups))
groups_changing = np.array(groups, copy=True)
lolo = LeaveOneGroupOut().split(X, groups=groups)
lolo_changing = LeaveOneGroupOut().split(X, groups=groups)
lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
groups_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
    # n_splits = number of 2-group (p) combinations of the unique groups = C(3, 2) = 3
assert_equal(
3, LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X,
groups=groups))
    # n_splits = number of unique groups (C(n_unique_groups, 1) = n_unique_groups)
assert_equal(3, LeaveOneGroupOut().get_n_splits(X, y=X,
groups=groups))
def test_leave_one_p_group_out_error_on_fewer_number_of_groups():
X = y = groups = np.ones(0)
assert_raise_message(ValueError, "Found array with 0 sample(s)", next,
LeaveOneGroupOut().split(X, y, groups))
X = y = groups = np.ones(1)
msg = ("The groups parameter contains fewer than 2 unique groups ([ 1.]). "
"LeaveOneGroupOut expects at least 2.")
assert_raise_message(ValueError, msg, next,
LeaveOneGroupOut().split(X, y, groups))
X = y = groups = np.ones(1)
msg = ("The groups parameter contains fewer than (or equal to) n_groups "
"(3) numbers of unique groups ([ 1.]). LeavePGroupsOut expects "
"that at least n_groups + 1 (4) unique groups be present")
assert_raise_message(ValueError, msg, next,
LeavePGroupsOut(n_groups=3).split(X, y, groups))
X = y = groups = np.arange(3)
msg = ("The groups parameter contains fewer than (or equal to) n_groups "
"(3) numbers of unique groups ([0 1 2]). LeavePGroupsOut expects "
"that at least n_groups + 1 (4) unique groups be present")
assert_raise_message(ValueError, msg, next,
LeavePGroupsOut(n_groups=3).split(X, y, groups))
def test_train_test_split_errors():
assert_raises(ValueError, train_test_split)
assert_raises(ValueError, train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# don't convert lists to anything else by default
split = train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = train_test_split(y, test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
@ignore_warnings
def train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_sparse():
# check that train_test_split converts scipy sparse matrices
# to csr, as stated in the documentation
X = np.arange(100).reshape((10, 10))
sparse_types = [csr_matrix, csc_matrix, coo_matrix]
for InputFeatureType in sparse_types:
X_s = InputFeatureType(X)
X_train, X_test = train_test_split(X_s)
assert_true(isinstance(X_train, csr_matrix))
assert_true(isinstance(X_test, csr_matrix))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = train_test_split(X_df)
def train_test_split_list_input():
# Check that when y is a list / list of string labels, it works.
X = np.ones(7)
y1 = ['1'] * 4 + ['0'] * 3
y2 = np.hstack((np.ones(4), np.zeros(3)))
y3 = y2.tolist()
for stratify in (True, False):
X_train1, X_test1, y_train1, y_test1 = train_test_split(
X, y1, stratify=y1 if stratify else None, random_state=0)
X_train2, X_test2, y_train2, y_test2 = train_test_split(
X, y2, stratify=y2 if stratify else None, random_state=0)
X_train3, X_test3, y_train3, y_test3 = train_test_split(
X, y3, stratify=y3 if stratify else None, random_state=0)
np.testing.assert_equal(X_train1, X_train2)
np.testing.assert_equal(y_train2, y_train3)
np.testing.assert_equal(X_test1, X_test3)
np.testing.assert_equal(y_test3, y_test2)
def test_shufflesplit_errors():
# When the {test|train}_size is a float/invalid, error is raised at init
assert_raises(ValueError, ShuffleSplit, test_size=None, train_size=None)
assert_raises(ValueError, ShuffleSplit, test_size=2.0)
assert_raises(ValueError, ShuffleSplit, test_size=1.0)
assert_raises(ValueError, ShuffleSplit, test_size=0.1, train_size=0.95)
assert_raises(ValueError, ShuffleSplit, train_size=1j)
# When the {test|train}_size is an int, validation is based on the input X
# and happens at split(...)
assert_raises(ValueError, next, ShuffleSplit(test_size=11).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=10).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=8,
train_size=3).split(X))
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = ShuffleSplit(random_state=21)
assert_array_equal(list(a for a, b in ss.split(X)),
list(a for a, b in ss.split(X)))
def test_stratifiedshufflesplit_list_input():
# Check that when y is a list / list of string labels, it works.
sss = StratifiedShuffleSplit(test_size=2, random_state=42)
X = np.ones(7)
y1 = ['1'] * 4 + ['0'] * 3
y2 = np.hstack((np.ones(4), np.zeros(3)))
y3 = y2.tolist()
np.testing.assert_equal(list(sss.split(X, y1)),
list(sss.split(X, y2)))
np.testing.assert_equal(list(sss.split(X, y3)),
list(sss.split(X, y2)))
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
train_test_split(X, y, test_size=0.2, random_state=42)
def test_check_cv():
X = np.ones(9)
cv = check_cv(3, classifier=False)
# Use numpy.testing.assert_equal which recursively compares
# lists of lists
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = check_cv(3, y_binary, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)),
list(cv.split(X, y_binary)))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = check_cv(3, y_multiclass, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)),
list(cv.split(X, y_multiclass)))
X = np.ones(5)
y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1],
[1, 1, 0, 1], [0, 0, 1, 0]])
cv = check_cv(3, y_multilabel, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = check_cv(3, y_multioutput, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
# Check if the old style classes are wrapped to have a split method
X = np.ones(9)
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv1 = check_cv(3, y_multiclass, classifier=True)
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv2 = check_cv(OldSKF(y_multiclass, n_folds=3))
np.testing.assert_equal(list(cv1.split(X, y_multiclass)),
list(cv2.split()))
assert_raises(ValueError, check_cv, cv="lolo")
def test_cv_iterable_wrapper():
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv = OldSKF(y_multiclass, n_folds=3)
wrapped_old_skf = _CVIterableWrapper(cv)
# Check if split works correctly
np.testing.assert_equal(list(cv), list(wrapped_old_skf.split()))
# Check if get_n_splits works correctly
assert_equal(len(cv), wrapped_old_skf.get_n_splits())
kf_iter = KFold(n_splits=5).split(X, y)
kf_iter_wrapped = check_cv(kf_iter)
    # Since the wrapped iterable is materialized as a list and stored,
    # split can be called any number of times and still produce
    # consistent results.
assert_array_equal(list(kf_iter_wrapped.split(X, y)),
list(kf_iter_wrapped.split(X, y)))
    # If the splits are randomized, successive calls to split yield different
    # results
kf_randomized_iter = KFold(n_splits=5, shuffle=True).split(X, y)
kf_randomized_iter_wrapped = check_cv(kf_randomized_iter)
assert_array_equal(list(kf_randomized_iter_wrapped.split(X, y)),
list(kf_randomized_iter_wrapped.split(X, y)))
assert_true(np.any(np.array(list(kf_iter_wrapped.split(X, y))) !=
np.array(list(kf_randomized_iter_wrapped.split(X, y)))))
def test_group_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_groups = 15
n_samples = 1000
n_splits = 5
X = y = np.ones(n_samples)
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
groups = rng.randint(0, n_groups, n_samples)
ideal_n_groups_per_fold = n_samples // n_splits
len(np.unique(groups))
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
lkf = GroupKFold(n_splits=n_splits)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# Construct the test data
groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David',
'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'])
n_groups = len(np.unique(groups))
n_samples = len(groups)
n_splits = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
ideal_n_groups_per_fold = n_samples // n_splits
X = y = np.ones(n_samples)
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# groups can also be a list
cv_iter = list(lkf.split(X, y, groups.tolist()))
for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups),
cv_iter):
assert_array_equal(train1, train2)
assert_array_equal(test1, test2)
# Should fail if there are more folds than groups
groups = np.array([1, 1, 1, 2, 2])
X = y = np.ones(len(groups))
assert_raises_regexp(ValueError, "Cannot have number of splits.*greater",
next, GroupKFold(n_splits=3).split(X, y, groups))
def test_time_series_cv():
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
# Should fail if there are more folds than samples
assert_raises_regexp(ValueError, "Cannot have number of folds.*greater",
next,
TimeSeriesSplit(n_splits=7).split(X))
tscv = TimeSeriesSplit(2)
# Manually check that Time Series CV preserves the data
# ordering on toy datasets
splits = tscv.split(X[:-1])
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [2, 3])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3])
assert_array_equal(test, [4, 5])
splits = TimeSeriesSplit(2).split(X)
train, test = next(splits)
assert_array_equal(train, [0, 1, 2])
assert_array_equal(test, [3, 4])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3, 4])
assert_array_equal(test, [5, 6])
# Check get_n_splits returns the correct number of splits
splits = TimeSeriesSplit(2).split(X)
n_splits_actual = len(list(splits))
assert_equal(n_splits_actual, tscv.get_n_splits())
assert_equal(n_splits_actual, 2)
def test_nested_cv():
# Test if nested cross validation works with different combinations of cv
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 5, 15)
cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(),
StratifiedShuffleSplit(n_splits=3, random_state=0)]
for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
cv=inner_cv)
cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
fit_params={'groups': groups})
def test_build_repr():
class MockSplitter:
def __init__(self, a, b=0, c=None):
self.a = a
self.b = b
self.c = c
def __repr__(self):
return _build_repr(self)
assert_equal(repr(MockSplitter(5, 6)), "MockSplitter(a=5, b=6, c=None)")
|
bsd-3-clause
|
bikash/kaggleCompetition
|
microsoft malware/code/single_28.py
|
1
|
11104
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 01:55:47 2015
@author: marios michailidis
"""
# licence: FreeBSD
"""
Copyright (c) 2015, Marios Michailidis
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import random
import numpy as np
import scipy as spss
from scipy.sparse import csr_matrix
import sys
sys.path.append("../../xgboost/wrapper")
import xgboost as xgb
from sklearn.ensemble import ExtraTreesClassifier
thre=25
num_round=11500
lr=0.005
max_de=7
subsam=0.4
colsample_bytree=0.5
gamma =0.001
min_child_weight=0.05
seed=1
objective='multi:softprob'
param = {}
param['booster']= 'gbtree'#gblinear
param['objective'] = objective
param['bst:eta'] = lr
param['seed']=seed
param['bst:max_depth'] = max_de
param['eval_metric'] = 'auc'
param['bst:min_child_weight']=min_child_weight
param['silent'] = 1
param['nthread'] = thre
param['bst:subsample'] = subsam
param['num_class'] = 9
param['gamma'] = gamma
param['colsample_bytree']=colsample_bytree
def transform2dtos(D2,y2):
    # transform a 2d array of per-class predictions to a single 1d array
    # and expand the target into matching one-vs-rest 0/1 indicators
d1=[]
y1=[]
for i in range (0,len(D2)):
for j in range (0,len(D2[0])):
d1.append(float(D2[i][j]))
if y2[i]==float(j):
y1.append(1.0)
else:
y1.append(0.0)
return d1,y1
""" print predictions in file"""
def printfilewithtarget(X, name):
print("start print the training file with target")
wfile=open(name + ".csv", "w")
for i in range (0, len(X)):
wfile.write(str(X[i][0]) )
for j in range (1, len(X[i])):
wfile.write("," +str(X[i][j]) )
wfile.write("\n")
wfile.close()
print("done")
""" the metric we are being tested on"""
def logloss_metric(p, y):
logloss=0
for i in range (0, len(p)):
for j in range (0,len(p[i])):
if y[i]==float(j):
logloss+= np.log(spss.maximum(spss.minimum(p[i][j],1-(1e-15) ),1e-15 ))
return -logloss/float(len(y))
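# Quick worked example of the metric above (illustrative): with
# p = [[0.9, 0.1], [0.2, 0.8]] and y = [0.0, 1.0], only the probabilities of
# the true classes contribute, so logloss = -(log(0.9) + log(0.8)) / 2, which
# is roughly 0.1643.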
"""Load a csv file"""
def load(name):
print("start reading file with target")
wfile=open(name , "r")
line=wfile.readline().replace("\n","")
splits=line.split(",")
datalen=len(splits)
wfile.close()
X = np.loadtxt(open( name), delimiter=',',usecols=range(0, datalen), skiprows=0)
print("done")
return np.array(X)
""" use to concatebate the various kfold sets together"""
def cving(x1, x2, x3, x4,x5, y1 ,y2, y3, y4, y5, ind1, ind2, ind3, ind4 ,ind5, num):
if num==0:
xwhole=np.concatenate((x2,x3,x4,x5), axis=0)
yhol=np.concatenate((y2,y3,y4,y5), axis=0)
return x1,y1 ,ind1,xwhole,yhol
elif num==1:
xwhole=np.concatenate((x1,x3,x4,x5), axis=0)
yhol=np.concatenate((y1,y3,y4,y5), axis=0)
return x2,y2 ,ind2,xwhole,yhol
elif num==2:
xwhole=np.concatenate((x1,x2,x4,x5), axis=0)
yhol=np.concatenate((y1,y2,y4,y5), axis=0)
return x3,y3 ,ind3,xwhole,yhol
elif num==3:
xwhole=np.concatenate((x1,x2,x3,x5), axis=0)
yhol=np.concatenate((y1,y2,y3,y5), axis=0)
return x4,y4 ,ind4,xwhole,yhol
else :
xwhole=np.concatenate((x1,x2,x3,x4), axis=0)
yhol=np.concatenate((y1,y2,y3,y4), axis=0)
return x5,y5 ,ind5,xwhole,yhol
""" Splits data to 5 kfold sets"""
def split_array_in_5(array, seed):
random.seed(seed)
new_arra1=[]
new_arra2=[]
new_arra3=[]
new_arra4=[]
new_arra5=[]
indiceds1=[]
indiceds2=[]
indiceds3=[]
indiceds4=[]
indiceds5=[]
for j in range (0,len(array)):
rand=random.random()
if rand <0.2:
new_arra1.append(array[j])
indiceds1.append(j)
elif rand <0.4:
new_arra2.append(array[j])
indiceds2.append(j)
elif rand <0.6:
new_arra3.append(array[j])
indiceds3.append(j)
elif rand <0.8:
new_arra4.append(array[j])
indiceds4.append(j)
else :
new_arra5.append(array[j])
indiceds5.append(j)
#convert to numpy
new_arra1=np.array(new_arra1)
new_arra2=np.array(new_arra2)
new_arra3=np.array(new_arra3)
new_arra4=np.array(new_arra4)
new_arra5=np.array(new_arra5)
#return arrays and indices
return new_arra1,new_arra2,new_arra3,new_arra4,new_arra5,indiceds1,indiceds2,indiceds3,indiceds4,indiceds5
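# Illustrative use of the two helpers above (a minimal sketch; the variable
# names are only examples):
#   x1, x2, x3, x4, x5, i1, i2, i3, i4, i5 = split_array_in_5(X, seed)
#   y1, y2, y3, y4, y5, _, _, _, _, _ = split_array_in_5(y, seed)
#   X_cv, y_cv, ind_cv, X_tr, y_tr = cving(x1, x2, x3, x4, x5,
#                                          y1, y2, y3, y4, y5,
#                                          i1, i2, i3, i4, i5, 0)
# fold 0 is held out and the remaining four folds are concatenated for
# training; reusing the same seed keeps the X and y rows aligned.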
def scalepreds(prs):
for i in range (0, len(prs)):
suum=0.0
for j in range (0,9):
suum+=prs[i][j]
for j in range (0,9):
prs[i][j]/=suum
"""loads first columns of a file"""
def loadfirstcolumn(filename):
pred=[]
op=open(filename,'r')
op.readline() #header
for line in op:
line=line.replace('\n','')
sp=line.split(',')
#load always the last columns
pred.append(sp[0])
op.close()
return pred
"""loads last columns of a file"""
def loadlastcolumn(filename):
pred=[]
op=open(filename,'r')
op.readline() #header
for line in op:
line=line.replace('\n','')
sp=line.split(',')
#load always the last columns
pred.append(float(sp[len(sp)-1])-1.0)
op.close()
return pred
""" This is the main method"""
def main():
directory=''
train_file="train_28_std.csv"
test_file="test_28_std.csv"
SEED= 15
outset="Gert282xtra115k"
y= loadlastcolumn(directory+"trainLabels.csv")
ids=loadfirstcolumn(directory+"sampleSubmission.csv")
model=ExtraTreesClassifier(n_estimators=1000, criterion='entropy', max_depth=16, min_samples_split=2,min_samples_leaf=1, max_features=0.5,n_jobs=25, random_state=1)
X=load(train_file)
print ("train samples: %d columns: %d " % (len(X) , len(X[0])))
X_test=load(test_file)
print ("train samples: %d columns: %d" % (len(X_test) , len(X_test[0])))
    number_of_folds=5 # repeat the CV procedure with 5 folds to get more precise results
train_stacker=[ [0.0 for d in range (0,9)] for k in range (0,len(X)) ]
test_stacker=[[0.0 for d in range (0,9)] for k in range (0,len(X_test))]
#label_stacker=[0 for k in range (0,len(X))]
    #split training data
x1,x2,x3,x4,x5,in1,in2,in3,in4,in5=split_array_in_5(X, SEED)
y1,y2,y3,y4,y5,iny1,iny2,iny3,iny4,iny5=split_array_in_5(y, SEED)
#create target variable
mean_log = 0.0
for i in range(0,number_of_folds):
X_cv,y_cv,indcv,X_train,y_train=cving(x1, x2, x3, x4,x5, y1 ,y2, y3, y4, y5,in1, in2, in3, in4 ,in5, i)
print (" train size: %d. test size: %d, cols: %d " % (len(X_train) ,len(X_cv) ,len(X_train[0]) ))
""" model XGBOOST classifier"""
xgmat = xgb.DMatrix( csr_matrix(X_train), label=y_train, missing =-999.0 )
bst = xgb.train( param.items(), xgmat, num_round );
xgmat_cv = xgb.DMatrix( csr_matrix(X_cv), missing =-999.0)
preds =bst.predict( xgmat_cv ).reshape( len(X_cv), 9).tolist()
scalepreds(preds)
"""now model scikit classifier"""
model.fit(X_train, y_train)
predsextra = model.predict_proba(X_cv)
scalepreds(predsextra)
for pr in range (0,len(preds)):
for d in range (0,9):
preds[pr][d]=preds[pr][d]*0.8 + predsextra[pr][d]*0.2
# compute Loglikelihood metric for this CV fold
loglike = logloss_metric( preds,y_cv)
print "size train: %d size cv: %d Loglikelihood (fold %d/%d): %f" % (len(X_train), len(X_cv), i + 1, number_of_folds, loglike)
mean_log += loglike
#save the results
no=0
for real_index in indcv:
for d in range (0,9):
train_stacker[real_index][d]=(preds[no][d])
no+=1
if (number_of_folds)>0:
mean_log/=number_of_folds
print (" Average M loglikelihood: %f" % (mean_log) )
xgmat = xgb.DMatrix( csr_matrix(X), label=y, missing =-999.0 )
bst = xgb.train( param.items(), xgmat, num_round );
xgmat_cv = xgb.DMatrix(csr_matrix(X_test), missing =-999.0)
preds =bst.predict( xgmat_cv ).reshape( len(X_test), 9 ).tolist()
scalepreds(preds)
#predicting for test
model.fit(X, y)
predsextra = model.predict_proba(X_test)
scalepreds(predsextra)
for pr in range (0,len(preds)):
for d in range (0,9):
test_stacker[pr][d]=preds[pr][d]*0.8 + predsextra[pr][d]*0.2
# === Predictions === #
print (" printing datasets ")
printfilewithtarget(train_stacker, outset + "train")
printfilewithtarget(test_stacker, outset + "test")
print("Write results...")
output_file = "submission_"+str( (mean_log ))+".csv"
print("Writing submission to %s" % output_file)
f = open(output_file, "w")
f.write("Id")# the header
for b in range (1,10):
f.write("," + str("Prediction" + str(b) ) )
f.write("\n")
for g in range(0, len(test_stacker)) :
f.write("%s" % ((ids[g])))
for prediction in test_stacker[g]:
f.write(",%f" % (prediction))
f.write("\n")
f.close()
print("Done.")
if __name__=="__main__":
main()
|
apache-2.0
|
peterbarker/ardupilot-1
|
Tools/mavproxy_modules/lib/magcal_graph_ui.py
|
108
|
8248
|
# Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pymavlink.mavutil import mavlink
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
import geodesic_grid as grid
class MagcalPanel(wx.Panel):
_status_markup_strings = {
mavlink.MAG_CAL_NOT_STARTED: 'Not started',
mavlink.MAG_CAL_WAITING_TO_START: 'Waiting to start',
mavlink.MAG_CAL_RUNNING_STEP_ONE: 'Step one',
mavlink.MAG_CAL_RUNNING_STEP_TWO: 'Step two',
mavlink.MAG_CAL_SUCCESS: '<span color="blue">Success</span>',
mavlink.MAG_CAL_FAILED: '<span color="red">Failed</span>',
}
_empty_color = '#7ea6ce'
_filled_color = '#4680b9'
def __init__(self, *k, **kw):
super(MagcalPanel, self).__init__(*k, **kw)
facecolor = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
fig = plt.figure(facecolor=facecolor, figsize=(1,1))
self._canvas = FigureCanvas(self, wx.ID_ANY, fig)
self._canvas.SetMinSize((300,300))
self._id_text = wx.StaticText(self, wx.ID_ANY)
self._status_text = wx.StaticText(self, wx.ID_ANY)
self._completion_pct_text = wx.StaticText(self, wx.ID_ANY)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._id_text)
sizer.Add(self._status_text)
sizer.Add(self._completion_pct_text)
sizer.Add(self._canvas, proportion=1, flag=wx.EXPAND)
self.SetSizer(sizer)
ax = fig.add_subplot(111, axis_bgcolor=facecolor, projection='3d')
self.configure_plot(ax)
def configure_plot(self, ax):
extra = .5
lim = grid.radius + extra
ax.set_xlim3d(-lim, lim)
ax.set_ylim3d(-lim, lim)
ax.set_zlim3d(-lim, lim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
self._polygons_collection = Poly3DCollection(
grid.sections_triangles,
edgecolors='#386694',
)
ax.add_collection3d(self._polygons_collection)
def update_status_from_mavlink(self, m):
status_string = self._status_markup_strings.get(m.cal_status, '???')
self._status_text.SetLabelMarkup(
'<b>Status:</b> %s' % status_string,
)
def mavlink_magcal_report(self, m):
self.update_status_from_mavlink(m)
self._completion_pct_text.SetLabel('')
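    # In the progress handler below, each byte of completion_mask packs the
    # hit state of 8 geodesic sections, least-significant bit first: for
    # example, a byte value of 0b00000101 at index i marks sections i*8 and
    # i*8 + 2 as hit.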
def mavlink_magcal_progress(self, m):
facecolors = []
for i, mask in enumerate(m.completion_mask):
for j in range(8):
section = i * 8 + j
if mask & 1 << j:
facecolor = self._filled_color
else:
facecolor = self._empty_color
facecolors.append(facecolor)
self._polygons_collection.set_facecolors(facecolors)
self._canvas.draw()
self._id_text.SetLabelMarkup(
'<b>Compass id:</b> %d' % m.compass_id
)
self._completion_pct_text.SetLabelMarkup(
'<b>Completion:</b> %d%%' % m.completion_pct
)
self.update_status_from_mavlink(m)
_legend_panel = None
@staticmethod
def legend_panel(*k, **kw):
if MagcalPanel._legend_panel:
return MagcalPanel._legend_panel
p = MagcalPanel._legend_panel = wx.Panel(*k, **kw)
sizer = wx.BoxSizer(wx.HORIZONTAL)
p.SetSizer(sizer)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._empty_color)
sizer.Add(marker, flag=wx.ALIGN_CENTER)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections not hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._filled_color)
sizer.Add(marker, border=10, flag=wx.ALIGN_CENTER | wx.LEFT)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
return p
class MagcalFrame(wx.Frame):
def __init__(self, conn):
super(MagcalFrame, self).__init__(
None,
wx.ID_ANY,
title='Magcal Graph',
)
self.SetMinSize((300, 300))
self._conn = conn
self._main_panel = wx.ScrolledWindow(self, wx.ID_ANY)
self._main_panel.SetScrollbars(1, 1, 1, 1)
self._magcal_panels = {}
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._main_panel.SetSizer(self._sizer)
idle_text = wx.StaticText(self._main_panel, wx.ID_ANY)
idle_text.SetLabelMarkup('<i>No calibration messages received yet...</i>')
idle_text.SetForegroundColour('#444444')
self._sizer.AddStretchSpacer()
self._sizer.Add(
idle_text,
proportion=0,
flag=wx.ALIGN_CENTER | wx.ALL,
border=10,
)
self._sizer.AddStretchSpacer()
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.timer_callback, self._timer)
self._timer.Start(200)
def add_compass(self, id):
if not self._magcal_panels:
self._sizer.Clear(deleteWindows=True)
self._magcal_panels_sizer = wx.BoxSizer(wx.HORIZONTAL)
self._sizer.Add(
self._magcal_panels_sizer,
proportion=1,
flag=wx.EXPAND,
)
legend = MagcalPanel.legend_panel(self._main_panel, wx.ID_ANY)
self._sizer.Add(
legend,
proportion=0,
flag=wx.ALIGN_CENTER,
)
self._magcal_panels[id] = MagcalPanel(self._main_panel, wx.ID_ANY)
self._magcal_panels_sizer.Add(
self._magcal_panels[id],
proportion=1,
border=10,
flag=wx.EXPAND | wx.ALL,
)
def timer_callback(self, evt):
close_requested = False
mavlink_msgs = {}
while self._conn.poll():
m = self._conn.recv()
if isinstance(m, str) and m == 'close':
close_requested = True
continue
if m.compass_id not in mavlink_msgs:
# Keep the last two messages so that we get the last progress
# if the last message is the calibration report.
mavlink_msgs[m.compass_id] = [None, m]
else:
l = mavlink_msgs[m.compass_id]
l[0] = l[1]
l[1] = m
if close_requested:
self._timer.Stop()
self.Destroy()
return
if not mavlink_msgs:
return
needs_fit = False
for k in mavlink_msgs:
if k not in self._magcal_panels:
self.add_compass(k)
needs_fit = True
if needs_fit:
self._sizer.Fit(self)
for k, l in mavlink_msgs.items():
for m in l:
if not m:
continue
panel = self._magcal_panels[k]
if m.get_type() == 'MAG_CAL_PROGRESS':
panel.mavlink_magcal_progress(m)
elif m.get_type() == 'MAG_CAL_REPORT':
panel.mavlink_magcal_report(m)
|
gpl-3.0
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/sklearn/discriminant_analysis.py
|
7
|
28643
|
"""
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
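# Minimal usage sketch for _cov (assuming a small random design matrix):
#   rng = np.random.RandomState(0)
#   X = rng.randn(20, 3)
#   _cov(X)          # empirical covariance
#   _cov(X, 'auto')  # Ledoit-Wolf shrinkage estimated on standardized data
#   _cov(X, 0.5)     # fixed 0.5 blend via shrunk_covariance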
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
.. versionadded:: 0.17
tol : float, optional
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
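        # The rows of coef_ are Sigma^{-1} mu_k (obtained with lstsq rather
        # than an explicit inverse), and intercept_[k] is
        # -0.5 * mu_k^T Sigma^{-1} mu_k + log(pi_k), i.e. the affine part of
        # the linear discriminant
        # delta_k(x) = x^T Sigma^{-1} mu_k - 0.5 * mu_k^T Sigma^{-1} mu_k
        #              + log(pi_k).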
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
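        # eigh(Sb, Sw) solves the generalized eigenproblem Sb v = lambda Sw v,
        # so the leading eigenvectors maximise the between-class to
        # within-class scatter ratio (the Rayleigh coefficient mentioned in
        # the docstring).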
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within-class (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self._max_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
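        # Projecting coef back through scalings_ expresses the discriminant
        # directly in the original feature space; the final correction removes
        # the contribution of the overall mean xbar_ used for centering.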
def fit(self, X, y, store_covariance=None, tol=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
        .. versionchanged:: 0.17
            Deprecated *store_covariance* has been moved to the main
            constructor.
        .. versionchanged:: 0.17
            Deprecated *tol* has been moved to the main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("The parameter 'store_covariance' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariance = store_covariance
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Get the maximum number of components
if self.n_components is None:
self._max_components = len(self.classes_) - 1
else:
self._max_components = min(len(self.classes_) - 1,
self.n_components)
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
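        # The three in-place operations above turn the decision values d into
        # the logistic sigmoid 1 / (1 + exp(-d)) without allocating
        # temporaries.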
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y, store_covariances=None, tol=None):
"""Fit the model according to the given training data and parameters.
        .. versionchanged:: 0.17
            Deprecated *store_covariances* has been moved to the main
            constructor.
        .. versionchanged:: 0.17
            Deprecated *tol* has been moved to the main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
if store_covariances:
warnings.warn("The parameter 'store_covariances' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariances = store_covariances
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
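            # This applies the docstring's regularization
            # (1 - reg_param) * Sigma + reg_param * I in the eigenvalue
            # domain, since S2 holds the eigenvalues of the class covariance.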
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
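        # norm2[:, k] is the squared Mahalanobis distance to class k (computed
        # through the stored rotation and scaling of its covariance), and
        # u[k] = log|Sigma_k|, so the value returned below is the class
        # log-posterior up to an additive constant.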
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
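# Illustrative sketch appended by the editor (not part of the original
# module): the class docstring example uses the default 'svd' solver; the
# guarded snippet below exercises the 'lsqr' solver with Ledoit-Wolf
# shrinkage, which 'svd' does not support. All *_demo names are demo-only.
if __name__ == "__main__":
    import numpy as np  # already imported at module level; kept for clarity
    X_demo = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y_demo = np.array([1, 1, 1, 2, 2, 2])
    clf_demo = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto')
    clf_demo.fit(X_demo, y_demo)
    # On this toy data the prediction should match the docstring example: [1]
    print(clf_demo.predict([[-0.8, -1]]))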
|
mit
|
jmetzen/scikit-learn
|
sklearn/tests/test_dummy.py
|
186
|
17778
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
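# Illustrative sketch appended by the editor (not an original test): "prior"
# and "most_frequent" predict the same labels but expose different
# probabilities, which is what the strategy tests above rely on. All names
# here are demo-only.
def demo_prior_vs_most_frequent():  # not collected by the test runner
    X = [[0]] * 4
    y = [1, 2, 1, 1]
    prior = DummyClassifier(strategy="prior").fit(X, y)
    freq = DummyClassifier(strategy="most_frequent").fit(X, y)
    assert_array_equal(prior.predict(X), freq.predict(X))
    # "prior" returns the empirical class distribution; "most_frequent" puts
    # all mass on the modal class.
    return prior.predict_proba([X[0]]), freq.predict_proba([X[0]])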
|
bsd-3-clause
|
frank-tancf/scikit-learn
|
examples/gaussian_process/plot_gpc_isoprobability.py
|
45
|
3025
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Iso-probability lines for Gaussian Processes classification (GPC)
=================================================================
A two-dimensional classification example showing iso-probability lines for
the predicted probabilities.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Adapted to GaussianProcessClassifier:
# Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from matplotlib import pyplot as pl
from matplotlib import cm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = np.array(g(X) > 0, dtype=int)
# Instantiate and fit Gaussian Process Model
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)
# Evaluate real function and the predicted probability
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))
# Plot the probabilistic classification iso-values
fig = pl.figure(1)
ax = fig.gca()
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
extent=(-lim, lim, -lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.clim(0, 1)
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, y_prob, [0.666], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, y_prob, [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, y_prob, [0.334], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
|
bsd-3-clause
|
rexshihaoren/scikit-learn
|
sklearn/decomposition/truncated_svd.py
|
199
|
7744
|
"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
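# Illustrative sketch appended by the editor (not part of the original
# module): round-tripping through fit_transform / inverse_transform on the
# same sparse random matrix as the docstring example. All *_demo names are
# demo-only.
if __name__ == "__main__":
    from sklearn.random_projection import sparse_random_matrix
    X_demo = sparse_random_matrix(100, 100, density=0.01, random_state=42)
    svd_demo = TruncatedSVD(n_components=5, random_state=42)
    X_reduced = svd_demo.fit_transform(X_demo)
    X_approx = svd_demo.inverse_transform(X_reduced)
    # The reduced representation is a dense (100, 5) array; the reconstruction
    # is a dense rank-5 approximation of the original (100, 100) matrix.
    print(X_reduced.shape, X_approx.shape)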
|
bsd-3-clause
|
bmazin/ARCONS-pipeline
|
examples/Pal2012-cosmic/makemovieversion1.py
|
1
|
3076
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from util.ObsFile import ObsFile
from util.FileName import FileName
from util import utils
from util import hgPlot
from cosmic.Cosmic import Cosmic
import tables
from hotpix import hotPixels
from scipy.optimize import curve_fit
import pickle
from interval import interval, inf, imath
from cosmic import tsBinner
import sys
import os
run = 'PAL2012'
sundownDate = '20121211'
obsDate = '20121212'
# December 11
# Final sequence, toward end of run, thin high clouds at around 12:50, moved to confirm position at '122234', also at '130752' at 125s. (16,15)
seq5 = ['112709', '113212', '113714', '114216', '114718', '115220', '115722', '120224', '120727', '121229', '121732', '122234', '122736', '123238', '123740', '124242', '124744', '125246', '125748', '130250', '130752', '131254', '131756', '132258', '132800', '133303']
#seq5 = ['120727']
stride = 10
threshold = 100
nAboveThreshold = 0
npList = []
sigList = []
run = 'PAL2012'
sundownDate = '20121211'
obsDate = '20121212'
for seq in seq5:
inFile = open("cosmicTimeList-%s.pkl"%(seq),"rb")
cosmicTimeList = pickle.load(inFile)
binContents = pickle.load(inFile)
cfn = "cosmicMax-%s.h5"%seq
intervals = ObsFile.readCosmicIntervalFromFile(cfn)
for interval in intervals:
print "interval=",interval
fn = FileName(run, sundownDate,obsDate+"-"+seq)
obsFile = ObsFile(fn.obs())
obsFile.loadTimeAdjustmentFile(fn.timeAdjustments())
i0=interval[0]
i1=interval[1]
intervalTime = i1-i0
dt = intervalTime/2
beginTime = max(0,i0-0.000200)
endTime = beginTime + 0.001
integrationTime = endTime-beginTime
nBins = int(np.round(obsFile.ticksPerSec*(endTime-beginTime)+1))
timeHgValues = np.zeros(nBins, dtype=np.int64)
ymax = sys.float_info.max/100.0
for iRow in range(obsFile.nRow):
for iCol in range(obsFile.nCol):
gtpl = obsFile.getTimedPacketList(iRow,iCol,
beginTime,integrationTime)
ts = (gtpl['timestamps'] - beginTime)*obsFile.ticksPerSec
ts64 = np.round(ts).astype(np.uint64)
tsBinner.tsBinner(ts64, timeHgValues)
plt.clf()
plt.plot(timeHgValues)
x0 = (i0-beginTime)*obsFile.ticksPerSec
x1 = (i1-beginTime)*obsFile.ticksPerSec
plt.fill_between((x0,x1),(0,0), (ymax,ymax), alpha=0.2, color='red')
plt.yscale("symlog",linthreshy=0.9)
plt.xlim(0,1000)
#plt.ylim(-0.1,timeHgValues.max())
plt.ylim(-0.1,300)
tick0 = int(np.round(i0*obsFile.ticksPerSec))
plotfn = "cp-%05d-%s-%s-%s-%09d"%(timeHgValues.sum(),run,obsDate,seq,tick0)
plt.title(plotfn)
plt.savefig(plotfn+".png")
print "plotfn=",plotfn
print "to make movie now running this command:"
print "convert -delay 0 `ls -r cp*png` cp.gif"
os.system("convert -delay 0 `ls -r cp*png` cp.gif")
|
gpl-2.0
|
SymbiFlow/edalize
|
edalize/quartus_reporting.py
|
1
|
4331
|
"""Quartus-specific reporting routines
"""
import logging
import re
from typing import Dict, Union
from edalize.reporting import Reporting
logger = logging.getLogger(__name__)
# Reporting is an optional Edalize feature and its required packages may not
# be installed unless Edalize was installed as edalize[reporting]. There is
# currently reduced-functionality feedback, so if the module is used without
# being properly installed log a hopefully helpful error before throwing the
# exception.
import_msg = "Missing package %s. Was edalize installed with the reporting option? (pip install 'edalize[reporting]')"
# This would perhaps be cleaner but more complex with importlib
try:
import pyparsing as pp
except ImportError as e:
logger.exception(import_msg, "pyparsing")
raise e
try:
import pandas as pd
except ImportError as e:
logger.exception(import_msg, "pandas")
raise e
class QuartusReporting(Reporting):
# Override non-default class variables
_resource_rpt_pattern = "*.fit.rpt"
_timing_rpt_pattern = "*.sta.rpt"
_report_encoding = "ISO-8859-1"
@staticmethod
def _parse_tables(report_str: str) -> Dict[str, str]:
"""Parse the tables from a fitter report
Keys are the title of the table, values are the table body
"""
hline = pp.lineStart() + pp.Word("+", "+-") + pp.lineEnd()
title = (
pp.lineStart()
+ ";"
+ pp.SkipTo(";")("title").setParseAction(pp.tokenMap(str.strip))
+ ";"
+ pp.lineEnd()
)
# Grab everything until the next horizontal line(s). Tables with
# column headings will have a horizontal line after the headings and
# at the end of the table. Odd tables without section headings will
# only have a single horizontal line.
data = pp.SkipTo(hline, failOn=pp.lineEnd() * 2, include=True)
table = hline + title + pp.Combine(hline + data * (1, 2))("body")
# Make line endings significant
table.setWhitespaceChars(" \t")
result = {t.title: t.body for t in table.searchString(report_str)}
return result
@classmethod
def report_timing(cls, report_file: str) -> Dict[str, pd.DataFrame]:
return cls._report_to_df(cls._parse_tables, report_file)
@classmethod
def report_resources(cls, report_file: str) -> Dict[str, pd.DataFrame]:
return cls._report_to_df(cls._parse_tables, report_file)
@staticmethod
def report_summary(
resources: pd.DataFrame, timing: Dict[str, pd.DataFrame]
) -> Dict[str, Union[int, float]]:
util = resources["Fitter Resource Utilization by Entity"].iloc[0]
resource_buckets = {
"lut": ["Logic Cells", "Combinational ALUTs"],
"reg": ["Dedicated Logic Registers"],
"blkmem": ["M9Ks", "M10Ks", "M20Ks"],
"dsp": ["DSP Elements", "DSP Blocks"],
}
summary = {} # type: Dict[str, Union[int, float]]
# Resources in this table are mostly of the form 345.5 (123.3) and we
# want the first (total) number
for k, v in resource_buckets.items():
key = util.index.intersection(v)[0]
cell = util.at[key]
summary[k] = int(str(cell).split()[0])
# Get a frequency like 175.0 MHz and just return the numeric part
freq = timing["Clocks"].set_index("Clock Name")["Frequency"]
summary["constraint"] = (
freq.str.split(expand=True)[0].astype(float).to_dict()
)
# Find the Fmax summary table for the slowest corner, such as "Slow
# 1200mV 85C Model Fmax Summary". The voltage and temperature will
# depend on the device, so find the match with the highest
# temperature.
slow_fmax = re.compile(
r"Slow (?P<voltage>\d+)mV (?P<temp>\d+)C Model Fmax Summary"
)
title_matches = [slow_fmax.match(title) for title in timing.keys()]
slow_title = max(
[t for t in title_matches if t], key=lambda x: x.group("temp")
).string
fmax = timing[slow_title].set_index("Clock Name")["Restricted Fmax"]
series = fmax.str.split(expand=True)[0].astype(float)
summary["fmax"] = series.to_dict()
return summary
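# Illustrative sketch appended by the editor (not part of edalize): how the
# slow-corner regular expression in report_summary() picks the hottest
# "Slow ... Model Fmax Summary" table. The titles below are made up.
if __name__ == "__main__":
    demo_titles = [
        "Slow 1200mV 0C Model Fmax Summary",
        "Slow 1200mV 85C Model Fmax Summary",
        "Fast 1200mV 0C Model Fmax Summary",
    ]
    slow_fmax_demo = re.compile(
        r"Slow (?P<voltage>\d+)mV (?P<temp>\d+)C Model Fmax Summary"
    )
    matches = [m for m in (slow_fmax_demo.match(t) for t in demo_titles) if m]
    # As in report_summary(), the key is the *string* temperature, which is
    # fine for the usual corner values such as 0C and 85C.
    print(max(matches, key=lambda m: m.group("temp")).string)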
|
bsd-2-clause
|
murali-munna/scikit-learn
|
sklearn/feature_selection/tests/test_feature_select.py
|
143
|
22295
|
"""
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
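    # X and Y above are already mean-centred, so centering changes only the
    # residual degrees of freedom (n_samples - 2 vs. n_samples - 1), which is
    # exactly the ratio checked here.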
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
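# Illustrative sketch (not part of the original test suite): how the empirical
# false discovery rate checked above is computed from a boolean support mask,
# assuming the first ``n_informative`` columns are the truly relevant features.
def _example_empirical_fdr(support, n_informative):
    n_false_positives = np.sum(support[n_informative:])
    n_true_positives = np.sum(support[:n_informative])
    if n_false_positives == 0:
        return 0.
    return n_false_positives / (n_true_positives + n_false_positives)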
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([0, 1, 2])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
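# Illustrative sketch (not part of the original test suite): the equivalence
# relied on throughout these tests between GenericUnivariateSelect and the
# dedicated selectors, shown once on synthetic data (values are arbitrary).
def _example_generic_equals_dedicated():
    X, y = make_classification(n_samples=100, n_features=20, random_state=0)
    X_kbest = SelectKBest(f_classif, k=5).fit_transform(X, y)
    X_generic = GenericUnivariateSelect(
        f_classif, mode='k_best', param=5).fit_transform(X, y)
    assert_array_equal(X_kbest, X_generic)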
|
bsd-3-clause
|
cauchycui/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
142
|
17496
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random test data
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
    # Non-regression test to make sure that any further refactoring or
    # optimization of the NB models does not harm the performance on a
    # slightly non-linearly separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
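# Illustrative sketch (not part of the original test suite): the Laplace-smoothed
# Bernoulli estimate P(t | c) = (N_ct + 1) / (N_c + 2) from the IR book, worked
# by hand for the "Chinese" term in the China class (present in 3 of 3 docs):
# (3 + 1) / (3 + 2) = 0.8, matching feature_prob[0, 1] above.
def _example_bernoulli_smoothing(n_docs_containing_term=3, n_docs_in_class=3):
    return (n_docs_containing_term + 1.0) / (n_docs_in_class + 2.0)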
|
bsd-3-clause
|
cowlicks/odo
|
odo/backends/sas.py
|
10
|
1321
|
from __future__ import absolute_import, division, print_function
import sas7bdat
from sas7bdat import SAS7BDAT
import datashape
from datashape import discover, dshape, var, Record, date_, datetime_
from collections import Iterator
import pandas as pd
from .pandas import coerce_datetimes
from ..append import append
from ..convert import convert
from ..resource import resource
@resource.register(r'.+\.(sas7bdat)')
def resource_sas(uri, **kwargs):
return SAS7BDAT(uri, **kwargs)
@discover.register(SAS7BDAT)
def discover_sas(f, **kwargs):
lines = f.readlines()
next(lines) # burn header
ln = next(lines)
types = map(discover, ln)
names = [col.name.decode("utf-8") for col in f.header.parent.columns]
return var * Record(list(zip(names, types)))
@convert.register(pd.DataFrame, SAS7BDAT, cost=4.0)
def sas_to_DataFrame(s, dshape=None, **kwargs):
df = s.to_data_frame()
if any(typ in (date_, datetime_) for typ in dshape.measure.types):
df = coerce_datetimes(df)
names = [col.decode('utf-8') for col in s.column_names]
df = df[names] # Reorder names to match sasfile
return df
@convert.register(Iterator, SAS7BDAT, cost=1.0)
def sas_to_iterator(s, **kwargs):
lines = s.readlines()
if not s.skip_header:
next(lines) # burn
return lines
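# Illustrative usage sketch (not part of the original module): with the
# registrations above in place, a hypothetical SAS file converts to a
# DataFrame through odo's resource/convert machinery, e.g.
#
#     from odo import odo
#     df = odo('accounts.sas7bdat', pd.DataFrame)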
|
bsd-3-clause
|
JamesDWatson/BoidsAssessment
|
build/lib/boids/command.py
|
2
|
1344
|
#!/usr/bin/env python
from argparse import ArgumentParser
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
import yaml
import random
from .classes import Bird
def process():
    parser = ArgumentParser(description="Generate flock of boids")
    parser.add_argument('--number', type=int, default=50,
                        help='Specify the number of boids for the simulation')
    arguments = parser.parse_args()
    # Import variables from yaml file.
    config = yaml.load(open("boids/config.yaml"))
    no_boids = arguments.number  # Number of boids, taken from the command line.
# Initialise an array of boids, specified by the Bird class.
boid = [0] * no_boids
for x in range(no_boids):
boid[x] = Bird(config, no_boids)
    figure = plt.figure()
    axes = plt.axes(xlim=(config['x_lim'][0], config['x_lim'][1]),
                    ylim=(config['y_lim'][0], config['y_lim'][1]))
    scatter = axes.scatter([0] * no_boids, [0] * no_boids)
def animate(frame):
Bird.update_boids(no_boids, boid)
position = [0] * no_boids
for i in range(no_boids):
position[i] = (boid[i].x_pos, boid[i].y_pos)
scatter.set_offsets(list(position))
anim = animation.FuncAnimation(figure, animate,
frames=50, interval=50)
plt.show()
if __name__ == "__main__":
process()
|
mit
|
vivekmishra1991/scikit-learn
|
sklearn/linear_model/least_angle.py
|
61
|
54324
|
"""
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (neither will they when using method 'lar'
        ..). Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater than
            # the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
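# Illustrative usage sketch (not part of the original module): computing a
# Lasso path on synthetic data; shapes and values below are arbitrary.
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 10)
#     y = X[:, 0] + 0.1 * rng.randn(50)
#     alphas, active, coefs = lars_path(X, y, method='lasso')
#     # coefs has shape (n_features, n_alphas + 1); each column is the
#     # coefficient vector at the corresponding alpha.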
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out data for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv)
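        # Each entry of cv_paths is the (alphas, active, coefs, residues)
        # tuple returned by _lars_path_residues for one train/test split;
        # residues has shape (n_alphas, n_test_samples).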
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
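    Examples
    --------
    A minimal usage sketch (illustrative addition; the synthetic dataset
    below is an arbitrary placeholder):

    >>> from sklearn.linear_model import LassoLarsCV
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=60, n_features=8, noise=2.0,
    ...                        random_state=0)
    >>> reg = LassoLarsCV(cv=3).fit(X, y)
    >>> reg.coef_.shape
    (8,)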
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
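        # (added note) The information criterion evaluated further below is
        #     criterion = n_samples * log(MSE) + K * df
        # with K = 2 for AIC or K = log(n_samples) for BIC, and df taken as
        # the number of non-zero coefficients along the path (Zou et al., 2007).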
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
|
bsd-3-clause
|
ryfeus/lambda-packs
|
Sklearn_scipy_numpy/source/numpy/lib/recfunctions.py
|
148
|
35012
|
"""
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
    are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
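    Examples
    --------
    A small illustrative sketch (added; only the field names are shown,
    since the exact descr strings depend on the platform):

    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.zeros(3, dtype=[('a', int)])
    >>> b = np.zeros(3, dtype=[('b', float)])
    >>> [name for (name, _) in rfn.zip_descr((a, b))]
    ['a', 'b']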
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
    flatten : {True, False}, optional
        Whether to collapse the fields of nested structures into a flat
        sequence of items.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
    * Without a mask, the missing value will be filled with something
      depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
# Minimal processing needed: just make sure everythng's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : string or sequence, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
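    Examples
    --------
    A small illustrative sketch (added here; the arrays are arbitrary
    placeholders):

    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
    >>> b = rfn.append_fields(a, 'C', data=[100., 200.], usemask=False)
    >>> b.dtype.names
    ('A', 'B', 'C')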
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field.
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
        `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
    The key should be either a string or a sequence of strings corresponding
to the fields used to join the array. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
        r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
    r1postfix : string, optional
        String appended to the names of the fields of r1 that are present
        in r2 but absent from the key.
    r2postfix : string, optional
        String appended to the names of the fields of r2 that are present
        in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
        `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
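    Examples
    --------
    A small inner-join sketch (added for illustration; the arrays are
    arbitrary placeholders):

    >>> from numpy.lib import recfunctions as rfn
    >>> r1 = np.array([(1, 10.), (2, 20.), (3, 30.)],
    ...               dtype=[('key', int), ('v1', float)])
    >>> r2 = np.array([(1, 100.), (3, 300.), (4, 400.)],
    ...               dtype=[('key', int), ('v2', float)])
    >>> joined = rfn.join_by('key', r1, r2, jointype='inner', usemask=False)
    >>> joined.dtype.names
    ('key', 'v1', 'v2')
    >>> joined['key'].tolist()
    [1, 3]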
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.extend(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
|
mit
|
mjescobar/RF_Estimation
|
STA/helpers/molido/sta_withoutSR.py
|
2
|
23946
|
#!/usr/bin/env python
#============================================================
# STA FAST CHAIN
# SPIKE TRIGGERED AVERAGE (STA) ALGORITHM FAST VERSION
# Do STA for a list of Unit Cells.
# AASTUDILLO, APRIL 2014
# 29 APRIL 2014
#
# This script uses as stimulus ensemble a mat file containing
# the stimuli in their true dimensions (20x20 px).
#============================================================
#============================================================
# Package import section:
#============================================================
import matplotlib # Graph and plot library
matplotlib.use("Agg") # for save images without show it in windows (for server use)
import pylab as pl
import matplotlib.cm as cm # plot lib
import matplotlib.pyplot as plt # plot lib (for figures)
import mpl_toolkits.mplot3d.axes3d as p3d # 3D Plot lib
from matplotlib.pyplot import * # plot lib python
from matplotlib.ticker import NullFormatter # ticker formatter
#-----------------------------------------------------------
# Methods, Signal processing, etc:
#-----------------------------------------------------------
import scipy # numerical methods lib (like Matlab functions)
import scipy.io # input output lib (for save matlab matrix)
import scipy.signal as signal # signal processing lib
import numpy as npy # numerical methods lib
import sys # system lib
import os # operative system lib
import random # Random number methods
import time # System timer options
import scipy.misc as scim # scientific python package for image basic process
import glob # package for get file names from files in a folder
import matplotlib.pyplot as plt
from pylab import * # laboratory and plot methods lib
from scipy import misc
from PIL import Image, ImageChops
import argparse #argument parsing
from multiprocessing import Pool, freeze_support #Parallel powa!
#=============================================
# Inputs
#=============================================
parser = argparse.ArgumentParser(prog='sta.py',
description='Performs STA from a stimuli ensemble',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--getimagenames', default=0,
help='0,1 load image name list with the stimulus ensemble',
type=int, choices=[0, 1], required=False)
parser.add_argument('--openimagesandwrite', default=0,
help='0,1 DO TESTS FOR READ AND WRITE IMAGES',
type=int, choices=[0, 1], required=False)
parser.add_argument('--calculatemeanrf', default=0,
					help='0,1 LOAD ALL THE IMAGES FROM THE STIMULUS ENSEMBLE '+
'AND CALCULATE THE MEAN STIMULUS',
type=int, choices=[0, 1], required=False)
parser.add_argument('--algorithm', default=4,
					help='1,2,3,4 How to perform STA: '+
					'1 load all spike triggered stimuli, '+
					'2 load frames sequentially, '+
					'3 load frames in chunks, '+
					'4 compute directly from the stimulus matrix',
					type=int, choices=[1, 2, 3, 4], required=False)
parser.add_argument('--startUnit',
help='From which units should be processed',
type=int, default='0', required=False)
parser.add_argument('--endUnit',
help='Up to which units should be processed',
type=int, default='2', required=False)
parser.add_argument('--outputFolder',
help='Output folder',
type=str, default='.', required=False)
parser.add_argument('--path',
help='Path to files (DEPRECATED, do not use)',
type=str, default='.', required=False)
parser.add_argument('--sourceFolder',
help='Folder with the files containing the timestamps for each unit',
type=str, default='.', required=True)
parser.add_argument('--fileTypeFilter',
help='Filter for the file containing timestamps',
type=str, default='*.txt')
parser.add_argument('--timefolder',
help='SPIKE TIME STAMPS FOLDER FOR LOAD SPIKE TRAINS (DEPRECATED, do not use)',
type=str, default='.', required=False)
parser.add_argument('--syncFile',
help='SET THE NAME OF THE STIMULUS SYNCHRONY ANALYSIS FILE'+
'IT CONTAINS THE INITIAL AND FINAL TIME STAMPS OF EACH FRAME',
type=str, default='.', required=True)
parser.add_argument('--samplingRate',
					help='ACQUISITION SAMPLING RATE FOR THE RECORDS',
type=int, default=20000, required=False)
parser.add_argument('--framesNumber',
					help='NUMBER OF FRAMES BEFORE AND AFTER A SPIKE TO ANALYSE',
type=int, default=18, required=False)
parser.add_argument('--numberframespost',
help='number of frames posterior to each spike for STA windows',
type=int, default=2, required=False)
parser.add_argument('--xSize',
help='SIZE OF EACH FRAME IN PIXELS X',
type=int, default=31, required=False)
parser.add_argument('--ySize',
help='SIZE OF EACH FRAME IN PIXELS Y',
type=int, default=31, required=False)
parser.add_argument('--dolog',
help='0,1 logarithm analysis for plot',
type=int, default=0, choices=[0, 1], required=False)
parser.add_argument('--stimMiniv7',
help='Stimuli matrix Matlab v7 format',
type=str, required=False)
parser.add_argument('--stimMiniv73',
help='Stimuli matrix, ndarray format, check convertStim.py',
type=str, required=False)
parser.add_argument('--characterisation',
help='Characterisation',
type=str, required=False)
parser.add_argument('--numberProcesses',
help='Number of processes to spawn',
type=int, default=1, choices=[1,2,3,4,5,6], required=False)
args = parser.parse_args()
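#=============================================
# Example invocation (added for illustration; every path below is a
# placeholder, not a file shipped with this script):
#   python sta_withoutSR.py --sourceFolder ./units/ --syncFile ./sync.txt \
#     --stimMiniv73 ./stim_mini.npy --outputFolder ./results/ \
#     --startUnit 0 --endUnit 5
#=============================================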
#=============================================
# GET SPIKE TIME FILE NAMES
#=============================================
archivosruta = args.path
#Source folder of the files with the timestamps
sourceFolder = args.sourceFolder
# Check for trailing / on the folder
if sourceFolder[-1] != '/':
sourceFolder+='/'
fileTypeFilter = args.fileTypeFilter.rsplit('.', 1)[1]
# FOLDER NAME TO SAVE RESULTS
outputFolder = args.outputFolder
if outputFolder[-1] != '/':
outputFolder+='/'
# SET THE NAME OF THE STIMULUS SYNCHRONY ANALYSIS FILE
# IT CONTAINS THE INITIAL AND FINAL TIME STAMPS OF EACH FRAME
syncFile = args.syncFile
getimagenames = args.getimagenames #must be 0
openimagesandwrite = args.openimagesandwrite #must be 0
calculatemeanrf = args.calculatemeanrf #must be 0
tipoalgoritmo = args.algorithm
# SPIKE TIME STAMPS FOLDER FOR LOAD SPIKE TRAINS
timefolder = sourceFolder
# SET THE ACQUISITION SAMPLING RATE OF THE RECORDS
samplingRate = args.samplingRate # Hz
# SET THE NUMBER OF FRAMES BEFORE AND AFTER A SPIKE TO ANALYZE:
# number of frames previous to each spike for STA windows
framesNumber = args.framesNumber
# number of frames posterior to each spike for STA windows
numberframespost = args.numberframespost
# SET THE SIZE OF EACH FRAME IN PIXELS
xSize = args.xSize #31
ySize = args.ySize #31
# set if do logarithm analysis for plot:
dolog = args.dolog
# SET THE NAME OF THE STIMULUS SYNCHRONY ANALYSIS FILE
# IT CONTAINS THE INITIAL AND FINAL TIME STAMPS OF EACH FRAME
if not os.path.isfile(syncFile):
print ''
print 'File [' + syncFile + '] not found'
sys.exit()
inicio_fin_frame = npy.loadtxt(syncFile)
vector_inicio_frame = inicio_fin_frame[:,0]
vector_fin_frame = inicio_fin_frame[:,1]
# load image mat file stim_mini
stimMiniv7 = args.stimMiniv7
stimMiniv73 = args.stimMiniv73
# stimMiniv7 or stimMiniv73 must be provided
if not stimMiniv7 and not stimMiniv73:
print ''
print 'Either argument --stimMiniv7 or --stimMiniv73 is required'
sys.exit()
if stimMiniv7 and stimMiniv73:
print ''
print 'Only one argument (--stimMiniv7 or --stimMiniv73) must be provided'
sys.exit()
stimMatrix = stimMiniv73
if stimMiniv7:
stimMatrix = stimMiniv7
if not os.path.isfile(stimMatrix):
print ''
print 'File [' + stimMatrix + '] not found'
sys.exit()
lenSyncFile = len(vector_fin_frame)
if stimMiniv7:
ensemble = scipy.io.loadmat(stimMatrix)
estimulos = ensemble['stim']
lenEstimulos = estimulos.shape[3]
estim = npy.zeros(( xSize, ySize, lenEstimulos ))
if lenEstimulos < lenSyncFile:
lenSyncFile = lenEstimulos
for ke in range(lenEstimulos):
rgb = estimulos[:,:,:,ke]
		gray = npy.dot(rgb[...,:3], [0.299, 0.587, 0.114]) # standard ITU-R 601 luma weights (0.144 was presumably a typo)
estim[:,:,ke] = gray
estim = npy.array(estim)
else:
# stimMini must be prepared using convertStim.py
estim = npy.load(stimMatrix)
# same as choosing channel 3 of the RGB images
# ToDo, No idea why it's 2
canal = 2
# ToDo, because....
meanimagearray = npy.add.reduce(estim,axis=2) // (1.0* 100000)
startUnit = args.startUnit
endUnit = args.endUnit
# Dumb validation about limits
if startUnit < 0:
print ''
print 'startUnit can not be lesser than 0'
sys.exit()
if startUnit > endUnit:
print ''
	print 'startUnit can not be greater than endUnit'
sys.exit()
#The units and characterization vectors come from the Excel table,
#but since it is not available the first time, they must be ignored
per_row = []
for unitFile in os.listdir(sourceFolder):
if unitFile.rsplit('.', 1)[1] == fileTypeFilter:
per_row.append(unitFile.rsplit('.', 1)[0])
recoveredUnits = len(per_row)
if endUnit > recoveredUnits:
endUnit = recoveredUnits
#If the characterisation file is not provided, an array of ones with the
# same length as the number of units is used instead.
characterisationFile = args.characterisation
if not characterisationFile:
characterization = npy.ones((len(per_row),), dtype=npy.int)
else:
if not os.path.isfile(characterisationFile):
print ''
print 'File [' + characterisationFile + '] not found'
sys.exit()
else:
characterization = npy.loadtxt(characterisationFile)
#endUnit beyond the characterization vector: adjust it
if endUnit > len(characterization):
endUnit=len(characterization)-startUnit
try:
os.mkdir( outputFolder )
except OSError:
pass
def sta_1():
# LOAD ALL THE FRAMES ACCORDING TO THE TIME STAMPS OF THE CELL
# NOT FUNCTIONAL ANYMORE
limite3 = len(stimei)
kframe = 0
spk = npy.zeros((500,500,framesNumber,limite3))
for kiter in range(limite3):
kframe = stimei[kiter]
for b in range(framesNumber):
print ' kiter: ',kiter, ' kframe: ',kframe, ' b: ',b
line = ifn2[kframe-(framesNumber-1)+ b ]
imagen = scim.imread(line, flatten=True)
spk[:,:,b,kiter] = imagen - meanimagearray
N = len(stimei)
STA = ( npy.add.reduce(spk,axis=3) / (1.0 * N) )
MEANSTA = ( npy.add.reduce(STA,axis=2) / (1.0 * framesNumber) )
def sta_2():
# LOAD EACH FRAME AND CALCULATES THE STA SEQUENTIALLY
timeAlgorithm2Ini = time.time()
kframe = 0
cadena_texto = "mean_image"
contenedor = scipy.io.loadmat(outputFolder+cadena_texto+'.mat')
meanimagearray = contenedor['meanimagearray']
del contenedor
xSize = 380
ySize = 380
acumula = npy.zeros((xSize,ySize,framesNumber+numberframespost))
print 'Get the spike triggered stimuli: \n '
for kiter in range(limite3):
timeProcessIni = time.time()
kframe = stimei[kiter]
for b in range(framesNumber+numberframespost):
line = ifn2[kframe-(framesNumber-1)+ b]
imagen = scim.imread( line, flatten=True )
acumula[:,:,b] = acumula[:,:,b] + (imagen - meanimagearray)
if kiter > len(stimei):
break
timeProcessFin = time.time()
tiempoDiferencia = timeProcessFin - timeProcessIni
sys.stdout.write("\r%d%%" %((kiter+1)*100.0/limite3, ) )
sys.stdout.flush()
N = limite3 # len(stimei)
STA = acumula // N
print ' \n '
minimosta = npy.min(npy.min(npy.min(STA)))
maximosta = npy.max(npy.max(npy.max(STA)))
print '\nmin sta ', minimosta, ' max sta ', maximosta
if minimosta < 0:
STA_desp = STA + npy.abs(minimosta) # lineal shift
if minimosta >= 0:
STA_desp = STA - npy.abs(minimosta) # lineal shift
minimosta_desp = npy.min(npy.min(npy.min(STA_desp)))
maximosta_desp = npy.max(npy.max(npy.max(STA_desp)))
print 'min sta with bias', minimosta_desp
print 'max sta with bias', maximosta_desp
	stavisual_lin = STA_desp*255 # it is visualized with linear scale
	stavisual_lin = stavisual_lin // (maximosta_desp *1.0) # it is normalized with linear scale
	print 'min sta visual linear', npy.min(npy.min(npy.min(stavisual_lin)))
	print 'max sta visual linear', npy.max(npy.max(npy.max(stavisual_lin)))
# FINAL NORMALIZATION FOR THE MEAN STA
MEANSTA_lin = npy.add.reduce(stavisual_lin,axis=2)
timeAlgorithm2End = time.time()
timeAlgorithm2Total = timeAlgorithm2End - timeAlgorithm2Ini
print " Time process ", timeAlgorithm2Total, ' seg (', timeAlgorithm2Total/60, ' min)'
def sta_3():
timeAlgorithm3Ini = time.time()
print 'Get the spike triggered stimuli: \n '
sizechunk = 40
sizesmall = 20
acumula = npy.zeros((xSize,ySize,framesNumber+numberframespost))
if dosmall:
acumulaSmall = npy.zeros((sizesmall,sizesmall,framesNumber+numberframespost))
for kblock in range(npy.round(limite3/sizechunk)):
spk = npy.zeros((xSize,ySize,framesNumber+numberframespost,sizechunk))
if dosmall:
spkSmall = npy.zeros((sizesmall,sizesmall,framesNumber+numberframespost,sizechunk))
for kiter in range(sizechunk):
kframe = stimei[kiter+kblock*sizechunk]
for b in range(framesNumber+numberframespost):
line = ifn2[kframe-(framesNumber-1)+ b ]
imagen = scim.imread(line, flatten = True )
if dosmall:
imagenSmall = scipy.misc.imresize(imagen, [sizesmall,sizesmall] , interp = 'bilinear' , mode = None )
spk[:,:,b,kiter] = imagen
if dosmall:
spkSmall[:,:,b,kiter] = imagenSmall
del imagen
del line
if dosmall:
del imagenSmall
acuchunk = ( npy.add.reduce(spk,axis=3) )
acumula[:,:,:] = acumula[:,:,:] + acuchunk
if dosmall:
acuchunkSmall = ( npy.add.reduce(spkSmall,axis=3) )
acumulaSmall[:,:,:] = acumulaSmall[:,:,:] + acuchunkSmall
if kblock > npy.round(limite3/sizechunk):
break
sys.stdout.write("\r%d%%" % ((kblock+1)*100.0 /(npy.round(limite3/sizechunk)), ) )
sys.stdout.flush()
N = limite3
STA = acumula // N
for b in range(framesNumber+numberframespost):
STA[:,:,b] = STA[:,:,b] - meanimagearray
if dosmall:
meansmall = scipy.misc.imresize(meanimagearray,[sizesmall,sizesmall], interp='bilinear', mode=None)
STASmall = acumulaSmall // N
for b in range(framesNumber+numberframespost):
STASmall[:,:,b] = STASmall[:,:,b] - meansmall
print ' \n '
minimosta = npy.min(npy.min(npy.min(STA)))
maximosta = npy.max(npy.max(npy.max(STA)))
if minimosta < 0:
STA_desp = STA + npy.abs(minimosta) # lineal shift
if minimosta >= 0:
STA_desp = STA - npy.abs(minimosta) # lineal shift
minimosta_desp = npy.min(npy.min(npy.min(STA_desp)))
maximosta_desp = npy.max(npy.max(npy.max(STA_desp)))
stavisual_lin = STA_desp*255 # it is visualized with lineal scale
stavisual_lin = stavisual_lin // (maximosta_desp *1.0) # it is normalized with lineal scale
# FINAL NORMALIZATION FOR THE MEAN STA
MEANSTA_lin = ( npy.add.reduce(stavisual_lin,axis=2) / (1.0 * (framesNumber+numberframespost) ) )
if dosmall:
minstasmall = npy.min(npy.min(npy.min(STASmall)))
maxstasmall = npy.max(npy.max(npy.max(STASmall)))
if minstasmall < 0:
STA_Small_desp = STASmall + npy.abs(minstasmall) # lineal shift
if minstasmall >= 0:
STA_Small_desp = STASmall - npy.abs(minstasmall) # lineal shift
minstasmall_desp = npy.min(npy.min(npy.min(STA_Small_desp)))
maxstasmall_desp = npy.max(npy.max(npy.max(STA_Small_desp)))
sta_small_visual_lin = STA_Small_desp * 255 # it is visualized with lineal scale
sta_small_visual_lin = sta_small_visual_lin // (maxstasmall_desp *1.0) # it is normalized with lineal scale
# FINAL NORMALIZATION FOR THE MEAN STA
MEAN_STA_small_lin = ( npy.add.reduce(sta_small_visual_lin,axis=2) / (1.0 * (framesNumber+numberframespost) ) )
timeAlgorithm3End = time.time()
timeAlgorithm3Total = timeAlgorithm3End - timeAlgorithm3Ini
print " Time process ", timeAlgorithm3Total, ' seg (', timeAlgorithm3Total/60, ' min)'
def sta_4(args):
stimei = args
timeAlgorithm4Ini = time.time()
stac = npy.zeros( ( xSize,ySize, framesNumber+numberframespost ) ) # complete sta matrix
for numeroframe in range(framesNumber): #for 18 frames
bigsta18 = npy.zeros( ( xSize,ySize ) )
for kiter in range(len(stimei)):
bigsta18[:,:] = bigsta18[:,:] + estim[ :,:,stimei[kiter]-numeroframe ] - meanimagearray
sta18 = bigsta18 / (1.0 * len(stimei) ) # one part of the sta matrix
stac[:,:,framesNumber-1 - numeroframe] = sta18
acumula = npy.zeros((xSize,ySize,framesNumber+numberframespost))
STA = stac
#print ' \n '
minimosta = npy.min(npy.min(npy.min(STA)))
maximosta = npy.max(npy.max(npy.max(STA)))
STA_desp = STA - minimosta
minimosta_desp = npy.min(npy.min(npy.min(STA_desp)))
maximosta_desp = npy.max(npy.max(npy.max(STA_desp)))
stavisual_lin = STA_desp * 255 # it is visualized with lineal scale
stavisual_lin = stavisual_lin // (maximosta_desp *1.0) # it is normalized with lineal scale
# FINAL NORMALIZATION FOR THE MEAN STA
MEANSTA_lin = ( npy.add.reduce(stavisual_lin,axis=2) / (1.0 * (framesNumber+numberframespost) ) )
timeAlgorithm4End = time.time()
timeAlgorithm4Total = timeAlgorithm4End - timeAlgorithm4Ini
#print " Time process ", timeAlgorithm4Total, ' seg (', timeAlgorithm4Total/60, ' min)'
#print '\nsize STA: ',len(STA),'x',len(STA[0]),'x',len(STA[0][0])
return (STA , stavisual_lin, MEANSTA_lin, STA_desp, acumula)
def calculaSTA(args):
start, finish = args
if finish > endUnit:
finish = endUnit
for kunit in range(start,finish):
timestampName = per_row[kunit]
if characterization[kunit] > 0:
print 'Analysing Unit ',timestampName #, ' loop :', c ,' unit n ', c + startUnit
#--------------------------------------------------------
# get spike time stamps from file
#--------------------------------------------------------
neurontag = timestampName # tag or number of cell
rastercelulatxtfile = timefolder + timestampName +'.txt'
timestamps = npy.loadtxt(rastercelulatxtfile) # text file containing time spikes in datapoints
neuronresultfolder_lin = str(neurontag)+'_lineal'
try:
os.mkdir( outputFolder+neuronresultfolder_lin ) # create the folder
except OSError:
pass
finalfolder_lin = outputFolder+neuronresultfolder_lin
#print 'size time stamps vector: ', len(timestamps) #, 'x',len(timestamps[0])
#--------------------------------------------------------
            # get spike times relative to the stimulus start (frames do not start at time=0)
#--------------------------------------------------------
#--------------------------------------------------------
# Conversion of spike times from seconds to POINTS:
#--------------------------------------------------------
#vector_spikes = timestamps[:]*samplingRate # without first id zero column (1 COLUMMN)
vector_spikes = timestamps[:] # without first id zero column (1 COLUMMN)
stimei = [] # initialize time spike index depending of image time
spikeframe_matrix = npy.zeros( (len(vector_spikes), 4) ) # [spike time, frame id, ini time frame, end time frame]
#--------------------------------------------------------
# convert stimes (SPIKE TIMES) to frame indexes (image index):
#--------------------------------------------------------
primer_frame = 0
frame_ant = 0
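            # primer_frame remembers the frame matched for the previous spike, so
            # the search for the next (later) spike resumes there instead of at 0.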
#print 'Get the spike triggered stimuli indices: \n'
contator = 0
contator2 = 0
totalcont = len(vector_spikes) * len(range(primer_frame, lenSyncFile))
for punto_spike in vector_spikes:
condicion = 1
for i in range(primer_frame, lenSyncFile):
if (vector_inicio_frame[i] < punto_spike) & (punto_spike <= vector_fin_frame[i]):
# if the spike time is into a frame time points (start and ends)
spikeframe_matrix[contator,0] = punto_spike
spikeframe_matrix[contator,1] = vector_fin_frame[i]
spikeframe_matrix[contator,2] = inicio_fin_frame[i,0]
spikeframe_matrix[contator,3] = inicio_fin_frame[i,1]
stimei.append(i)
frame_ant = i
break
sys.stdout.write("\r%d%%" %contator2)
sys.stdout.flush()
contator = contator + 1 #
contator2 = contator * 100 // ( 1.0 * len(vector_spikes) )
primer_frame = frame_ant
#print '\n'
limite3 = len(stimei)
print "Nro de frames: ", limite3
#print 'length frames times vector', lenSyncFile
#print "length time stamps vector: ", len(timestamps)
#print "length spike triggered stimuli time i vector: ", len(stimei)
#--------------------------------------------------------
# STA Algorithm
#--------------------------------------------------------
#------------------- ALGORITHM TYPE 1----------------------
if(tipoalgoritmo == 1):
sta_1()
#------------------- ALGORITHM TYPE 2----------------------
if(tipoalgoritmo == 2): # sequentially algorithm
sta_2()
dosmall = 0
#------------------- ALGORITHM TYPE 3----------------------
if(tipoalgoritmo == 3): # LOAD CHUNKS OF FRAMES AND CALCULATES THE STA SEQUENTIALLY
sta_3()
#===============================================================================
#------------------- ALGORITHM TYPE 4----------------------
if(tipoalgoritmo == 4): # LOAD entire matrix stimuli AND CALCULATES THE STA SEQUENTIALLY
STA , stavisual_lin , MEANSTA_lin, STA_desp, acumula = sta_4(stimei)
#----------------------------------------------------
# save spike time stamp and frame index
#----------------------------------------------------
spikeframe_matrix_array = npy.array(spikeframe_matrix)
spikeframe_filename = "spikeframe_matrix"+str(neurontag)
#print "Save spike frame matrix as mat file: ",spikeframe_filename
scipy.io.savemat(finalfolder_lin+'/'+spikeframe_filename+'.mat',mdict={'spikeframe_matrix':spikeframe_matrix_array},oned_as='column')
#----------------------------------------------------
# save true STA matrix (NON SCALED for visual plot)
#----------------------------------------------------
STA_array = npy.array(STA)
cadena_texto = "sta_array_"+str(neurontag)
#print "Saving NON rescaled STA as mat file: ",cadena_texto
scipy.io.savemat(finalfolder_lin+'/'+cadena_texto+'.mat',mdict={'STA_array':STA_array},oned_as='column')
#----------------------------------------------------
# save visual STA matrix ( RE SCALED for visual plot)
#----------------------------------------------------
stavisual_lin_array = npy.array(stavisual_lin)
cadena_texto = "stavisual_lin_array_"+str(neurontag)
#print "Saving visual STA (lineal) as mat file: ",cadena_texto
scipy.io.savemat(finalfolder_lin+'/'+cadena_texto+'.mat',mdict={'STAarray_lin':stavisual_lin_array},oned_as='column')
#print 'Saving images in lineal scale...'
plt.clf()
fig = plt.figure(1, figsize=(12,10))
ax = fig.add_subplot(3,6,1)
component = stavisual_lin[:,:,0]
ax.pcolormesh( component,vmin = 0,vmax = 255, cmap=cm.jet )
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_aspect(1)
kcontador = 2
for ksubplot in range(17):
ax = fig.add_subplot(3,6,kcontador)
component = stavisual_lin[:,:,kcontador-1]
ax.pcolormesh( component,vmin = 0,vmax = 255, cmap=cm.jet )
ax.set_aspect(1)
ax.set_yticklabels([])
ax.set_xticklabels([])
kcontador+=1
plt.savefig(finalfolder_lin+"/STA-"+str(neurontag)+"_.png",format='png', bbox_inches='tight')
plt.savefig(outputFolder+"STA-"+str(neurontag)+"_.png",format='png', bbox_inches='tight')
plt.show()
plt.clf()
plt.close()
#------------------------------------------------------
#print 'Saving mean image in lineal scale...'
pl.figure()
im = pl.pcolormesh(MEANSTA_lin,vmin = 0,vmax = 255, cmap=cm.jet)
pl.jet()
pl.colorbar(im)
ax = pl.axes()
ax.set_yticklabels([])
ax.set_xticklabels([])
pl.savefig(finalfolder_lin+"/MEANSTA-g_"+str(neurontag)+".png",format='png', bbox_inches='tight')
pl.close()
print 'CELL ' + timestampName + ' FINISHED!!!'
del STA_desp
del STA
del stavisual_lin
del spikeframe_matrix
del acumula
def main():
length = endUnit-startUnit
np = args.numberProcesses
p = Pool(processes=np)
p.map(calculaSTA, [(startPosition,startPosition+length//np) for startPosition in range(startUnit, length, length//np)])
if __name__=="__main__":
freeze_support()
main()
|
gpl-2.0
|
sameera2004/scikit-beam-examples
|
demos/reciprocal_space/recip_example.py
|
5
|
9494
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This is an example of using real experimental data for the six angles
(motor positions) and an image stack to plot the HK plane. Here
recip.process_to_q is used to convert the data to Q
(reciprocal space) and the result is then gridded with
skbeam.core.utils.grid3d.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.ma as ma
import os
import matplotlib.pyplot as plt
from skbeam.core import recip
from skbeam.core.utils import grid3d
import zipfile
import six
import time as ttime
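# Editor's sketch (hedged): the module docstring above describes a two-step
# workflow -- convert the detector images to Q with recip.process_to_q and
# grid the result with grid3d.  The helper below simply mirrors the calls made
# in recip_ex()/run() further down; the bin counts (40, 40, 1) are the ones
# used there and the helper itself is illustrative and never called.
def _q_workflow_sketch(motors, detector_size, pixel_size, calibrated_center,
                       dist_sample, wavelength, ub_mat, i_stack):
    q_values = recip.process_to_q(motors, detector_size, pixel_size,
                                  calibrated_center, dist_sample,
                                  wavelength, ub_mat)
    return grid3d(q_values, i_stack, 40, 40, 1)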
def recip_ex(detector_size, pixel_size, calibrated_center, dist_sample,
ub_mat, wavelength, motors, i_stack, H_range, K_range, L_range):
# convert to Q space
q_values = recip.process_to_q(motors, detector_size, pixel_size,
calibrated_center, dist_sample,
wavelength, ub_mat)
# minimum and maximum values of the voxel
q_min = np.array([H_range[0], K_range[0], L_range[0]])
q_max = np.array([H_range[1], K_range[1], L_range[1]])
# no. of bins
dqn = np.array([40, 40, 1])
# process the grid values
(grid_data, grid_occu, std_err,
grid_out, bounds) = grid3d(q_values, i_stack, dqn[0], dqn[1],
dqn[2])
grid = np.mgrid[0:dqn[0], 0:dqn[1], 0:dqn[2]]
r = (q_max - q_min) / dqn
X = grid[0] * r[0] + q_min[0]
Y = grid[1] * r[1] + q_min[1]
Z = grid[2] * r[2] + q_min[2]
# creating a mask
_mask = grid_occu <= 10
grid_mask_data = ma.masked_array(grid_data, _mask)
grid_mask_std_err = ma.masked_array(std_err, _mask)
grid_mask_occu = ma.masked_array(grid_occu, _mask)
return X, Y, Z, grid_mask_data
def plot_slice(x, y, i_slice, lx, H_range, K_range):
# plot the HK plane
i_slice_range = [0.006, 0.0115]
f = plt.figure("HK plane", figsize=(7.5, 2.3))
plt.subplots_adjust(left=0.10, bottom=0.155555,
right=1.05, top=0.95,
wspace=0.2, hspace=0.45)
subp = f.add_subplot(111)
cnt = subp.contourf(x, y, i_slice, np.linspace(i_slice_range[0],
i_slice_range[1],
50, endpoint=True),
cmap='hot',
extend='both')
subp.axis('scaled')
subp.set_xlim(H_range)
subp.set_ylim(K_range)
subp.set_xlabel("H", size=10)
subp.set_ylabel("K", size=10)
subp.tick_params(labelsize=9)
cbar = plt.colorbar(cnt, ticks=np.linspace(i_slice_range[0],
i_slice_range[1],
3, endpoint=True),
format='%.4f')
cbar.ax.tick_params(labelsize=8)
def get_data(X, Y, grid_mask_data, plane):
HKL = 'HKL'
for i in plane:
HKL = HKL.replace(i, '')
HH = X[:, :, :].squeeze()
H = X[:, 0, 0]
KK = Y[:, :, :].squeeze()
K = Y[0, :, 0]
i_slice = grid_mask_data[:, :, :].squeeze() # intensity slice
lx = eval(plane[0])
return i_slice, lx
def run():
H_range = [-0.270, -0.200]
K_range = [+0.010, -0.010]
L_range = [+1.370, +1.410]
detector_size = (256, 256)
pixel_size = (0.0135*8, 0.0135*8) # (mm)
calibrated_center = (256/2.0, 256/2.0) # (mm)
dist_sample = 355.0 # (mm)
# ub matrix data
    ub_mat = np.array([[-1.39772305e-01, -1.65559565e+00, -1.40501716e-02],
[-1.65632438e+00, 1.39853170e-01, -1.84650965e-04],
[4.79923390e-03, 4.91318724e-02, -4.72922724e-01]])
# wavelength data
wavelength = np.array([13.28559417])
# six angles data
motors = np.array([[102.3546, 77.874608, -90., 0., 0., 1.0205],
[102.6738, 78.285008, -90., 0., 0., 1.0575],
[102.9969, 78.696608, -90., 0., 0., 1.0945],
[103.3236, 79.108808, -90., 0., 0., 1.1315],
[103.6545, 79.521908, -90., 0., 0., 1.1685],
[103.9893, 79.935908, -90., 0., 0., 1.2055],
[104.3283, 80.350808, -90., 0., 0., 1.243],
[104.6712, 80.766608, -90., 0., 0., 1.28],
[105.018, 81.183308, -90., 0., 0., 1.317],
[105.369, 81.600908, -90., 0., 0., 1.354],
[105.7242, 82.019408, -90., 0., 0., 1.391],
[106.0836, 82.438808, -90., 0., 0., 1.428],
[106.4472, 82.859108, -90., 0., 0., 1.465],
[106.815, 83.280608, -90., 0., 0., 1.502],
[107.187, 83.703308, -90., 0., 0., 1.539],
[107.5632, 84.126608, -90., 0., 0., 1.576],
[107.9442, 84.551108, -90., 0., 0., 1.6135],
[108.3291, 84.976808, -90., 0., 0., 1.6505],
[108.7188, 85.403709, -90., 0., 0., 1.6875],
[109.113, 85.831509, -90., 0., 0., 1.7245],
[109.5117, 86.260509, -90., 0., 0., 1.7615],
[109.9149, 86.690709, -90., 0., 0., 1.7985],
[110.3229, 87.122109, -90., 0., 0., 1.8355],
[110.7357, 87.554709, -90., 0., 0., 1.8725],
[111.153, 87.988809, -90., 0., 0., 1.91],
[111.5754, 88.424109, -90., 0., 0., 1.947],
[112.0026, 88.860609, -90., 0., 0., 1.984],
[112.4349, 89.298609, -90., 0., 0., 2.021],
[112.8723, 89.737809, -90., 0., 0., 2.058],
[113.3145, 90.178809, -90., 0., 0., 2.095],
[113.7621, 90.621009, -90., 0., 0., 2.132],
[114.2151, 91.065009, -90., 0., 0., 2.169],
[114.6735, 91.510209, -90., 0., 0., 2.2065]])
# Data folder path
# path = "LSCO_Nov12_broker"
# intensity of the image stack data
run_location = os.getcwd()
rest_of_path = os.path.join(*__file__.split(os.sep)[:-1])
try:
i_stack = np.load(os.path.join(run_location, rest_of_path, "LSCO_Nov12_broker", "i_stack.npy"))
except IOError:
zipfile.ZipFile(os.path.join("LSCO_Nov12_broker.zip")).extractall()
i_stack = np.load(os.path.join("LSCO_Nov12_broker", "i_stack.npy"))
X, Y, Z, grid_mask_data = recip_ex(detector_size, pixel_size,
calibrated_center, dist_sample,
ub_mat, wavelength, motors, i_stack,
H_range, K_range, L_range)
i_slice, lx = get_data(X, Y, grid_mask_data, plane='HK')
x = X.reshape(40, 40)
y = Y.reshape(40, 40)
plot_slice(x, y, i_slice, lx, H_range, K_range)
plt.show()
ttime.sleep(1)
if __name__ == "__main__":
run()
|
bsd-3-clause
|
AhmedHani/Kaggle-Machine-Learning-Competitions
|
Medium/Denoising Dirty Documents/image_processing/background_removal.py
|
1
|
1347
|
__author__ = 'Ahmed Hani Ibrahim'
from PIL import Image
import numpy
from scipy import signal
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
from load_train_data import load_training_images
from load_test_data import load_testing_images
from save_testing_images import save_images
import PATH
KERNEL_SIZE = 15
training_images_path = PATH.TRAINING_DATA_DIR
testing_images_path = PATH.TESTING_DATA_DIR
training_images_collection = load_training_images(training_images_path)
testing_images_collection, images_id = load_testing_images(testing_images_path)
output_images = []
testing_cleaned_path = PATH.TESTING_RESULTS_IMAGE_PROCESSING
for i in range(0, len(testing_images_collection)):
print(i)
image = testing_images_collection[i]
#plt.imshow(image)
#plt.show()
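    # Estimate the slowly varying page background with a large median filter,
    # keep only the pixels that are noticeably darker than that background
    # (the ink), and set everything else to white (1.0).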
background = signal.medfilt2d(image, KERNEL_SIZE)
#plt.imshow(background)
#plt.show()
#temp = image < background
mask_text = image < background - 0.1
new_image = numpy.where(mask_text, image, 1.0)
#plt.imshow(new_image)
#plt.show()
output_images.append(new_image)
save_images(testing_cleaned_path, output_images, images_id)
|
mit
|
jhanley634/testing-tools
|
problem/stores_and_cust/top_pop_cities.py
|
1
|
2884
|
#! /usr/bin/env python3
# Copyright 2019 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
import sys
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib
matplotlib.use('Agg') # noqa E402
import matplotlib.pyplot as plt
import uszipcode
def get_populous_cities():
search = uszipcode.SearchEngine()
for r in search.by_population(1e5):
print(r.population,
r.post_office_city)
yield r.lat, r.lng
def draw_map():
def colorize_state(geometry):
return {'facecolor': (.94, .94, .86),
'edgecolor': (.55, .55, .55)}
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection=ccrs.PlateCarree())
ax.set_extent([-125, -66.5, 20, 50], ccrs.Geodetic())
shapename = 'admin_1_states_provinces_lakes_shp'
states_shp = shpreader.natural_earth(resolution='110m',
category='cultural', name=shapename)
ax.add_geometries(
shpreader.Reader(states_shp).geometries(),
ccrs.PlateCarree(),
styler=colorize_state)
ax.stock_img()
xs = []
ys = []
for lat, lng in get_populous_cities():
xs.append(lng)
ys.append(lat)
ax.plot(xs, ys, 'ok', transform=ccrs.PlateCarree(), markersize=8)
plt.savefig('/tmp/states.png')
# from https://stackoverflow.com/questions/8315389/print-fns-as-theyre-called
def tracefunc(frame, event, _, indent=[0]):
if event == "call":
indent[0] += 2
file = frame.f_code.co_filename.split('/')[-1]
print("-" * indent[0] + "> call function", frame.f_code.co_name, file)
elif event == "return":
print("<" + "-" * indent[0], "exit function", frame.f_code.co_name)
indent[0] -= 2
return tracefunc
if __name__ == '__main__':
sys.setprofile(None) # (tracefunc)
draw_map()
|
mit
|
MartinIsoz/CFD_oF
|
05_freibergExpSetUp/00_Scripts/fprepIC_noGravityV4.py
|
2
|
11596
|
#!/usr/bin/python
#FILE DESCRIPTION=======================================================
# Python function used to prepare initial condition for the rivulet
# spreading CFD.
#
# Based on the surrogate model proposed by ISOZ in [1]
#
# Script rewrites setFieldsDict in order to create an initial guess
# for the rivulet shape
#
# Script should be usable for DC05 and DC10 liquids (at low flow rates).
# For the case of higher flow rates, the model is not yet derived, for
# water, a different model (Towell and Rothfeld, [2]) should be
# implemented
#
# MODEL DESCRIPTION:
# - gravity affects the speed of movement of \tau along the plate
# - gravity does not have any effect on the profile shape and speed of
# spreading
# - at the moment, the model writes only the shape of the GLI but not the
#   corresponding velocity field (I do not know how to implement it
#   via setFieldsDict - there might be a possibility to use the
#   funkySetFields utility but I did not look into it yet)
#
# NOTES:
# - from the setFieldsDict point of view, the interface (rivulet) is
# reconstructed by a series of cylinders
# - control the implementation !! (and, as a matter of fact, also the
#   model)
# - the model uses the term tan(beta) directly - it should be
#   better suited to the case of higher contact angles BUT its
#   usability is still limited by the assumptions used
#   throughout its derivation
# - also prepares the inlet part of the geometry (based on an
#   approximate empirical model)
#
# USAGE:
# - it is a function callable by caseConstructor
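#
# EXAMPLE OF GENERATED ENTRY (editor's note, illustrative values only):
# writeCylinder() below emits one cylinderToCell block per axial slice of the
# rivulet; e.g. for h=1e-3, a=2e-3, x=0, deltaX=1e-4 (so R=2.5e-3, d=1.5e-3)
# the entry written into setFieldsDict looks roughly like
#
#   cylinderToCell
#   {
#       p1     (0.00000e+00 0.00000e+00 -1.50000e-03);
#       p2     (1.00000e-04 0.00000e+00 -1.50000e-03);
#       radius 2.50000e-03;
#       fieldValues
#       (
#           volScalarFieldValue alpha.liquid 1
#       );
#   }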
#LICENSE================================================================
# fprepIC_noGravity.py
#
# Copyright 2015-2016 Martin Isoz <martin@Poctar>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
def fprepIC_noGravity(caseDir, #case directory name
a0,Q0, #initial conditions
liqName,l, #model defining properties
alpha,L,nCellsX,H,nCellsZ, #geometrical and meshing parameters
pltFlag, #output plots
):
#IMPORT BLOCK=======================================================
import math
import numpy as np
#~ from scipy.optimize import fsolve #NAE solver
from scipy.integrate import odeint #ODE solver
import matplotlib.pyplot as plt
#IMPORT BLOCK-CUSTOM================================================
from dfluidData import dfluidData
#LOCAL FUNCTIONS====================================================
def writeCylinder(h,a,x,deltaX):
# function returning list o strings to write cylinderToCell entry
# into setFieldsDict file
R = h/2.0 + a**2.0/(2.0*h) #cylinder diameter
d = R - h #how much bellow the plate is the cylinder center
# -- create the strings to return
cellStr = []
cellStr.append('\tcylinderToCell\n\t\t{\n') #entry openning lines
cellStr.append('\t\t\tp1 (%5.5e %5.5e %5.5e);\n'%(x,0.0,-d))
cellStr.append('\t\t\tp2 (%5.5e %5.5e %5.5e);\n'%(x+deltaX,0.0,-d))
cellStr.append('\t\t\tradius %5.5e;\n\n'%R)
cellStr.append('\t\t\tfieldValues\n\t\t\t(\n')
cellStr.append('\t\t\t\tvolScalarFieldValue alpha.liquid 1\n\t\t\t);\n')
cellStr.append('\t\t}\n') #entry ending line
return cellStr
def writeBox(h,deltah,a,x,deltaX,UVec,p_rgh):
# function returning list of strings to write boxToCell entry into
# setFieldsDict file
cellStr = []
cellStr.append('\tboxToCell\n\t\t{\n') #entry openning lines
cellStr.append('\t\t\tbox (%5.5e %5.5e %5.5e) (%5.5e %5.5e %5.5e);\n\n'%(x,-a,h,x+deltaX,a,h+deltah))
cellStr.append('\t\t\tfieldValues\n\t\t\t(\n')
cellStr.append('\t\t\t\tvolVectorFieldValue U (%5.5e %5.5e %5.5e)\n'%tuple(UVec))
cellStr.append('\t\t\t\tvolScalarFieldValue p_rgh %5.5e\n\t\t\t);\n'%p_rgh)
cellStr.append('\t\t}\n') #entry ending line
return cellStr
def writeBoxAlpha(h,deltah,a,x,deltaX,alpha):
# function returning list of strings to write boxToCell entry into
# setFieldsDict file
cellStr = []
cellStr.append('\tboxToCell\n\t\t{\n') #entry openning lines
cellStr.append('\t\t\tbox (%5.5e %5.5e %5.5e) (%5.5e %5.5e %5.5e);\n\n'%(x,-a,h,x+deltaX,a,h+deltah))
cellStr.append('\t\t\tfieldValues\n\t\t\t(\n')
cellStr.append('\t\t\t\tvolScalarFieldValue alpha.liquid %5.5e\n\t\t\t);\n'%alpha)
cellStr.append('\t\t}\n') #entry ending line
return cellStr
def writeRotBoxAlpha(origin,i,j,k,alpha):
# function returning list of strings to write boxToCell entry into
# setFieldsDict file
cellStr = []
cellStr.append('\trotatedBoxToCell\n\t\t{\n') #entry openning lines
cellStr.append('\t\t\torigin (%5.5e %5.5e %5.5e);\n'%tuple(origin))
cellStr.append('\t\t\ti (%5.5e %5.5e %5.5e);\n'%tuple(i))
cellStr.append('\t\t\tj (%5.5e %5.5e %5.5e);\n'%tuple(j))
cellStr.append('\t\t\tk (%5.5e %5.5e %5.5e);\n\n'%tuple(k))
cellStr.append('\t\t\tfieldValues\n\t\t\t(\n')
cellStr.append('\t\t\t\tvolScalarFieldValue alpha.liquid %5.5e\n\t\t\t);\n'%alpha)
cellStr.append('\t\t}\n') #entry ending line
return cellStr
##CONSTANTS=========================================================
# -- other physical properties
g = 9.81 #gravity
# -- liquid properties
[sigma,rho,mu,theta0,thetaA,thetaR] = dfluidData(liqName)
# -- calculation parameters
deltaX = L/nCellsX
deltaZ = H/nCellsZ
#MODEL DEFINITION=======================================================
# -- constants
psi = np.power(105.0*mu*Q0 / (4.0*rho*g*np.sin(alpha)),0.3333)
varpi = 2.0*mu/(rho*g*np.sin(alpha)*l**2.0)
# -- functions
betaFunc= lambda a : np.arctan(np.divide(psi,a**1.3333))
h0Func = lambda a,beta : 2.0*np.multiply(a,np.tan(beta)/2.0)
# -- model ODE (to be solved numerically)
def model(a,x):
beta = betaFunc(a)
dadx = 1.0*beta**3.0*sigma*varpi/(9.0*mu*np.log(a/(2.0*np.exp(2.0)*l)))
return dadx
def jac(a,x):
        dfun = -(1.0/9.0)*np.arctan(psi/a**4.0)**2.0*sigma*(np.arctan(psi/a**4.0)*(a**8.0+psi**2.0)-12.0*a**4.0*psi*(np.log(2.0)+2.0-np.log(a/l)))*varpi/(mu*(np.log(2.0)+2.0-np.log(a/l))**2.0*(a**8.0+psi**2.0)*a)
return dfun
#SETFIELDSDICT EDITING==============================================
with open(caseDir + './system/setFieldsDict', 'r') as file:
# read a list of lines into data
data = file.readlines()
regsLine = 0 #at the time empty
# -- find position of regions keyword
for i in range(len(data)):
if data[i].find('regions') >-1:
auxData = data[0:i+2]
regsLine = i
break
# -- find the height of liquid surface at inlet
h = 2.0*math.pi/(5.0*alpha)*(6.0*Q0**2.0/g)**0.2
a0 = 2.0*h*np.sqrt(3.0)/3.0
# -- fill in the inlet by liquid
auxData.extend(writeRotBoxAlpha([-h,-10,0],[-10/np.tan(alpha),0,-10],[0,20,0],[10,0,-10/np.tan(alpha)],1.0))
#~ # -- connect the liquid in inlet with the liquid in rivulet
#~ auxData.extend(writeRotBoxAlpha([0,-a0,h],[-10*np.tan(alpha),0,-10],[0,2*a0,0],[10,0,-10*np.tan(alpha)],1.0))
# -- solve the model ODE
x = np.linspace(-h,L,nCellsX+int(round(nCellsX*h/L))+1) #create solution grid
aList = odeint(model,a0,x,Dfun=jac,printmessg=True) #solve the system
# -- auxiliary calculations
betaList = betaFunc(aList)
h0List = h0Func(aList,betaList)
# -- extend the list by the cylinders (gas-liquid interface position)
for i in range(nCellsX):
# -- prepare the phase fraction fields
cellStr = writeCylinder(h0List[i],aList[i],x.item(i),deltaX)
auxData.extend(cellStr)
# -- prepare the velocity field - as boxes
R = h0List.item(i)/2.0 + aList.item(i)**2.0/(2.0*h0List.item(i))
for j in range(1,int(math.ceil(nCellsZ*h0List.item(i)/H))):
uVec = [rho*g*np.sin(alpha)/(2.0*mu)*(2.0*h0List.item(i)*j*deltaZ - (j*deltaZ)**2.0),0.0,0.0]
#~ p_rgh = rho*g*np.cos(alpha)*(h0List.item(i) - (j*deltaZ)) + np.tan(betaList.item(i))/aList.item(i)*sigma
p_rgh = np.tan(betaList.item(i))/aList.item(i)*sigma
# -- get the current rivulet width (at height j*deltaZ)
c = np.sqrt((R-(h0List.item(i)-j*deltaZ)/2.0)*8.0*(h0List.item(i)-j*deltaZ))
cellStr = writeBox(j*deltaZ,deltaZ,c,x.item(i),deltaX,uVec,p_rgh)
auxData.extend(cellStr)
# -- extend the list by the rest of the lines
auxData.extend(data[regsLine+2::])
# rewrite the setFieldsDict file
with open(caseDir + './system/setFieldsDict', 'w') as file:
file.writelines( auxData )
# PLOTTING THE CHARACTERISTICS OF PRESET SOLUTION===================
if pltFlag:
plt.figure(num=None, figsize=(20, 12), dpi=80, facecolor='w', edgecolor='k')
plt.show(block=False)
plt.subplot(2,1,1)
plt.plot(betaList,'bo')
plt.title('Contact angle evolution along the rivulet')
plt.xlabel('step count')
plt.xlim(0,len(betaList))
plt.ylabel('apparent contact angle')
plt.subplot(2,2,3)
plt.plot(aList,'mo')
plt.title('Rivulet half width evolution')
plt.xlabel('step count')
plt.xlim(0,len(aList))
plt.ylabel('rivulet half width')
plt.subplot(2,2,4)
plt.plot(h0List,'co')
plt.title('Rivulet centerline height evolution')
plt.xlabel('step count')
plt.xlim(0,len(h0List))
plt.ylabel('rivulet centerline height')
plt.show()
|
gpl-2.0
|
pprett/statsmodels
|
statsmodels/sandbox/tsa/diffusion.py
|
6
|
18681
|
'''getting started with diffusions, continuous time stochastic processes
Author: josef-pktd
License: BSD
References
----------
An Algorithmic Introduction to Numerical Simulation of Stochastic Differential
Equations
Author(s): Desmond J. Higham
Source: SIAM Review, Vol. 43, No. 3 (Sep., 2001), pp. 525-546
Published by: Society for Industrial and Applied Mathematics
Stable URL: http://www.jstor.org/stable/3649798
http://www.sitmo.com/ especially the formula collection
Notes
-----
OU process: use same trick for ARMA with constant (non-zero mean) and drift
some of the processes have easy multivariate extensions
*Open Issues*
include xzero in returned sample or not? currently not
*TODOS*
* Milstein from Higham paper, for which processes does it apply
* Maximum Likelihood estimation
* more statistical properties (useful for tests)
* helper functions for display and MonteCarlo summaries (also for testing/checking)
* more processes for the menagerie (e.g. from empirical papers)
* characteristic functions
* transformations, non-linear e.g. log
* special estimators, e.g. Ait Sahalia, empirical characteristic functions
* fft examples
* check naming of methods, "simulate", "sample", "simexact", ... ?
stochastic volatility models: estimation unclear
finance applications ? option pricing, interest rate models
'''
import numpy as np
from scipy import stats, signal
import matplotlib.pyplot as plt
#np.random.seed(987656789)
class Diffusion(object):
'''Wiener Process, Brownian Motion with mu=0 and sigma=1
'''
def __init__(self):
pass
def simulateW(self, nobs=100, T=1, dt=None, nrepl=1):
'''generate sample of Wiener Process
'''
dt = T*1.0/nobs
t = np.linspace(dt, 1, nobs)
dW = np.sqrt(dt)*np.random.normal(size=(nrepl, nobs))
W = np.cumsum(dW,1)
self.dW = dW
return W, t
def expectedsim(self, func, nobs=100, T=1, dt=None, nrepl=1):
'''get expectation of a function of a Wiener Process by simulation
initially test example from
'''
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
U = func(t, W)
Umean = U.mean(0)
return U, Umean, t
class AffineDiffusion(Diffusion):
'''
differential equation:
:math::
    dx_t = f(t,x_t)dt + \\sigma(t,x_t)dW_t
    integral:
    :math::
    x_T = x_0 + \\int_{0}^{T}f(t,x_t)dt + \\int_0^T \\sigma(t,x_t)dW_t
TODO: check definition, affine, what about jump diffusion?
'''
def __init__(self):
pass
def sim(self, nobs=100, T=1, dt=None, nrepl=1):
# this doesn't look correct if drift or sig depend on x
# see arithmetic BM
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
dx = self._drift() + self._sig() * W
x = np.cumsum(dx,1)
xmean = x.mean(0)
return x, xmean, t
def simEM(self, xzero=None, nobs=100, T=1, dt=None, nrepl=1, Tratio=4):
'''
from Higham 2001
TODO: reverse parameterization to start with final nobs and DT
TODO: check if I can skip the loop using my way from exactprocess
problem might be Winc (reshape into 3d and sum)
TODO: (later) check memory efficiency for large simulations
'''
#TODO: reverse parameterization to start with final nobs and DT
nobs = nobs * Tratio # simple way to change parameter
# maybe wrong parameterization,
# drift too large, variance too small ? which dt/Dt
# _drift, _sig independent of dt is wrong
if xzero is None:
xzero = self.xzero
if dt is None:
dt = T*1.0/nobs
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
dW = self.dW
t = np.linspace(dt, 1, nobs)
Dt = Tratio*dt;
L = nobs/Tratio; # L EM steps of size Dt = R*dt
Xem = np.zeros((nrepl,L)); # preallocate for efficiency
Xtemp = xzero
Xem[:,0] = xzero
for j in np.arange(1,L):
#Winc = np.sum(dW[:,Tratio*(j-1)+1:Tratio*j],1)
Winc = np.sum(dW[:,np.arange(Tratio*(j-1)+1,Tratio*j)],1)
#Xtemp = Xtemp + Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xtemp = Xtemp + self._drift(x=Xtemp) + self._sig(x=Xtemp) * Winc
#Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xem[:,j] = Xtemp
return Xem
'''
R = 4; Dt = R*dt; L = N/R; % L EM steps of size Dt = R*dt
Xem = zeros(1,L); % preallocate for efficiency
Xtemp = Xzero;
for j = 1:L
Winc = sum(dW(R*(j-1)+1:R*j));
Xtemp = Xtemp + Dt*lambda*Xtemp + mu*Xtemp*Winc;
Xem(j) = Xtemp;
end
'''
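# Editor's sketch (hedged): a plain-NumPy translation of the MATLAB
# Euler-Maruyama loop quoted above, for the SDE dX = lamda*X*dt + mu*X*dW.
# The parameter names (lamda, mu, R) follow the MATLAB snippet; this helper is
# illustrative only and is not used anywhere else in the module.
def _em_matlab_sketch(dW, xzero, lamda, mu, dt, R=4):
    nrepl, N = dW.shape
    Dt = R * dt                   # EM step size Dt = R*dt
    L = N // R                    # number of EM steps
    Xem = np.zeros((nrepl, L))
    Xem[:, 0] = xzero
    Xtemp = xzero * np.ones(nrepl)
    for j in range(1, L):
        # lump R Brownian increments into one step of size Dt
        Winc = dW[:, R * (j - 1):R * j].sum(axis=1)
        Xtemp = Xtemp + Dt * lamda * Xtemp + mu * Xtemp * Winc
        Xem[:, j] = Xtemp
    return Xem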
class ExactDiffusion(AffineDiffusion):
'''Diffusion that has an exact integral representation
this is currently mainly for geometric, log processes
'''
def __init__(self):
pass
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
'''
t = np.linspace(ddt, nobs*ddt, nobs)
#expnt = np.exp(-self.lambd * t)
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? if mean reverting lag-coeff<1
#lfilter doesn't handle 2d arrays, it does?
inc = self._exactconst(expddt) + self._exactstd(expddt) * normrvs
return signal.lfilter([1.], [1.,-expddt], inc)
def exactdist(self, xzero, t):
expnt = np.exp(-self.lambd * t)
meant = xzero * expnt + self._exactconst(expnt)
stdt = self._exactstd(expnt)
return stats.norm(loc=meant, scale=stdt)
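# Editor's sketch (hedged): the lfilter call in ExactDiffusion.exactprocess
# implements the AR(1) recursion x_t = expddt * x_{t-1} + inc_t.  The helper
# below checks that equivalence against an explicit loop; it is illustrative
# only and is not called anywhere in the module.
def _ar1_lfilter_check(expddt=0.9, nobs=50, seed=0):
    rng = np.random.RandomState(seed)
    inc = rng.normal(size=nobs)
    via_lfilter = signal.lfilter([1.], [1., -expddt], inc)
    x = np.zeros(nobs)
    for t in range(nobs):
        x[t] = (expddt * x[t - 1] if t > 0 else 0.0) + inc[t]
    return np.allclose(via_lfilter, x)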
class ArithmeticBrownian(AffineDiffusion):
'''
:math::
dx_t &= \\mu dt + \\sigma dW_t
'''
def __init__(self, xzero, mu, sigma):
self.xzero = xzero
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
return self.mu
def _sig(self, *args, **kwds):
return self.sigma
def exactprocess(self, nobs, xzero=None, ddt=1., nrepl=2):
'''ddt : discrete delta t
not tested yet
'''
if xzero is None:
xzero = self.xzero
t = np.linspace(ddt, nobs*ddt, nobs)
normrvs = np.random.normal(size=(nrepl,nobs))
        inc = self._drift() * ddt + self._sig() * np.sqrt(ddt) * normrvs
#return signal.lfilter([1.], [1.,-1], inc)
return xzero + np.cumsum(inc,1)
def exactdist(self, xzero, t):
        meant = xzero + self.mu * t
        stdt = self.sigma * np.sqrt(t)
return stats.norm(loc=meant, scale=stdt)
class GeometricBrownian(AffineDiffusion):
'''Geometric Brownian Motion
:math::
dx_t &= \\mu x_t dt + \\sigma x_t dW_t
$x_t $ stochastic process of Geometric Brownian motion,
$\mu $ is the drift,
$\sigma $ is the Volatility,
$W$ is the Wiener process (Brownian motion).
'''
def __init__(self, xzero, mu, sigma):
self.xzero = xzero
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
x = kwds['x']
return self.mu * x
def _sig(self, *args, **kwds):
x = kwds['x']
return self.sigma * x
class OUprocess(AffineDiffusion):
'''Ornstein-Uhlenbeck
:math::
dx_t&=\\lambda(\\mu - x_t)dt+\\sigma dW_t
mean reverting process
TODO: move exact higher up in class hierarchy
'''
def __init__(self, xzero, mu, lambd, sigma):
self.xzero = xzero
self.lambd = lambd
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
x = kwds['x']
return self.lambd * (self.mu - x)
    def _sig(self, *args, **kwds):
        # OU noise is additive: the diffusion term does not depend on the state x
        return self.sigma
def exact(self, xzero, t, normrvs):
#TODO: aggregate over time for process with observations for all t
# i.e. exact conditional distribution for discrete time increment
# -> exactprocess
#TODO: for single t, return stats.norm -> exactdist
expnt = np.exp(-self.lambd * t)
return (xzero * expnt + self.mu * (1-expnt) +
self.sigma * np.sqrt((1-expnt*expnt)/2./self.lambd) * normrvs)
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
# after writing this I saw the same use of lfilter in sitmo
'''
t = np.linspace(ddt, nobs*ddt, nobs)
expnt = np.exp(-self.lambd * t)
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? lfilter doesn't handle 2d arrays, it does?
from scipy import signal
#xzero * expnt
inc = ( self.mu * (1-expddt) +
self.sigma * np.sqrt((1-expddt*expddt)/2./self.lambd) * normrvs )
return signal.lfilter([1.], [1.,-expddt], inc)
def exactdist(self, xzero, t):
#TODO: aggregate over time for process with observations for all t
#TODO: for single t, return stats.norm
expnt = np.exp(-self.lambd * t)
meant = xzero * expnt + self.mu * (1-expnt)
stdt = self.sigma * np.sqrt((1-expnt*expnt)/2./self.lambd)
from scipy import stats
return stats.norm(loc=meant, scale=stdt)
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs), data[:-1]))
parest, res, rank, sing = np.linalg.lstsq(exog, data[1:])
const, slope = parest
errvar = res/(nobs-2.)
lambd = -np.log(slope)/dt
sigma = np.sqrt(-errvar * 2.*np.log(slope)/ (1-slope**2)/dt)
mu = const / (1-slope)
return mu, lambd, sigma
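# Editor's sketch (hedged): one way to exercise OUprocess -- simulate an exact
# path and recover (mu, lambd, sigma) with the least-squares fit above.  The
# parameter values are illustrative and the helper is never called here.
def _ou_fitls_sketch():
    ou = OUprocess(xzero=0., mu=1., lambd=0.5, sigma=0.1)
    path = ou.exactprocess(0., 500, ddt=0.1, nrepl=1)[0]
    return ou.fitls(path, dt=0.1)   # estimated (mu, lambd, sigma)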
class SchwartzOne(ExactDiffusion):
'''the Schwartz type 1 stochastic process
:math::
dx_t = \\kappa (\\mu - \\ln x_t) x_t dt + \\sigma x_tdW \\
The Schwartz type 1 process is a log of the Ornstein-Uhlenbeck stochastic
process.
'''
def __init__(self, xzero, mu, kappa, sigma):
self.xzero = xzero
self.mu = mu
self.kappa = kappa
self.lambd = kappa #alias until I fix exact
self.sigma = sigma
def _exactconst(self, expnt):
return (1-expnt) * (self.mu - self.sigma**2 / 2. /self.kappa)
def _exactstd(self, expnt):
return self.sigma * np.sqrt((1-expnt*expnt)/2./self.kappa)
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''uses exact solution for log of process
'''
lnxzero = np.log(xzero)
        lnx = super(SchwartzOne, self).exactprocess(lnxzero, nobs, ddt=ddt, nrepl=nrepl)
return np.exp(lnx)
def exactdist(self, xzero, t):
expnt = np.exp(-self.lambd * t)
#TODO: check this is still wrong, just guessing
meant = np.log(xzero) * expnt + self._exactconst(expnt)
stdt = self._exactstd(expnt)
return stats.lognorm(loc=meant, scale=stdt)
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs),np.log(data[:-1])))
parest, res, rank, sing = np.linalg.lstsq(exog, np.log(data[1:]))
const, slope = parest
errvar = res/(nobs-2.) #check denominator estimate, of sigma too low
kappa = -np.log(slope)/dt
sigma = np.sqrt(errvar * kappa / (1-np.exp(-2*kappa*dt)))
mu = const / (1-np.exp(-kappa*dt)) + sigma**2/2./kappa
if np.shape(mu)== (1,): mu = mu[0] # how to remove scalar array ?
if np.shape(sigma)== (1,): sigma = sigma[0]
#mu, kappa are good, sigma too small
return mu, kappa, sigma
class BrownianBridge(object):
def __init__(self):
pass
def simulate(self, x0, x1, nobs, nrepl=1, ddt=1., sigma=1.):
nobs=nobs+1
dt = ddt*1./nobs
t = np.linspace(dt, ddt-dt, nobs)
t = np.linspace(dt, ddt, nobs)
wm = [t/ddt, 1-t/ddt]
#wmi = wm[1]
#wm1 = x1*wm[0]
wmi = 1-dt/(ddt-t)
wm1 = x1*(dt/(ddt-t))
su = sigma* np.sqrt(t*(1-t)/ddt)
s = sigma* np.sqrt(dt*(ddt-t-dt)/(ddt-t))
x = np.zeros((nrepl, nobs))
x[:,0] = x0
rvs = s*np.random.normal(size=(nrepl,nobs))
for i in range(1,nobs):
x[:,i] = x[:,i-1]*wmi[i] + wm1[i] + rvs[:,i]
return x, t, su
class CompoundPoisson(object):
'''nobs iid compound poisson distributions, not a process in time
'''
def __init__(self, lambd, randfn=np.random.normal):
if len(lambd) != len(randfn):
raise ValueError('lambd and randfn need to have the same number of elements')
self.nobj = len(lambd)
self.randfn = randfn
self.lambd = np.asarray(lambd)
def simulate(self, nobs, nrepl=1):
nobj = self.nobj
x = np.zeros((nrepl, nobs, nobj))
N = np.random.poisson(self.lambd[None,None,:], size=(nrepl,nobs,nobj))
for io in range(nobj):
randfnc = self.randfn[io]
nc = N[:,:,io]
#print nrepl,nobs,nc
#xio = randfnc(size=(nrepl,nobs,np.max(nc))).cumsum(-1)[np.arange(nrepl)[:,None],np.arange(nobs),nc-1]
rvs = randfnc(size=(nrepl,nobs,np.max(nc)))
print 'rvs.sum()', rvs.sum(), rvs.shape
xio = rvs.cumsum(-1)[np.arange(nrepl)[:,None],np.arange(nobs),nc-1]
#print xio.shape
x[:,:,io] = xio
x[N==0] = 0
return x, N
'''
randn('state',100) % set the state of randn
T = 1; N = 500; dt = T/N; t = [dt:dt:1];
M = 1000; % M paths simultaneously
dW = sqrt(dt)*randn(M,N); % increments
W = cumsum(dW,2); % cumulative sum
U = exp(repmat(t,[M 1]) + 0.5*W);
Umean = mean(U);
plot([0,t],[1,Umean],'b-'), hold on % plot mean over M paths
plot([0,t],[ones(5,1),U(1:5,:)],'r--'), hold off % plot 5 individual paths
xlabel('t','FontSize',16)
ylabel('U(t)','FontSize',16,'Rotation',0,'HorizontalAlignment','right')
legend('mean of 1000 paths','5 individual paths',2)
averr = norm((Umean - exp(9*t/8)),'inf') % sample error
'''
if __name__ == '__main__':
doplot = 1
nrepl = 1000
examples = []#['all']
if 'all' in examples:
w = Diffusion()
# Wiener Process
# ^^^^^^^^^^^^^^
ws = w.simulateW(1000, nrepl=nrepl)
if doplot:
plt.figure()
tmp = plt.plot(ws[0].T)
tmp = plt.plot(ws[0].mean(0), linewidth=2)
plt.title('Standard Brownian Motion (Wiener Process)')
func = lambda t, W: np.exp(t + 0.5*W)
us = w.expectedsim(func, nobs=500, nrepl=nrepl)
if doplot:
plt.figure()
tmp = plt.plot(us[0].T)
tmp = plt.plot(us[1], linewidth=2)
plt.title('Brownian Motion - exp')
#plt.show()
averr = np.linalg.norm(us[1] - np.exp(9*us[2]/8.), np.inf)
print averr
#print us[1][:10]
#print np.exp(9.*us[2][:10]/8.)
# Geometric Brownian
# ^^^^^^^^^^^^^^^^^^
gb = GeometricBrownian(xzero=1., mu=0.01, sigma=0.5)
gbs = gb.simEM(nobs=100, nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(gbs.T)
tmp = plt.plot(gbs.mean(0), linewidth=2)
plt.title('Geometric Brownian')
plt.figure()
tmp = plt.plot(np.log(gbs).T)
tmp = plt.plot(np.log(gbs.mean(0)), linewidth=2)
plt.title('Geometric Brownian - log-transformed')
ab = ArithmeticBrownian(xzero=1, mu=0.05, sigma=1)
abs = ab.simEM(nobs=100, nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(abs.T)
tmp = plt.plot(abs.mean(0), linewidth=2)
plt.title('Arithmetic Brownian')
# Ornstein-Uhlenbeck
# ^^^^^^^^^^^^^^^^^^
ou = OUprocess(xzero=2, mu=1, lambd=0.5, sigma=0.1)
ous = ou.simEM()
oue = ou.exact(1, 1, np.random.normal(size=(5,10)))
ou.exact(0, np.linspace(0,10,10/0.1), 0)
ou.exactprocess(0,10)
print ou.exactprocess(0,10, ddt=0.1,nrepl=10).mean(0)
#the following looks good, approaches mu
oues = ou.exactprocess(0,100, ddt=0.1,nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(oues.T)
tmp = plt.plot(oues.mean(0), linewidth=2)
plt.title('Ornstein-Uhlenbeck')
# SchwartsOne
# ^^^^^^^^^^^
so = SchwartzOne(xzero=0, mu=1, kappa=0.5, sigma=0.1)
sos = so.exactprocess(0,50, ddt=0.1,nrepl=100)
print sos.mean(0)
print np.log(sos.mean(0))
doplot = 1
if doplot:
plt.figure()
tmp = plt.plot(sos.T)
tmp = plt.plot(sos.mean(0), linewidth=2)
plt.title('Schwartz One')
print so.fitls(sos[0,:],dt=0.1)
sos2 = so.exactprocess(0,500, ddt=0.1,nrepl=5)
print 'true: mu=1, kappa=0.5, sigma=0.1'
for i in range(5):
print so.fitls(sos2[i],dt=0.1)
# Brownian Bridge
# ^^^^^^^^^^^^^^^
bb = BrownianBridge()
#bbs = bb.sample(x0, x1, nobs, nrepl=1, ddt=1., sigma=1.)
bbs, t, wm = bb.simulate(0, 0.5, 99, nrepl=500, ddt=1., sigma=0.1)
if doplot:
plt.figure()
tmp = plt.plot(bbs.T)
tmp = plt.plot(bbs.mean(0), linewidth=2)
plt.title('Brownian Bridge')
plt.figure()
plt.plot(wm,'r', label='theoretical')
plt.plot(bbs.std(0), label='simulated')
plt.title('Brownian Bridge - Variance')
plt.legend()
# Compound Poisson
# ^^^^^^^^^^^^^^^^
cp = CompoundPoisson([1,1], [np.random.normal,np.random.normal])
cps = cp.simulate(nobs=20000,nrepl=3)
print cps[0].sum(-1).sum(-1)
print cps[0].sum()
print cps[0].mean(-1).mean(-1)
print cps[0].mean()
print cps[1].size
print cps[1].sum()
#Note Y = sum^{N} X is compound poisson of iid x, then
#E(Y) = E(N)*E(X) eg. eq. (6.37) page 385 in http://ee.stanford.edu/~gray/sp.html
#plt.show()
|
bsd-3-clause
|
wltrimbl/kmerspectrumanalyzer
|
ksatools/graphit.py
|
1
|
7458
|
#!/usr/bin/env python
'''Tool to generate graphs of kmer spectra'''
COLORLIST = ["b", "g", "r", "c", "y",
"m", "k", "BlueViolet", "Coral", "Chartreuse",
"DarkGrey", "DeepPink", "LightPink"]
import sys
import argparse
import numpy as np
import matplotlib as mpl
import ksatools
def getcolor(index, colorlist):
if colorlist == []:
colorlist = COLORLIST
l = index % len(colorlist)
return colorlist[l]
def plotme(data, graphtype=None, label=None, n=0, opts={}, color=None, style="-", scale=1):
import matplotlib.pyplot as plt
# note, calccumsum will raise an exception here if data is invalid
    if color is None:
        color = getcolor(n, COLORLIST)
if label == "":
label = None
if "thickness" in opts.keys():
thickness = opts["thickness"]
else:
thickness = 1
if graphtype == "linear" or graphtype == None:
# if "markers" in opts.keys()
# pA = plt.plot(data[:, 0], data[:, 1], ".", color=color, label=label, linestyle=style)
# pA = plt.plot(s * data[:, 0], data[:, 1], color=color, label=label, linestyle=style)
        pA = plt.plot(scale * data[:, 0], data[:, 1], color=color, label=label, linestyle=style, linewidth=thickness)
legendloc = "upper right"
if graphtype == "semilogy":
if "dot" in opts.keys():
            pA = plt.semilogy(
                scale * data[:, 0], data[:, 1], ".", color=color, label=label, linestyle=style)
        pA = plt.semilogy(scale * data[:, 0], data[:, 1], color=color,
                          label=None, linestyle=style, linewidth=thickness)
legendloc = "upper right"
if graphtype == "semilogx":
if "dot" in opts.keys():
pA = plt.semilogx(data[:, 0], data[:, 1], ".",
color=color, label=label, linestyle=style)
        pA = plt.semilogx(scale * data[:, 0], data[:, 1], color=color,
                          label=label, linestyle=style, linewidth=thickness)
legendloc = "upper right"
if graphtype == "loglog":
        pA = plt.loglog(scale * data[:, 0], data[:, 1], ".",
                        color=color, label=label, linestyle=style)
        pA = plt.loglog(scale * data[:, 0], data[:, 1], color=color,
                        label=None, linestyle=style, linewidth=thickness)
legendloc = "upper right"
if graphtype == "diff":
pA = plt.plot(data[1:, 0], np.exp(np.diff(np.log(data[:, 1]))) /
data[1:, 0], ".", color=color, label=label, linestyle=style)
pA = plt.plot(data[1:, 0], np.exp(np.diff(
np.log(data[:, 1]))) / data[1:, 0], color=color, label=None, linestyle=style)
legendloc = "upper right"
if not "suppress" in opts.keys() and opts["suppress"] is True:
plt.legend()
if "plotlegend" in opts.keys():
plt.gcf().suptitle(opts["plotlegend"], fontsize=24, x=0.03)
if "," in opts["xlim"]:
x1, x2 = opts["xlim"].split(",")
plt.xlim([float(x1), float(x2)])
plt.xlabel(opts["xlabel"], fontsize=18)
plt.ylabel(opts["ylabel"], fontsize=18)
plt.grid(1)
if __name__ == '__main__':
usage = "graphit.py <options> <arguments>"
parser = argparse.ArgumentParser(usage)
parser.add_argument("files", nargs='*', type=str)
parser.add_argument("-x", "--xlabel", dest="xlabel", action="store",
default="x label", help="")
parser.add_argument("-y", "--ylabel", dest="ylabel", action="store",
default="y label", help="")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
default=False, help="verbose")
parser.add_argument("-o", "--outfile", dest="outfile", action="store",
default="test.png", help="dump table with outputs ")
parser.add_argument("-g", "--graphtype", dest="graphtype", action="store",
default=None, help="graph type")
parser.add_argument("-i", "--interactive", dest="interactive", action="store_true",
default=False, help="interactive mode--draw window")
parser.add_argument("-l", "--list", dest="filelist", required=True,
help="file containing list of targets and labels")
parser.add_argument("-t", "--thickness", dest="thickness",
default=2, help="line thickness for traces")
parser.add_argument("-w", "--writetype", dest="writetype",
default="pdf", help="file type for output (pdf,png)")
parser.add_argument("-p", "--plotlegend", dest="plotlegend",
default=None, help="Overall number at top of graph")
parser.add_argument("-s", "--suppresslegend", dest="suppress", action="store_true",
default=False, help="supress display of legend")
parser.add_argument("-n", "--name", dest="title",
default=None, help="Name for graph, graph title")
parser.add_argument("-c", "--scale", dest="scale",
default=False, action="store_true", help="Multiply by col 2")
parser.add_argument("--xlim", dest="xlim",
default="", type=str, help="xlimits: comma-separated")
parser.add_argument("--ylim", dest="ylim",
default="", type=str, help="ylimits: comma-separated")
parser.add_argument("-d", "--dot", dest="dot",
default=False, action="store_true", help="plot dots")
ARGS = parser.parse_args()
SCALE = ARGS.scale
if not ARGS.interactive:
mpl.use("Agg")
else:
mpl.use('TkAgg')
import matplotlib.pyplot as plt
listfile = ARGS.filelist
IN_FILE = open(listfile, "r").readlines()
numplots = len(IN_FILE)
n = 0
for line in IN_FILE:
if line[0] != "#":
a = line.strip().split("\t")
if len(a[0]) > 0:
if len(a) == 1:
a.append(a[0])
sys.stderr.write("%s %s \n" % (a[0], a[1]))
filename = a[0]
if len(a) == 3 or len(a) == 4:
selectedcolor = a[2]
else:
selectedcolor = getcolor(n, COLORLIST)
spectrum = ksatools.loadfile(filename)
if SCALE:
s = float(a[1])
else:
s = 1
if len(a) == 4:
selectedcolor = a[2]
selectedstyle = a[3]
plotme(spectrum, label=a[1], color=selectedcolor, scale=s,
style=selectedstyle, graphtype=ARGS.graphtype, opts=vars(ARGS))
else:
plotme(spectrum, label=a[1], color=selectedcolor, scale=s,
graphtype=ARGS.graphtype, opts=vars(ARGS))
n = n + 1
if ARGS.suppress == 0:
plt.legend(loc="upper left")
else:
for v in ARGS.files:
print(v)
filename = v
spectrum = ksatools.loadfile(filename)
plotme(spectrum, filename, opts=vars(ARGS),
color=COLORLIST[n], graphtype=ARGS.graphtype)
n = n + 1
# plt.legend(loc="upper left")
if ARGS.interactive:
plt.show()
if ARGS.outfile == "test.png":
sys.stderr.write("Warning! printing graphs in test.png!\n")
else:
sys.stderr.write("Printing graphs in " + ARGS.outfile + "\n")
plt.savefig(ARGS.outfile)
|
bsd-2-clause
|
micahcochran/geopandas
|
geopandas/tests/test_merge.py
|
2
|
1943
|
from __future__ import absolute_import
import pandas as pd
from shapely.geometry import Point
from geopandas import GeoDataFrame, GeoSeries
class TestMerging:
def setup_method(self):
self.gseries = GeoSeries([Point(i, i) for i in range(3)])
self.series = pd.Series([1, 2, 3])
self.gdf = GeoDataFrame({'geometry': self.gseries, 'values': range(3)})
self.df = pd.DataFrame({'col1': [1, 2, 3], 'col2': [0.1, 0.2, 0.3]})
def _check_metadata(self, gdf, geometry_column_name='geometry', crs=None):
assert gdf._geometry_column_name == geometry_column_name
assert gdf.crs == crs
def test_merge(self):
res = self.gdf.merge(self.df, left_on='values', right_on='col1')
# check result is a GeoDataFrame
assert isinstance(res, GeoDataFrame)
# check geometry property gives GeoSeries
assert isinstance(res.geometry, GeoSeries)
# check metadata
self._check_metadata(res)
## test that crs and other geometry name are preserved
self.gdf.crs = {'init' :'epsg:4326'}
self.gdf = (self.gdf.rename(columns={'geometry': 'points'})
.set_geometry('points'))
res = self.gdf.merge(self.df, left_on='values', right_on='col1')
assert isinstance(res, GeoDataFrame)
assert isinstance(res.geometry, GeoSeries)
self._check_metadata(res, 'points', self.gdf.crs)
def test_concat_axis0(self):
res = pd.concat([self.gdf, self.gdf])
assert res.shape == (6, 2)
assert isinstance(res, GeoDataFrame)
assert isinstance(res.geometry, GeoSeries)
self._check_metadata(res)
def test_concat_axis1(self):
res = pd.concat([self.gdf, self.df], axis=1)
assert res.shape == (3, 4)
assert isinstance(res, GeoDataFrame)
assert isinstance(res.geometry, GeoSeries)
self._check_metadata(res)
|
bsd-3-clause
|
flightgong/scikit-learn
|
sklearn/feature_selection/tests/test_base.py
|
2
|
3669
|
import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import atleast2d_or_csc
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = atleast2d_or_csc(X)
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
|
bsd-3-clause
|
samshara/Stock-Market-Analysis-and-Prediction
|
smap_nepse/prediction/plotter.py
|
1
|
3450
|
import sys
sys.path.insert(0, '../../smap_nepse')
import pandas as pd
import numpy as np
import prepareInput as pi
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.finance as finance
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.font_manager as font_manager
from pandas.tools.plotting import table
from preprocessing import moreIndicators as indi
__author__ = "Semanta Bhandari"
__copyright__ = ""
__credits__ = ["Sameer Rai","Sumit Shrestha","Sankalpa Timilsina"]
__license__ = ""
__version__ = "0.1"
__email__ = "[email protected]"
def indicator_plot(df):
index = df.index
df.index = range(len(df.index))
df.columns = ['Transactions','Traded_Shares','Traded_Amount','High','Low','Close']
df = indi.EMA(df, 20)
df = indi.RSI(df, 14)
df = indi.MOM(df, 10)
df = indi.MA(df, 100)
df = indi.MA(df, 20)
df.index = index
last = df[-1:]
df = df.drop(df.columns[:5], axis=1)
# print(df.describe())
print(df.corr())
# print(df.RSI_10)
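    # Figure layout: RSI(14) panel on top (rect1), Close with MA(20)/MA(100)
    # in the middle (rect2, with a twin axis ax2t), and Momentum(10) at the
    # bottom (rect3); the three panels share the x axis.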
plt.rc('axes', grid=True)
plt.rc('grid', color='0.75', linestyle='-', linewidth=0.5)
textsize = 9
left, width = 0.1, 0.8
rect1 = [left, 0.7, width, 0.2]
rect2 = [left, 0.3, width, 0.4]
rect3 = [left, 0.1, width, 0.2]
fig = plt.figure(facecolor='white')
axescolor = '#f6f6f6' # the axes background color
ax1 = fig.add_axes(rect1, axisbg=axescolor) # left, bottom, width, height
ax2 = fig.add_axes(rect2, axisbg=axescolor, sharex=ax1)
ax2t = ax2.twinx()
ax3 = fig.add_axes(rect3, axisbg=axescolor, sharex=ax1)
rsi = df.RSI_14*100
fillcolor = 'darkgoldenrod'
ticker = 'NABIL'
ax1.plot(df.index, rsi, color=fillcolor)
ax1.axhline(70, color=fillcolor)
ax1.axhline(30, color=fillcolor)
ax1.fill_between(df.index, rsi, 70, where=(rsi >= 70), facecolor=fillcolor, edgecolor=fillcolor)
ax1.fill_between(df.index, rsi, 30, where=(rsi <= 30), facecolor=fillcolor, edgecolor=fillcolor)
ax1.text(0.6, 0.9, '>70 = overbought', va='top', transform=ax1.transAxes, fontsize=textsize)
ax1.text(0.6, 0.1, '<30 = oversold', transform=ax1.transAxes, fontsize=textsize)
ax1.set_ylim(0, 100)
ax1.set_yticks([30, 70])
ax1.text(0.025, 0.95, 'RSI (14)', va='top', transform=ax1.transAxes, fontsize=textsize)
ax1.set_title('%s daily' % ticker)
# plt.figure()
# df.plot()
ma20 = df['MA_20']
ma100 = df['MA_100']
linema100, = ax2.plot(df.index, ma100, color='green', lw=2, label='MA (100)', linestyle = '--')
linema20, = ax2.plot(df.index, ma20, color='blue', lw=2, label='MA (20)', linestyle = '-.')
close, = ax2.plot(df.index, df.Close, color='red', lw=2, label='Close')
s = '%s H:%1.2f L:%1.2f C:%1.2f' % (
last.index[0].date(), last.High, last.Low, last.Close)
t4 = ax2.text(0.3, 0.9, s, transform=ax2.transAxes, fontsize=textsize)
props = font_manager.FontProperties(size=10)
leg = ax2.legend(loc='center left', shadow=True, fancybox=True, prop=props)
leg.get_frame().set_alpha(0.5)
ax3.plot(df.index, df.Momentum_10)
ax3.text(0.025, 0.95, 'Momentum (10)', va='top',
transform=ax3.transAxes, fontsize=textsize)
plt.show()
#if __name__ == "__main__" :
dataframe = pi.load_data_frame('NABIL.csv')
indicator_plot(dataframe[1000:])
|
mit
|
fmacias64/spyre
|
examples/stocks_example.py
|
3
|
2387
|
# tested with python2.7 and 3.4
from spyre import server
import pandas as pd
import json
try:
import urllib2
except ImportError:
import urllib.request as urllib2
class StockExample(server.App):
def __init__(self):
# implements a simple caching mechanism to avoid multiple calls to the yahoo finance api
self.data_cache = None
self.params_cache = None
title = "Historical Stock Prices"
inputs = [{ "type":'dropdown',
"label": 'Company',
"options" : [ {"label": "Google", "value":"GOOG"},
{"label": "Yahoo", "value":"YHOO"},
{"label": "Apple", "value":"AAPL"}],
"value":'GOOG',
"key": 'ticker',
"action_id": "update_data"}]
controls = [{ "type" : "hidden",
"id" : "update_data"}]
tabs = ["Plot", "Table"]
outputs = [{ "type" : "plot",
"id" : "plot",
"control_id" : "update_data",
"tab" : "Plot"},
{ "type" : "table",
"id" : "table_id",
"control_id" : "update_data",
"tab" : "Table",
"on_page_load" : True }]
def getData(self, params):
params.pop("output_id",None) # caching layer
if self.params_cache!=params: # caching layer
ticker = params['ticker']
# make call to yahoo finance api to get historical stock data
api_url = 'https://chartapi.finance.yahoo.com/instrument/1.0/{}/chartdata;type=quote;range=3m/json'.format(ticker)
result = urllib2.urlopen(api_url).read()
data = json.loads(result.decode('utf-8').replace('finance_charts_json_callback( ','')[:-1]) # strip away the javascript and load json
self.company_name = data['meta']['Company-Name']
df = pd.DataFrame.from_records(data['series'])
df['Date'] = pd.to_datetime(df['Date'],format='%Y%m%d')
self.data_cache = df # caching layer
self.params_cache = params # caching layer
return self.data_cache
def getPlot(self, params):
### implements a simple caching mechanism to avoid multiple calls to the yahoo finance api ###
params.pop("output_id",None)
while self.params_cache!=params:
pass
###############################################################################################
df = self.getData(params)
plt_obj = df.set_index('Date').drop(['volume'],axis=1).plot()
plt_obj.set_ylabel("Price")
plt_obj.set_title(self.company_name)
fig = plt_obj.get_figure()
return fig
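# Standalone sketch of the caching idea used in getData above (plain Python,
# not spyre API): re-fetch only when the request parameters have changed.
def _cached_fetch(state, params, fetch):
    if state.get('params') != params:  # cache miss: do the expensive call
        state['data'] = fetch(params)
        state['params'] = dict(params)
    return state['data']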
if __name__ == '__main__':
app = StockExample()
app.launch(port=9093)
|
mit
|
glouppe/scikit-learn
|
examples/svm/plot_svm_nonlinear.py
|
268
|
1091
|
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
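# Quick sanity check (an extra, illustrative step, not part of the original
# example): the four XOR "corners" should mostly be classified correctly,
# i.e. roughly [False, False, True, True].
print(clf.predict([[1, 1], [-1, -1], [1, -1], [-1, 1]]))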
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
bsd-3-clause
|
saketkc/statsmodels
|
statsmodels/sandbox/tsa/movstat.py
|
34
|
14871
|
'''using scipy signal and numpy correlate to calculate some time series
statistics
original developer notes
see also scikits.timeseries (movstat is partially inspired by it)
added 2009-08-29
timeseries moving stats are in c, autocorrelation similar to here
(I thought I saw moving stats somewhere in python, maybe not)
TODO
moving statistics
- filters don't handle boundary conditions nicely (correctly ?)
e.g. minimum order filter uses 0 for out of bounds value
-> append and prepend with last resp. first value
- enhance for nd arrays, with axis = 0
Note: Equivalence for 1D signals
>>> np.all(signal.correlate(x,[1,1,1],'valid')==np.correlate(x,[1,1,1]))
True
>>> np.all(ndimage.filters.correlate(x,[1,1,1], origin = -1)[:-3+1]==np.correlate(x,[1,1,1]))
True
# multidimensional, but, it looks like it uses common filter across time series, no VAR
ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)
ndimage.filters.correlate(x,[1,1,1],origin = 1))
ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)[0]==\
ndimage.filters.correlate(x,[1,1,1],origin = 1))
True
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)[0]==ndimage.filters.correlate(x,[1,1,1],origin = 1))
update
2009-09-06: cosmetic changes, rearrangements
'''
from __future__ import print_function
import numpy as np
from scipy import signal
from numpy.testing import assert_array_equal, assert_array_almost_equal
import statsmodels.api as sm
def expandarr(x,k):
#make it work for 2D or nD with axis
kadd = k
if np.ndim(x) == 2:
kadd = (kadd, np.shape(x)[1])
return np.r_[np.ones(kadd)*x[0],x,np.ones(kadd)*x[-1]]
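# Small illustration: expandarr pads k copies of the edge values on each side,
# e.g. expandarr(np.array([1., 2., 3.]), 2) gives [1., 1., 1., 2., 3., 3., 3.].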
def movorder(x, order = 'med', windsize=3, lag='lagged'):
'''moving order statistics
Parameters
----------
x : array
time series data
order : float or 'med', 'min', 'max'
which order statistic to calculate
windsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
filtered array
'''
#if windsize is even should it raise ValueError
if lag == 'lagged':
lead = windsize//2
elif lag == 'centered':
lead = 0
elif lag == 'leading':
lead = -windsize//2 +1
else:
raise ValueError
if not isinstance(order, str) and np.isfinite(order): #if np.isnumber(order):
ord = order # note: ord is a builtin function
elif order == 'med':
ord = (windsize - 1)//2
elif order == 'min':
ord = 0
elif order == 'max':
ord = windsize - 1
else:
raise ValueError
#return signal.order_filter(x,np.ones(windsize),ord)[:-lead]
xext = expandarr(x, windsize)
#np.r_[np.ones(windsize)*x[0],x,np.ones(windsize)*x[-1]]
return signal.order_filter(xext,np.ones(windsize),ord)[windsize-lead:-(windsize+lead)]
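# Minimal usage sketch (mirrors the assertions in check_movorder below): for a
# monotonically increasing series the lagged moving maximum is the series itself.
def _movorder_example():
    x = np.arange(1, 10)
    xo = movorder(x, order='max', windsize=3, lag='lagged')
    assert_array_equal(xo, x)
    return xo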
def check_movorder():
'''graphical test for movorder'''
import matplotlib.pylab as plt
x = np.arange(1,10)
xo = movorder(x, order='max')
assert_array_equal(xo, x)
x = np.arange(10,1,-1)
xo = movorder(x, order='min')
assert_array_equal(xo, x)
assert_array_equal(movorder(x, order='min', lag='centered')[:-1], x[1:])
tt = np.linspace(0,2*np.pi,15)
x = np.sin(tt) + 1
xo = movorder(x, order='max')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max lagged')
xo = movorder(x, order='max', lag='centered')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max centered')
xo = movorder(x, order='max', lag='leading')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max leading')
# identity filter
##>>> signal.order_filter(x,np.ones(1),0)
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9.])
# median filter
##signal.medfilt(np.sin(x), kernel_size=3)
##>>> plt.figure()
##<matplotlib.figure.Figure object at 0x069BBB50>
##>>> x=np.linspace(0,3,100);plt.plot(x,np.sin(x),x,signal.medfilt(np.sin(x), kernel_size=3))
# remove old version
##def movmeanvar(x, windowsize=3, valid='same'):
## '''
## this should also work along axis or at least for columns
## '''
## n = x.shape[0]
## x = expandarr(x, windowsize - 1)
## takeslice = slice(windowsize-1, n + windowsize-1)
## avgkern = (np.ones(windowsize)/float(windowsize))
## m = np.correlate(x, avgkern, 'same')#[takeslice]
## print(m.shape)
## print(x.shape)
## xm = x - m
## v = np.correlate(x*x, avgkern, 'same') - m**2
## v1 = np.correlate(xm*xm, avgkern, valid) #not correct for var of window
###>>> np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')-np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')**2
## return m[takeslice], v[takeslice], v1
def movmean(x, windowsize=3, lag='lagged'):
'''moving window mean
Parameters
----------
x : array
time series data
windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
moving mean, with same shape as x
Notes
-----
for leading and lagging the data array x is extended by the closest value of the array
'''
return movmoment(x, 1, windowsize=windowsize, lag=lag)
def movvar(x, windowsize=3, lag='lagged'):
'''moving window variance
Parameters
----------
x : array
time series data
windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
moving variance, with same shape as x
'''
m1 = movmoment(x, 1, windowsize=windowsize, lag=lag)
m2 = movmoment(x, 2, windowsize=windowsize, lag=lag)
return m2 - m1*m1
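# Sketch of the m2 - m1**2 identity used by movvar (the expected value follows
# the 'va' regression array in the __main__ block below): once the lagged
# window is full, the variance of any three consecutive integers is 2/3.
def _movvar_example():
    x = np.arange(10)
    v = movvar(x, windowsize=3, lag='lagged')
    assert_array_almost_equal(v[2:], 2. / 3.)
    return v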
def movmoment(x, k, windowsize=3, lag='lagged'):
'''non-central moment
Parameters
----------
x : array
time series data
windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : array
k-th moving non-central moment, with same shape as x
Notes
-----
If data x is 2d, then moving moment is calculated for each
column.
'''
windsize = windowsize
#if windsize is even should it raise ValueError
if lag == 'lagged':
#lead = -0 + windsize #windsize//2
lead = -0# + (windsize-1) + windsize//2
sl = slice((windsize-1) or None, -2*(windsize-1) or None)
elif lag == 'centered':
lead = -windsize//2 #0#-1 #+ #(windsize-1)
sl = slice((windsize-1)+windsize//2 or None, -(windsize-1)-windsize//2 or None)
elif lag == 'leading':
#lead = -windsize +1#+1 #+ (windsize-1)#//2 +1
lead = -windsize +2 #-windsize//2 +1
sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
else:
raise ValueError
avgkern = (np.ones(windowsize)/float(windowsize))
xext = expandarr(x, windsize-1)
#Note: expandarr increases the array size by 2*(windsize-1)
#sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
print(sl)
if xext.ndim == 1:
return np.correlate(xext**k, avgkern, 'full')[sl]
#return np.correlate(xext**k, avgkern, 'same')[windsize-lead:-(windsize+lead)]
else:
print(xext.shape)
print(avgkern[:,None].shape)
# try first with 2d along columns, possibly ndim with axis
return signal.correlate(xext**k, avgkern[:,None], 'full')[sl,:]
#x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,[1],'full')
#x=0.5**np.arange(3);np.correlate(x,x,'same')
##>>> x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
##
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> xo
##xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> x=np.ones(10);xo=x-x.mean();a=np.correlate(xo,xo,'full')
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> d
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 9.,
## 8., 7., 6., 5., 4., 3., 2., 1.])
##def ccovf():
## pass
## #x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
__all__ = ['movorder', 'movmean', 'movvar', 'movmoment']
if __name__ == '__main__':
print('\nchecking moving mean and variance')
nobs = 10
x = np.arange(nobs)
ws = 3
ave = np.array([ 0., 1/3., 1., 2., 3., 4., 5., 6., 7., 8.,
26/3., 9])
va = np.array([[ 0. , 0. ],
[ 0.22222222, 0.88888889],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.22222222, 0.88888889],
[ 0. , 0. ]])
ave2d = np.c_[ave, 2*ave]
print(movmean(x, windowsize=ws, lag='lagged'))
print(movvar(x, windowsize=ws, lag='lagged'))
print([np.var(x[i-ws:i]) for i in range(ws, nobs)])
m1 = movmoment(x, 1, windowsize=3, lag='lagged')
m2 = movmoment(x, 2, windowsize=3, lag='lagged')
print(m1)
print(m2)
print(m2 - m1*m1)
# this implicitly also tests moment
assert_array_almost_equal(va[ws-1:,0],
movvar(x, windowsize=3, lag='leading'))
assert_array_almost_equal(va[ws//2:-ws//2+1,0],
movvar(x, windowsize=3, lag='centered'))
assert_array_almost_equal(va[:-ws+1,0],
movvar(x, windowsize=ws, lag='lagged'))
print('\nchecking moving moment for 2d (columns only)')
x2d = np.c_[x, 2*x]
print(movmoment(x2d, 1, windowsize=3, lag='centered'))
print(movmean(x2d, windowsize=ws, lag='lagged'))
print(movvar(x2d, windowsize=ws, lag='lagged'))
assert_array_almost_equal(va[ws-1:,:],
movvar(x2d, windowsize=3, lag='leading'))
assert_array_almost_equal(va[ws//2:-ws//2+1,:],
movvar(x2d, windowsize=3, lag='centered'))
assert_array_almost_equal(va[:-ws+1,:],
movvar(x2d, windowsize=ws, lag='lagged'))
assert_array_almost_equal(ave2d[ws-1:],
movmoment(x2d, 1, windowsize=3, lag='leading'))
assert_array_almost_equal(ave2d[ws//2:-ws//2+1],
movmoment(x2d, 1, windowsize=3, lag='centered'))
assert_array_almost_equal(ave2d[:-ws+1],
movmean(x2d, windowsize=ws, lag='lagged'))
from scipy import ndimage
print(ndimage.filters.correlate1d(x2d, np.array([1,1,1])/3., axis=0))
#regression test check
xg = np.array([ 0. , 0.1, 0.3, 0.6, 1. , 1.5, 2.1, 2.8, 3.6,
4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5,
13.5, 14.5, 15.5, 16.5, 17.5, 18.5, 19.5, 20.5, 21.5,
22.5, 23.5, 24.5, 25.5, 26.5, 27.5, 28.5, 29.5, 30.5,
31.5, 32.5, 33.5, 34.5, 35.5, 36.5, 37.5, 38.5, 39.5,
40.5, 41.5, 42.5, 43.5, 44.5, 45.5, 46.5, 47.5, 48.5,
49.5, 50.5, 51.5, 52.5, 53.5, 54.5, 55.5, 56.5, 57.5,
58.5, 59.5, 60.5, 61.5, 62.5, 63.5, 64.5, 65.5, 66.5,
67.5, 68.5, 69.5, 70.5, 71.5, 72.5, 73.5, 74.5, 75.5,
76.5, 77.5, 78.5, 79.5, 80.5, 81.5, 82.5, 83.5, 84.5,
85.5, 86.5, 87.5, 88.5, 89.5, 90.5, 91.5, 92.5, 93.5,
94.5])
assert_array_almost_equal(xg, movmean(np.arange(100), 10,'lagged'))
xd = np.array([ 0.3, 0.6, 1. , 1.5, 2.1, 2.8, 3.6, 4.5, 5.5,
6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5, 13.5, 14.5,
15.5, 16.5, 17.5, 18.5, 19.5, 20.5, 21.5, 22.5, 23.5,
24.5, 25.5, 26.5, 27.5, 28.5, 29.5, 30.5, 31.5, 32.5,
33.5, 34.5, 35.5, 36.5, 37.5, 38.5, 39.5, 40.5, 41.5,
42.5, 43.5, 44.5, 45.5, 46.5, 47.5, 48.5, 49.5, 50.5,
51.5, 52.5, 53.5, 54.5, 55.5, 56.5, 57.5, 58.5, 59.5,
60.5, 61.5, 62.5, 63.5, 64.5, 65.5, 66.5, 67.5, 68.5,
69.5, 70.5, 71.5, 72.5, 73.5, 74.5, 75.5, 76.5, 77.5,
78.5, 79.5, 80.5, 81.5, 82.5, 83.5, 84.5, 85.5, 86.5,
87.5, 88.5, 89.5, 90.5, 91.5, 92.5, 93.5, 94.5, 95.4,
96.2, 96.9, 97.5, 98. , 98.4, 98.7, 98.9, 99. ])
assert_array_almost_equal(xd, movmean(np.arange(100), 10,'leading'))
xc = np.array([ 1.36363636, 1.90909091, 2.54545455, 3.27272727,
4.09090909, 5. , 6. , 7. ,
8. , 9. , 10. , 11. ,
12. , 13. , 14. , 15. ,
16. , 17. , 18. , 19. ,
20. , 21. , 22. , 23. ,
24. , 25. , 26. , 27. ,
28. , 29. , 30. , 31. ,
32. , 33. , 34. , 35. ,
36. , 37. , 38. , 39. ,
40. , 41. , 42. , 43. ,
44. , 45. , 46. , 47. ,
48. , 49. , 50. , 51. ,
52. , 53. , 54. , 55. ,
56. , 57. , 58. , 59. ,
60. , 61. , 62. , 63. ,
64. , 65. , 66. , 67. ,
68. , 69. , 70. , 71. ,
72. , 73. , 74. , 75. ,
76. , 77. , 78. , 79. ,
80. , 81. , 82. , 83. ,
84. , 85. , 86. , 87. ,
88. , 89. , 90. , 91. ,
92. , 93. , 94. , 94.90909091,
95.72727273, 96.45454545, 97.09090909, 97.63636364])
assert_array_almost_equal(xc, movmean(np.arange(100), 11,'centered'))
|
bsd-3-clause
|
pedrocastellucci/playground
|
vrp_scip.py
|
1
|
11813
|
from pyscipopt import Model, quicksum, Pricer, SCIP_RESULT, SCIP_PARAMSETTING
# Using networkx for drawing the solution:
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
class DataVRP:
cap = None # Capacity of the truck
nodes = {} # Position of the nodes
depots = None # We are assuming one depot
demands = {} # Demand for each node. Demand of the depot is zero
costs = {} # Costs from going from i to j
def __init__(self, filename):
with open(filename) as fd:
lines = fd.readlines()
i = 0
size = None # Number of nodes
while i < len(lines):
line = lines[i]
if line.find("DIMENSION") > -1:
size = self.sepIntValue(line)
elif line.find("CAPACITY") > -1:
self.cap = self.sepIntValue(line)
elif line.find("NODE_COORD_SECTION") > -1:
i += 1
line = lines[i]
for _ in range(size):
n, x, y = [int(val.strip()) for val in line.split()]
i += 1 # Last step starts the DEMAND_SECTION
self.nodes[n] = (x, y)
line = lines[i]
# Not elif because of NODE_COORD_SECTION process.
if line.find("DEMAND_SECTION") > -1:
i += 1
line = lines[i]
for _ in range(size):
n, dem = [int(val.strip()) for val in line.split()]
i += 1
self.demands[n] = dem
line = lines[i]
if line.find("DEPOT_SECTION") > -1:
i += 1
depots = []
depot = int(lines[i].strip())
while depot >= 0:
depots.append(depot)
i += 1
depot = int(lines[i].strip())
assert(len(depots) == 1)
self.depot = depots[0]
i += 1
self.computeCostMatrix()
def sepIntValue(self, line):
value = line.split(":")[-1].strip()
return int(value)
def computeCostMatrix(self):
for (p1, (x1, y1)) in self.nodes.items():
for (p2, (x2, y2)) in self.nodes.items():
self.costs[p1, p2] = ((x1 - x2)**2 + (y1 - y2)**2)**0.5
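# Usage sketch for DataVRP (the path is the one used in the __main__ block at
# the bottom of this script; adjust it to your local data layout):
def _data_vrp_example(filename="./data/A-VRP/A-n32-k5.vrp"):
    data = DataVRP(filename)
    print("capacity:", data.cap)
    print("nodes:", len(data.nodes), "depot:", data.depot)
    return data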
class VRPpricer(Pricer):
# Binary variables z_r indicating whether
# pattern r is used:
z = None
# Data object with input data for the problem:
data = None
# Patterns currently in the problem:
patterns = None
# Function used to compute whether a
# client is visited by a particular pattern.
isClientVisited = None
# Function used to compute the cost of a pattern:
patCost = None
# List of client nodes:
clientNodes = []
# Model that holds the sub-problem:
subMIP = None
# Maximum number of patterns to be created:
maxPatterns = np.inf
def __init__(self, z, cons, data, patterns,
costs, isClientVisited, patCost, maxPatterns):
self.z, self.cons, self.data, self.patterns = z, cons, data, patterns
self.isClientVisited = isClientVisited
self.patCost = patCost
self.maxPatterns = maxPatterns
for i in data.nodes:
if i != data.depot:
self.clientNodes.append(i)
super().__init__()
def pricerredcost(self):
'''
This is a method from the Pricer class.
It is used for adding a new column to the problem.
'''
# Maximum number of patterns reached:
print("Generated %d patterns" % len(self.patterns))
if len(self.patterns) >= self.maxPatterns:
print("Max patterns reached!")
return {'result': SCIP_RESULT.SUCCESS}
colRedCos, pattern = self.getColumnFromMIP(30) # 30 seconds of time limit
print(pattern, colRedCos)
if colRedCos < -0.00001:
newPattern = pattern
obj = self.patCost(newPattern)
curVar = len(self.z)
newVar = self.model.addVar("New_" + str(curVar), vtype="C",
lb=0.0, ub=1.0, obj=obj,
pricedVar=True)
for cs in self.cons:
# Get client from constraint name:
client = int(cs.name.split("_")[-1].strip())
coeff = self.isClientVisited(client, newPattern)
self.model.addConsCoeff(cs, newVar, coeff)
self.patterns.append(newPattern)
self.z[curVar] = newVar
return {'result': SCIP_RESULT.SUCCESS}
def pricerinit(self):
'''
A method of the Pricer class. It is used to convert
the problem into its original form.
'''
for i, c in enumerate(self.cons):
self.cons[i] = self.model.getTransformedCons(c)
def getColumnFromMIP(self, timeLimit):
def getPatternFromSolution(subMIP):
edges = []
for x in subMIP.getVars():
if "x" in x.name:
if subMIP.getVal(x) > 0.99:
i, j = x.name.split("_")[1:]
edges.append((int(i), int(j)))
return edges
# Storing the values of the dual solutions:
dualSols = {}
for c in self.cons:
i = int(c.name.split("_")[-1].strip())
dualSols[i] = self.model.getDualsolLinear(c)
# Model for the sub-problem:
subMIP = Model("VRP-Sub")
subMIP.setPresolve(SCIP_PARAMSETTING.OFF)
subMIP.setMinimize()
subMIP.setRealParam("limits/time", timeLimit)
# Binary variables x_ij indicating whether the vehicle
# traverses edge (i, j)
x = {}
for i in self.data.nodes:
for j in self.data.nodes:
if i != j:
x[i, j] = subMIP.addVar(vtype="B", obj=self.data.costs[i, j] - (dualSols[i] if i in self.clientNodes else 0), name="x_%d_%d" % (i, j))
# Non negative variables u_i indicating the demand served up to node i:
u = {}
for i in self.data.nodes:
u[i] = subMIP.addVar(vtype="C", lb=0, ub=self.data.cap, obj=0.0, name="u_%d" % i)
for j in self.clientNodes:
subMIP.addCons(quicksum(x[i, j] for i in self.data.nodes if i != j) <= 1)
for h in self.clientNodes:
subMIP.addCons(quicksum(x[i, h] for i in self.data.nodes if i != h) ==
quicksum(x[h, i] for i in self.data.nodes if i != h))
for i in self.data.nodes:
for j in self.clientNodes:
if i != j:
subMIP.addCons(u[j] >= u[i] + self.data.demands[j]*x[i, j] - self.data.cap*(1 - x[i, j]))
subMIP.addCons(quicksum(x[self.data.depot, j] for j in self.clientNodes) <= 1)
subMIP.hideOutput()
subMIP.optimize()
mipSol = subMIP.getBestSol()
obj = subMIP.getSolObjVal(mipSol)
pattern = getPatternFromSolution(subMIP)
return obj, pattern
class VRPsolver:
data = None
clientNodes = []
# A pattern is a feasible route for visiting
# some clients:
patterns = None
# The master model:
master = None
# The pricer object:
pricer = None
# Max patterns for column generation:
maxPatterns = np.inf
# If we are solving the linear column generation this
# must be False.
# If it is True, we solve the problem of selecting
# the best patterns to use -- without column generation.
integer = False
def __init__(self, vrpData, maxPatterns):
self.data = vrpData
self.clientNodes = [n for n in self.data.nodes.keys() if n != self.data.depot]
self.maxPatterns = maxPatterns
def genInitialPatterns(self):
'''
Generating initial patterns.
'''
patterns = []
for n in self.clientNodes:
patterns.append([(self.data.depot, n), (n, self.data.depot)])
self.patterns = patterns
def setInitialPatterns(self, patterns):
self.patterns = patterns
def addPatterns(self, patterns):
for pat in patterns:
self.patterns.append(pat)
def patCost(self, pat):
cost = 0.0
for (i, j) in pat:
cost += self.data.costs[i, j]
return cost
def isClientVisited(self, c, pat):
# Check if client c is visited in pattern pat:
for (i, j) in pat:
if i == c or j == c:
return 1
return 0
def solve(self, integer=False):
'''
By default we solve a linear version of the column generation.
If integer is True, then we solve the problem of selecting the best
routes from the existing patterns, without column generation.
'''
self.integer = integer
if self.patterns is None:
self.genInitialPatterns()
# Creating master Model:
master = Model("Master problem")
# Creating pricer:
master.setPresolve(SCIP_PARAMSETTING.OFF)
# Populating master model.
# Binary variables z_r indicating whether
# pattern r is used in the solution:
z = {}
for i, _ in enumerate(self.patterns):
z[i] = master.addVar(vtype="B" if integer else "C",
lb=0.0, ub=1.0, name="z_%d" % i)
# Set objective:
master.setObjective(quicksum(self.patCost(p)*z[i] for i, p in enumerate(self.patterns)),
"minimize")
print(master.getObjective())
clientCons = [None]*len(self.clientNodes)
for i, c in enumerate(self.clientNodes):
cons = master.addCons(
quicksum(self.isClientVisited(c, p)*z[i] for i, p in enumerate(self.patterns)) == 1,
"Consumer_%d" % c,
separate=False, modifiable=True)
clientCons[i] = cons
if not integer:
pricer = VRPpricer(z, clientCons, self.data, self.patterns,
self.data.costs, self.isClientVisited,
self.patCost, self.maxPatterns)
master.includePricer(pricer, "VRP pricer", "Identifying new routes")
self.pricer = pricer
self.master = master # Save master model.
master.optimize()
def printSolution(self):
if self.integer:
zVars = self.master.getVars()
else:
zVars = self.pricer.z
print(zVars)
usedPatterns = []
for i, z in enumerate(zVars):
if self.master.getVal(zVars[i]) > 0.01:
print(self.patterns[i], self.master.getVal(zVars[i]))
usedPatterns.append(self.patterns[i])
return usedPatterns
def drawSolution(self):
patterns = self.printSolution()
graph = nx.DiGraph()
for pat in patterns:
graph.add_edges_from(pat)
nx.draw_networkx_nodes(graph, self.data.nodes)
nx.draw_networkx_edges(graph, self.data.nodes)
nx.draw_networkx_labels(graph, self.data.nodes)
plt.show()
if __name__ == "__main__":
data = DataVRP("./data/A-VRP/A-n32-k5.vrp")
# data = DataVRP("toy15.vrp")
solver = VRPsolver(data, 180)
solver.solve()
usedPatterns = solver.printSolution()
solver.drawSolution()
solver.genInitialPatterns()
solver.addPatterns(usedPatterns)
solver.solve(integer=True)
solver.drawSolution()
|
gpl-3.0
|
sealevelresearch/timeseries
|
utils/serialize_timeseries.py
|
1
|
1771
|
import argparse
from datetime import datetime, timedelta
import json
import pandas as pd
from tests import test_dataframe
from timeseries.timeseries import TimeSeries
"""
Fixtures for large datasets (i.e. minute-resolution data) need to be generated for tests.
This utility produces a JSON file which can then be checked manually.
"""
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
return super().default(obj)
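# Sketch of what the encoder buys us: json.dumps can serialize datetimes once
# cls=DateTimeEncoder is passed.
def _datetime_encoder_example():
    payload = {'timestamp': datetime(2015, 1, 1, 12, 30)}
    return json.dumps(payload, cls=DateTimeEncoder)  # '{"timestamp": "2015-01-01T12:30:00"}'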
def obtain_rows(data, start, end, resolution):
dataframe = pd.DataFrame(data)
timeseries = TimeSeries(dataframe, timedelta(hours=4), resolution)
output = timeseries.query(start, end, resolution)
output = output.where(pd.notnull(output), None)
return output.to_records().tolist()
def serialize_to_file(rows, filename):
with open(filename, 'w') as fh:
# Custom JSON formatting, because the output needs to be human-editable
# (dumps kwarg indent is not sufficient)
fh.write('[')
for i, row in enumerate(rows):
fh.write('\n\t\t{0}'.format(json.dumps(row, cls=DateTimeEncoder)))
if not i == len(rows) - 1:
fh.write(',')
fh.write('\n]')
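# Example invocation (the 'data' argument must name an attribute of
# tests.test_dataframe; the names and dates below are placeholders):
#   python serialize_timeseries.py 2015-01-01T00:00 2015-01-02T00:00 \
#       minute_dataframe H fixture.json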
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('start')
parser.add_argument('end')
parser.add_argument('data')
parser.add_argument('resolution', choices=['S', 'H'])
parser.add_argument('output')
args = parser.parse_args()
data = getattr(test_dataframe, args.data)
start = args.start
end = args.end
resolution = args.resolution
filename = args.output
rows = obtain_rows(data, start, end, resolution)
serialize_to_file(rows, filename)
|
mit
|
abhishekkrthakur/scikit-learn
|
examples/semi_supervised/plot_label_propagation_digits.py
|
26
|
2752
|
"""
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semi-supervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained on a 330-point subset, of which only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn import metrics
from sklearn.metrics import confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mark the unlabeled targets with -1 in a copy of y
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(metrics.classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
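# Each row of label_distributions_ is one sample's class distribution, so
# pred_entropies holds one entropy value per sample and uncertainty_index
# selects the 10 samples the model is least certain about.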
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
|
bsd-3-clause
|
rkmaddox/mne-python
|
mne/dipole.py
|
4
|
57819
|
# -*- coding: utf-8 -*-
"""Single-dipole functions and classes."""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
from copy import deepcopy
import functools
from functools import partial
import re
import numpy as np
from .cov import read_cov, compute_whitener
from .io.constants import FIFF
from .io.pick import pick_types
from .io.proj import make_projector, _needs_eeg_average_ref_proj
from .bem import _fit_sphere
from .evoked import _read_evoked, _aspect_rev, _write_evokeds
from .fixes import pinvh
from .transforms import _print_coord_trans, _coord_frame_name, apply_trans
from .viz.evoked import _plot_evoked
from .forward._make_forward import (_get_trans, _setup_bem,
_prep_meg_channels, _prep_eeg_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .surface import (transform_surface_to, _compute_nearest,
_points_outside_surface)
from .bem import _bem_find_surface, _bem_surf_name
from .source_space import _make_volume_source_space, SourceSpaces, head_to_mni
from .parallel import parallel_func
from .utils import (logger, verbose, _time_mask, warn, _check_fname,
check_fname, _pl, fill_doc, _check_option, ShiftTimeMixin,
_svd_lwork, _repeated_svd, _get_blas_funcs)
@fill_doc
class Dipole(object):
u"""Dipole class for sequential dipole fits.
.. note:: This class should usually not be instantiated directly,
instead :func:`mne.read_dipole` should be used.
Used to store positions, orientations, amplitudes, times, goodness of fit
of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
or certain inverse solvers. Note that dipole position vectors are given in
the head coordinate frame.
Parameters
----------
times : array, shape (n_dipoles,)
The time instants at which each dipole was fitted (sec).
pos : array, shape (n_dipoles, 3)
The dipole positions (m) in head coordinates.
amplitude : array, shape (n_dipoles,)
The amplitude of the dipoles (Am).
ori : array, shape (n_dipoles, 3)
The dipole orientations (normalized to unit length).
gof : array, shape (n_dipoles,)
The goodness of fit.
name : str | None
Name of the dipole.
conf : dict
Confidence limits in dipole orientation for "vol" in m^3 (volume),
"depth" in m (along the depth axis), "long" in m (longitudinal axis),
"trans" in m (transverse axis), "qlong" in Am, and "qtrans" in Am
(currents). The current confidence limit in the depth direction is
assumed to be zero (although it can be non-zero when a BEM is used).
.. versionadded:: 0.15
khi2 : array, shape (n_dipoles,)
The χ^2 values for the fits.
.. versionadded:: 0.15
nfree : array, shape (n_dipoles,)
The number of free parameters for each fit.
.. versionadded:: 0.15
%(verbose)s
See Also
--------
fit_dipole
DipoleFixed
read_dipole
Notes
-----
This class is for sequential dipole fits, where the position
changes as a function of time. For fixed dipole fits, where the
position is fixed as a function of time, use :class:`mne.DipoleFixed`.
"""
@verbose
def __init__(self, times, pos, amplitude, ori, gof,
name=None, conf=None, khi2=None, nfree=None,
verbose=None): # noqa: D102
self.times = np.array(times)
self.pos = np.array(pos)
self.amplitude = np.array(amplitude)
self.ori = np.array(ori)
self.gof = np.array(gof)
self.name = name
self.conf = dict()
if conf is not None:
for key, value in conf.items():
self.conf[key] = np.array(value)
self.khi2 = np.array(khi2) if khi2 is not None else None
self.nfree = np.array(nfree) if nfree is not None else None
self.verbose = verbose
def __repr__(self): # noqa: D105
s = "n_times : %s" % len(self.times)
s += ", tmin : %0.3f" % np.min(self.times)
s += ", tmax : %0.3f" % np.max(self.times)
return "<Dipole | %s>" % s
@verbose
def save(self, fname, overwrite=False, *, verbose=None):
"""Save dipole in a .dip or .bdip file.
Parameters
----------
fname : str
The name of the .dip or .bdip file.
%(overwrite)s
.. versionadded:: 0.20
%(verbose_meth)s
Notes
-----
.. versionchanged:: 0.20
Support for writing bdip (Xfit binary) files.
"""
# obligatory fields
fname = _check_fname(fname, overwrite=overwrite)
if fname.endswith('.bdip'):
_write_dipole_bdip(fname, self)
else:
_write_dipole_text(fname, self)
@fill_doc
def crop(self, tmin=None, tmax=None, include_tmax=True):
"""Crop data to a given time interval.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
%(include_tmax)s
Returns
-------
self : instance of Dipole
The cropped instance.
"""
sfreq = None
if len(self.times) > 1:
sfreq = 1. / np.median(np.diff(self.times))
mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq,
include_tmax=include_tmax)
for attr in ('times', 'pos', 'gof', 'amplitude', 'ori',
'khi2', 'nfree'):
if getattr(self, attr) is not None:
setattr(self, attr, getattr(self, attr)[mask])
for key in self.conf.keys():
self.conf[key] = self.conf[key][mask]
return self
def copy(self):
"""Copy the Dipoles object.
Returns
-------
dip : instance of Dipole
The copied dipole instance.
"""
return deepcopy(self)
@verbose
def plot_locations(self, trans, subject, subjects_dir=None,
mode='orthoview', coord_frame='mri', idx='gof',
show_all=True, ax=None, block=False, show=True,
scale=5e-3, color=(1.0, 0.0, 0.0), fig=None,
verbose=None, title=None):
"""Plot dipole locations in 3d.
Parameters
----------
trans : dict
The mri to head trans.
subject : str
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
%(subjects_dir)s
mode : str
Can be ``'arrow'``, ``'sphere'`` or ``'orthoview'``.
.. versionadded:: 0.14.0
coord_frame : str
Coordinate frame to use, 'head' or 'mri'. Defaults to 'mri'.
.. versionadded:: 0.14.0
idx : int | 'gof' | 'amplitude'
Index of the initially plotted dipole. Can also be 'gof' to plot
the dipole with highest goodness of fit value or 'amplitude' to
plot the dipole with the highest amplitude. The dipoles can also be
browsed through using up/down arrow keys or mouse scroll. Defaults
to 'gof'. Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show_all : bool
Whether to always plot all the dipoles. If True (default), the
active dipole is plotted as a red dot and its location determines
the shown MRI slices. The non-active dipoles are plotted as
small blue dots. If False, only the active dipole is plotted.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
ax : instance of matplotlib Axes3D | None
Axes to plot into. If None (default), axes will be created.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False. Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show : bool
Show figure if True. Defaults to True.
Only used if mode equals 'orthoview'.
scale : float
The scale of the dipoles if ``mode`` is 'arrow' or 'sphere'.
color : tuple
The color of the dipoles if ``mode`` is 'arrow' or 'sphere'.
fig : mayavi.mlab.Figure | None
Mayavi Scene in which to plot the alignment.
If ``None``, creates a new 600x600 pixel figure with black
background.
.. versionadded:: 0.14.0
%(verbose_meth)s
%(dipole_locs_fig_title)s
.. versionadded:: 0.21.0
Returns
-------
fig : instance of mayavi.mlab.Figure or matplotlib.figure.Figure
The mayavi figure or matplotlib Figure.
Notes
-----
.. versionadded:: 0.9.0
"""
_check_option('mode', mode, [None, 'arrow', 'sphere', 'orthoview'])
from .viz import plot_dipole_locations
return plot_dipole_locations(
self, trans, subject, subjects_dir, mode, coord_frame, idx,
show_all, ax, block, show, scale=scale, color=color, fig=fig,
title=title)
@verbose
def to_mni(self, subject, trans, subjects_dir=None,
verbose=None):
"""Convert dipole location from head coordinate system to MNI coordinates.
Parameters
----------
%(subject)s
%(trans_not_none)s
%(subjects_dir)s
%(verbose)s
Returns
-------
pos_mni : array, shape (n_pos, 3)
The MNI coordinates (in mm) of pos.
"""
mri_head_t, trans = _get_trans(trans)
return head_to_mni(self.pos, subject, mri_head_t,
subjects_dir=subjects_dir, verbose=verbose)
def plot_amplitudes(self, color='k', show=True):
"""Plot the dipole amplitudes as a function of time.
Parameters
----------
color : matplotlib color
Color to use for the trace.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
"""
from .viz import plot_dipole_amplitudes
return plot_dipole_amplitudes([self], [color], show)
def __getitem__(self, item):
"""Get a time slice.
Parameters
----------
item : array-like or slice
The slice of time points to use.
Returns
-------
dip : instance of Dipole
The sliced dipole.
"""
if isinstance(item, int): # make sure attributes stay 2d
item = [item]
selected_times = self.times[item].copy()
selected_pos = self.pos[item, :].copy()
selected_amplitude = self.amplitude[item].copy()
selected_ori = self.ori[item, :].copy()
selected_gof = self.gof[item].copy()
selected_name = self.name
selected_conf = dict()
for key in self.conf.keys():
selected_conf[key] = self.conf[key][item]
selected_khi2 = self.khi2[item] if self.khi2 is not None else None
selected_nfree = self.nfree[item] if self.nfree is not None else None
return Dipole(
selected_times, selected_pos, selected_amplitude, selected_ori,
selected_gof, selected_name, selected_conf, selected_khi2,
selected_nfree)
def __len__(self):
"""Return the number of dipoles.
Returns
-------
len : int
The number of dipoles.
Examples
--------
This can be used as::
>>> len(dipoles) # doctest: +SKIP
10
"""
return self.pos.shape[0]
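# Usage sketch (the file name is hypothetical): sequential dipole fits read
# from a .dip text file can be sliced to keep only the best-fitting time point.
def _dipole_example(fname='sample_set1.dip'):
    dip = read_dipole(fname)             # a Dipole instance for .dip files
    best = dip[int(np.argmax(dip.gof))]  # dipole with the highest GOF
    return best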
def _read_dipole_fixed(fname):
"""Read a fixed dipole FIF file."""
logger.info('Reading %s ...' % fname)
info, nave, aspect_kind, comment, times, data, _ = _read_evoked(fname)
return DipoleFixed(info, data, times, nave, aspect_kind, comment=comment)
@fill_doc
class DipoleFixed(ShiftTimeMixin):
"""Dipole class for fixed-position dipole fits.
.. note:: This class should usually not be instantiated directly,
instead :func:`mne.read_dipole` should be used.
Parameters
----------
info : instance of Info
The measurement info.
data : array, shape (n_channels, n_times)
The dipole data.
times : array, shape (n_times,)
The time points.
nave : int
Number of averages.
aspect_kind : int
The kind of data.
comment : str
The dipole comment.
%(verbose)s
See Also
--------
read_dipole
Dipole
fit_dipole
Notes
-----
This class is for fixed-position dipole fits, where the position
(and maybe orientation) is static over time. For sequential dipole fits,
where the position can change as a function of time, use :class:`mne.Dipole`.
.. versionadded:: 0.12
"""
@verbose
def __init__(self, info, data, times, nave, aspect_kind,
comment='', verbose=None): # noqa: D102
self.info = info
self.nave = nave
self._aspect_kind = aspect_kind
self.kind = _aspect_rev.get(aspect_kind, 'unknown')
self.comment = comment
self.times = times
self.data = data
self.verbose = verbose
self.preload = True
self._update_first_last()
def __repr__(self): # noqa: D105
s = "n_times : %s" % len(self.times)
s += ", tmin : %s" % np.min(self.times)
s += ", tmax : %s" % np.max(self.times)
return "<DipoleFixed | %s>" % s
def copy(self):
"""Copy the DipoleFixed object.
Returns
-------
inst : instance of DipoleFixed
The copy.
Notes
-----
.. versionadded:: 0.16
"""
return deepcopy(self)
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
@verbose
def save(self, fname, verbose=None):
"""Save dipole in a .fif file.
Parameters
----------
fname : str
The name of the .fif file. Must end with ``'.fif'`` or
``'.fif.gz'`` to make it explicit that the file contains
dipole information in FIF format.
%(verbose_meth)s
"""
check_fname(fname, 'DipoleFixed', ('-dip.fif', '-dip.fif.gz',
'_dip.fif', '_dip.fif.gz',),
('.fif', '.fif.gz'))
_write_evokeds(fname, self, check=False)
def plot(self, show=True, time_unit='s'):
"""Plot dipole data.
Parameters
----------
show : bool
Call pyplot.show() at the end or not.
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure containing the time courses.
"""
return _plot_evoked(self, picks=None, exclude=(), unit=True, show=show,
ylim=None, xlim='tight', proj=False, hline=None,
units=None, scalings=None, titles=None, axes=None,
gfp=False, window_title=None, spatial_colors=False,
plot_type="butterfly", selectable=False,
time_unit=time_unit)
# #############################################################################
# IO
@verbose
def read_dipole(fname, verbose=None):
"""Read .dip file from Neuromag/xfit or MNE.
Parameters
----------
fname : str
The name of the .dip or .fif file.
%(verbose)s
Returns
-------
dipole : instance of Dipole or DipoleFixed
The dipole.
See Also
--------
Dipole
DipoleFixed
fit_dipole
Notes
-----
.. versionchanged:: 0.20
Support for reading bdip (Xfit binary) format.
"""
fname = _check_fname(fname, overwrite='read', must_exist=True)
if fname.endswith('.fif') or fname.endswith('.fif.gz'):
return _read_dipole_fixed(fname)
elif fname.endswith('.bdip'):
return _read_dipole_bdip(fname)
else:
return _read_dipole_text(fname)
def _read_dipole_text(fname):
"""Read a dipole text file."""
# Figure out the special fields
need_header = True
def_line = name = None
# There is a bug in older np.loadtxt regarding skipping fields,
# so just read the data ourselves (need to get name and header anyway)
data = list()
with open(fname, 'r') as fid:
for line in fid:
if not (line.startswith('%') or line.startswith('#')):
need_header = False
data.append(line.strip().split())
else:
if need_header:
def_line = line
if line.startswith('##') or line.startswith('%%'):
m = re.search('Name "(.*) dipoles"', line)
if m:
name = m.group(1)
del line
data = np.atleast_2d(np.array(data, float))
if def_line is None:
raise IOError('Dipole text file is missing field definition '
'comment, cannot parse %s' % (fname,))
# actually parse the fields
def_line = def_line.lstrip('%').lstrip('#').strip()
# MNE writes it out differently than Elekta, let's standardize them...
fields = re.sub(r'([X|Y|Z] )\(mm\)', # "X (mm)", etc.
lambda match: match.group(1).strip() + '/mm', def_line)
fields = re.sub(r'\((.*?)\)', # "Q(nAm)", etc.
lambda match: '/' + match.group(1), fields)
fields = re.sub('(begin|end) ', # "begin" and "end" with no units
lambda match: match.group(1) + '/ms', fields)
fields = fields.lower().split()
required_fields = ('begin/ms',
'x/mm', 'y/mm', 'z/mm',
'q/nam', 'qx/nam', 'qy/nam', 'qz/nam',
'g/%')
optional_fields = ('khi^2', 'free', # standard ones
# now the confidence fields (up to 5!)
'vol/mm^3', 'depth/mm', 'long/mm', 'trans/mm',
'qlong/nam', 'qtrans/nam')
conf_scales = [1e-9, 1e-3, 1e-3, 1e-3, 1e-9, 1e-9]
missing_fields = sorted(set(required_fields) - set(fields))
if len(missing_fields) > 0:
raise RuntimeError('Could not find necessary fields in header: %s'
% (missing_fields,))
handled_fields = set(required_fields) | set(optional_fields)
assert len(handled_fields) == len(required_fields) + len(optional_fields)
ignored_fields = sorted(set(fields) -
set(handled_fields) -
{'end/ms'})
if len(ignored_fields) > 0:
warn('Ignoring extra fields in dipole file: %s' % (ignored_fields,))
if len(fields) != data.shape[1]:
raise IOError('More data fields (%s) found than data columns (%s): %s'
% (len(fields), data.shape[1], fields))
logger.info("%d dipole(s) found" % len(data))
if 'end/ms' in fields:
if np.diff(data[:, [fields.index('begin/ms'),
fields.index('end/ms')]], 1, -1).any():
warn('begin and end fields differed, but only begin will be used '
'to store time values')
# Find the correct column in our data array, then scale to proper units
idx = [fields.index(field) for field in required_fields]
assert len(idx) >= 9
times = data[:, idx[0]] / 1000.
pos = 1e-3 * data[:, idx[1:4]] # put data in meters
amplitude = data[:, idx[4]]
norm = amplitude.copy()
amplitude /= 1e9
norm[norm == 0] = 1
ori = data[:, idx[5:8]] / norm[:, np.newaxis]
gof = data[:, idx[8]]
# Deal with optional fields
optional = [None] * 2
for fi, field in enumerate(optional_fields[:2]):
if field in fields:
optional[fi] = data[:, fields.index(field)]
khi2, nfree = optional
conf = dict()
for field, scale in zip(optional_fields[2:], conf_scales): # confidence
if field in fields:
conf[field.split('/')[0]] = scale * data[:, fields.index(field)]
return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree)
def _write_dipole_text(fname, dip):
fmt = ' %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.2f'
header = ('# begin end X (mm) Y (mm) Z (mm)'
' Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%')
t = dip.times[:, np.newaxis] * 1000.
gof = dip.gof[:, np.newaxis]
amp = 1e9 * dip.amplitude[:, np.newaxis]
out = (t, t, dip.pos / 1e-3, amp, dip.ori * amp, gof)
# optional fields
fmts = dict(khi2=(' khi^2', ' %8.1f', 1.),
nfree=(' free', ' %5d', 1),
vol=(' vol/mm^3', ' %9.3f', 1e9),
depth=(' depth/mm', ' %9.3f', 1e3),
long=(' long/mm', ' %8.3f', 1e3),
trans=(' trans/mm', ' %9.3f', 1e3),
qlong=(' Qlong/nAm', ' %10.3f', 1e9),
qtrans=(' Qtrans/nAm', ' %11.3f', 1e9),
)
for key in ('khi2', 'nfree'):
data = getattr(dip, key)
if data is not None:
header += fmts[key][0]
fmt += fmts[key][1]
out += (data[:, np.newaxis] * fmts[key][2],)
for key in ('vol', 'depth', 'long', 'trans', 'qlong', 'qtrans'):
data = dip.conf.get(key)
if data is not None:
header += fmts[key][0]
fmt += fmts[key][1]
out += (data[:, np.newaxis] * fmts[key][2],)
out = np.concatenate(out, axis=-1)
# NB CoordinateSystem is hard-coded as Head here
with open(fname, 'wb') as fid:
fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
fid.write((header + '\n').encode('utf-8'))
np.savetxt(fid, out, fmt=fmt)
if dip.name is not None:
fid.write(('## Name "%s dipoles" Style "Dipoles"'
% dip.name).encode('utf-8'))
_BDIP_ERROR_KEYS = ('depth', 'long', 'trans', 'qlong', 'qtrans')
def _read_dipole_bdip(fname):
name = None
nfree = None
with open(fname, 'rb') as fid:
# Which dipole in a multi-dipole set
times = list()
pos = list()
amplitude = list()
ori = list()
gof = list()
conf = dict(vol=list())
khi2 = list()
has_errors = None
while True:
num = np.frombuffer(fid.read(4), '>i4')
if len(num) == 0:
break
times.append(np.frombuffer(fid.read(4), '>f4')[0])
fid.read(4) # end
fid.read(12) # r0
pos.append(np.frombuffer(fid.read(12), '>f4'))
Q = np.frombuffer(fid.read(12), '>f4')
amplitude.append(np.linalg.norm(Q))
ori.append(Q / amplitude[-1])
gof.append(100 * np.frombuffer(fid.read(4), '>f4')[0])
this_has_errors = bool(np.frombuffer(fid.read(4), '>i4')[0])
if has_errors is None:
has_errors = this_has_errors
for key in _BDIP_ERROR_KEYS:
conf[key] = list()
assert has_errors == this_has_errors
fid.read(4) # Noise level used for error computations
limits = np.frombuffer(fid.read(20), '>f4') # error limits
for key, lim in zip(_BDIP_ERROR_KEYS, limits):
conf[key].append(lim)
fid.read(100) # (5, 5) fully describes the conf. ellipsoid
conf['vol'].append(np.frombuffer(fid.read(4), '>f4')[0])
khi2.append(np.frombuffer(fid.read(4), '>f4')[0])
fid.read(4) # prob
fid.read(4) # total noise estimate
return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree)
def _write_dipole_bdip(fname, dip):
with open(fname, 'wb+') as fid:
for ti, t in enumerate(dip.times):
fid.write(np.zeros(1, '>i4').tobytes()) # int dipole
fid.write(np.array([t, 0]).astype('>f4').tobytes())
fid.write(np.zeros(3, '>f4').tobytes()) # r0
fid.write(dip.pos[ti].astype('>f4').tobytes()) # pos
Q = dip.amplitude[ti] * dip.ori[ti]
fid.write(Q.astype('>f4').tobytes())
fid.write(np.array(dip.gof[ti] / 100., '>f4').tobytes())
has_errors = int(bool(len(dip.conf)))
fid.write(np.array(has_errors, '>i4').tobytes()) # has_errors
fid.write(np.zeros(1, '>f4').tobytes()) # noise level
for key in _BDIP_ERROR_KEYS:
val = dip.conf[key][ti] if key in dip.conf else 0.
assert val.shape == ()
fid.write(np.array(val, '>f4').tobytes())
fid.write(np.zeros(25, '>f4').tobytes())
conf = dip.conf['vol'][ti] if 'vol' in dip.conf else 0.
fid.write(np.array(conf, '>f4').tobytes())
khi2 = dip.khi2[ti] if dip.khi2 is not None else 0
fid.write(np.array(khi2, '>f4').tobytes())
fid.write(np.zeros(1, '>f4').tobytes()) # prob
fid.write(np.zeros(1, '>f4').tobytes()) # total noise est
# #############################################################################
# Fitting
def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
"""Compute the forward solution and do other nice stuff."""
B = _compute_forwards_meeg(rr, fwd_data, n_jobs, silent=True)
B = np.concatenate(B, axis=1)
assert np.isfinite(B).all()
B_orig = B.copy()
# Apply projection and whiten (cov has projections already)
_, _, dgemm = _get_ddot_dgemv_dgemm()
B = dgemm(1., B, whitener.T)
# column normalization doesn't affect our fitting, so skip for now
# S = np.sum(B * B, axis=1) # across channels
# scales = np.repeat(3. / np.sqrt(np.sum(np.reshape(S, (len(rr), 3)),
# axis=1)), 3)
# B *= scales[:, np.newaxis]
scales = np.ones(3)
return B, B_orig, scales
@verbose
def _make_guesses(surf, grid, exclude, mindist, n_jobs=1, verbose=None):
"""Make a guess space inside a sphere or BEM surface."""
if 'rr' in surf:
logger.info('Guess surface (%s) is in %s coordinates'
% (_bem_surf_name[surf['id']],
_coord_frame_name(surf['coord_frame'])))
else:
logger.info('Making a spherical guess space with radius %7.1f mm...'
% (1000 * surf['R']))
logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
do_neighbors=False, n_jobs=n_jobs)[0]
assert 'vertno' in src
# simplify the result to make things easier later
src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],
nuse=src['nuse'], coord_frame=src['coord_frame'],
vertno=np.arange(src['nuse']), type='discrete')
return SourceSpaces([src])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None,
lwork=None):
"""Calculate the residual sum of squares."""
if fwd_svd is None:
fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
uu, sing, vv = _repeated_svd(fwd, lwork, overwrite_a=True)
else:
uu, sing, vv = fwd_svd
gof = _dipole_gof(uu, sing, vv, B, B2)[0]
# mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version
return 1. - gof
@functools.lru_cache(None)
def _get_ddot_dgemv_dgemm():
return _get_blas_funcs(np.float64, ('dot', 'gemv', 'gemm'))
def _dipole_gof(uu, sing, vv, B, B2):
"""Calculate the goodness of fit from the forward SVD."""
ddot, dgemv, _ = _get_ddot_dgemv_dgemm()
ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.) > 0.2 else 2
one = dgemv(1., vv[:ncomp], B) # np.dot(vv[:ncomp], B)
Bm2 = ddot(one, one) # np.sum(one * one)
gof = Bm2 / B2
return gof, one
def _fit_Q(fwd_data, whitener, B, B2, B_orig, rd, ori=None):
"""Fit the dipole moment once the location is known."""
from scipy import linalg
if 'fwd' in fwd_data:
# should be a single precomputed "guess" (i.e., fixed position)
assert rd is None
fwd = fwd_data['fwd']
assert fwd.shape[0] == 3
fwd_orig = fwd_data['fwd_orig']
assert fwd_orig.shape[0] == 3
scales = fwd_data['scales']
assert scales.shape == (3,)
fwd_svd = fwd_data['fwd_svd'][0]
else:
fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
rd[np.newaxis, :])
fwd_svd = None
if ori is None:
if fwd_svd is None:
fwd_svd = linalg.svd(fwd, full_matrices=False)
uu, sing, vv = fwd_svd
gof, one = _dipole_gof(uu, sing, vv, B, B2)
ncomp = len(one)
one /= sing[:ncomp]
Q = np.dot(one, uu.T[:ncomp])
else:
fwd = np.dot(ori[np.newaxis], fwd)
sing = np.linalg.norm(fwd)
one = np.dot(fwd / sing, B)
gof = (one * one)[0] / B2
Q = ori * np.sum(one / sing)
ncomp = 3
# Counteract the effect of column normalization
Q *= scales[0]
B_residual_noproj = B_orig - np.dot(fwd_orig.T, Q)
return Q, gof, B_residual_noproj, ncomp
def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs,
guess_data, fwd_data, whitener, ori, n_jobs, rank):
"""Fit a single dipole to the given whitened, projected data."""
from scipy.optimize import fmin_cobyla
parallel, p_fun, _ = parallel_func(fun, n_jobs)
# parallel over time points
res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
guess_data, fwd_data, whitener,
fmin_cobyla, ori, rank)
for B, t in zip(data.T, times))
pos = np.array([r[0] for r in res])
amp = np.array([r[1] for r in res])
ori = np.array([r[2] for r in res])
gof = np.array([r[3] for r in res]) * 100 # convert to percentage
conf = None
if res[0][4] is not None:
conf = np.array([r[4] for r in res])
keys = ['vol', 'depth', 'long', 'trans', 'qlong', 'qtrans']
conf = {key: conf[:, ki] for ki, key in enumerate(keys)}
khi2 = np.array([r[5] for r in res])
nfree = np.array([r[6] for r in res])
residual_noproj = np.array([r[7] for r in res]).T
return pos, amp, ori, gof, conf, khi2, nfree, residual_noproj
'''Simplex code in case we ever want/need it for testing
def _make_tetra_simplex():
"""Make the initial tetrahedron"""
#
# For this definition of a regular tetrahedron, see
#
# http://mathworld.wolfram.com/Tetrahedron.html
#
x = np.sqrt(3.0) / 3.0
r = np.sqrt(6.0) / 12.0
R = 3 * r
d = x / 2.0
simplex = 1e-2 * np.array([[x, 0.0, -r],
[-d, 0.5, -r],
[-d, -0.5, -r],
[0., 0., R]])
return simplex
def try_(p, y, psum, ndim, fun, ihi, neval, fac):
"""Helper to try a value"""
ptry = np.empty(ndim)
fac1 = (1.0 - fac) / ndim
fac2 = fac1 - fac
ptry = psum * fac1 - p[ihi] * fac2
ytry = fun(ptry)
neval += 1
if ytry < y[ihi]:
y[ihi] = ytry
psum[:] += ptry - p[ihi]
p[ihi] = ptry
return ytry, neval
def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
"""Minimization with the simplex algorithm
Modified from Numerical recipes"""
y = np.array([fun(s) for s in p])
ndim = p.shape[1]
assert p.shape[0] == ndim + 1
mpts = ndim + 1
neval = 0
psum = p.sum(axis=0)
loop = 1
while(True):
ilo = 1
if y[1] > y[2]:
ihi = 1
inhi = 2
else:
ihi = 2
inhi = 1
for i in range(mpts):
if y[i] < y[ilo]:
ilo = i
if y[i] > y[ihi]:
inhi = ihi
ihi = i
elif y[i] > y[inhi]:
if i != ihi:
inhi = i
rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
if rtol < ftol:
break
if neval >= max_eval:
raise RuntimeError('Maximum number of evaluations exceeded.')
if stol > 0: # Has the simplex collapsed?
dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
if loop > 5 and dsum < stol:
break
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
if ytry <= y[ilo]:
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
elif ytry >= y[inhi]:
ysave = y[ihi]
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
if ytry >= ysave:
for i in range(mpts):
if i != ilo:
psum[:] = 0.5 * (p[i] + p[ilo])
p[i] = psum
y[i] = fun(psum)
neval += ndim
psum = p.sum(axis=0)
loop += 1
'''
def _fit_confidence(rd, Q, ori, whitener, fwd_data):
# As described in the Xfit manual, confidence intervals can be calculated
# by examining a linearization of model at the best-fitting location,
# i.e. taking the Jacobian and using the whitener:
#
# J = [∂b/∂x ∂b/∂y ∂b/∂z ∂b/∂Qx ∂b/∂Qy ∂b/∂Qz]
# C = (J.T C^-1 J)^-1
#
# And then the confidence interval is the diagonal of C, scaled by 1.96
# (for 95% confidence).
from scipy import linalg
direction = np.empty((3, 3))
# The coordinate system has the x axis aligned with the dipole orientation,
direction[0] = ori
# the z axis through the origin of the sphere model
rvec = rd - fwd_data['inner_skull']['r0']
direction[2] = rvec - ori * np.dot(ori, rvec) # orthogonalize
direction[2] /= np.linalg.norm(direction[2])
# and the y axis perpendicular to these, forming a right-handed system.
direction[1] = np.cross(direction[2], direction[0])
assert np.allclose(np.dot(direction, direction.T), np.eye(3))
# Get spatial deltas in dipole coordinate directions
deltas = (-1e-4, 1e-4)
J = np.empty((whitener.shape[0], 6))
for ii in range(3):
fwds = []
for delta in deltas:
this_r = rd[np.newaxis] + delta * direction[ii]
fwds.append(
np.dot(Q, _dipole_forwards(fwd_data, whitener, this_r)[0]))
J[:, ii] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
# Get current (Q) deltas in the dipole directions
deltas = np.array([-0.01, 0.01]) * np.linalg.norm(Q)
this_fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis])[0]
for ii in range(3):
fwds = []
for delta in deltas:
fwds.append(np.dot(Q + delta * direction[ii], this_fwd))
J[:, ii + 3] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
# J is already whitened, so we don't need to do np.dot(whitener, J).
# However, the units in the Jacobian are potentially quite different,
# so we need to do some normalization during inversion, then revert.
direction_norm = np.linalg.norm(J[:, :3])
Q_norm = np.linalg.norm(J[:, 3:5]) # omit possible zero Z
norm = np.array([direction_norm] * 3 + [Q_norm] * 3)
J /= norm
J = np.dot(J.T, J)
C = pinvh(J, rtol=1e-14)
C /= norm
C /= norm[:, np.newaxis]
conf = 1.96 * np.sqrt(np.diag(C))
    # The confidence volume of the dipole location is obtained by
# taking the eigenvalues of the upper left submatrix and computing
# v = 4π/3 √(c^3 λ1 λ2 λ3) with c = 7.81, or:
vol_conf = 4 * np.pi / 3. * np.sqrt(
476.379541 * np.prod(linalg.eigh(C[:3, :3], eigvals_only=True)))
conf = np.concatenate([conf, [vol_conf]])
# Now we reorder and subselect the proper columns:
# vol, depth, long, trans, Qlong, Qtrans (discard Qdepth, assumed zero)
conf = conf[[6, 2, 0, 1, 3, 4]]
return conf
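# A minimal numeric sketch of the linearization used above, assuming a toy
# whitened Jacobian (names and values are illustrative only, not MNE API):
#
#     >>> rng = np.random.RandomState(0)
#     >>> J = rng.randn(100, 6)                    # whitened Jacobian
#     >>> C = np.linalg.pinv(np.dot(J.T, J))       # linearized covariance
#     >>> conf = 1.96 * np.sqrt(np.diag(C))        # 95% confidence intervals
#
# The real implementation additionally rescales position and amplitude columns
# before inversion and derives a confidence volume from the eigenvalues of the
# 3x3 position block of C.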
def _surface_constraint(rd, surf, min_dist_to_inner_skull):
"""Surface fitting constraint."""
dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],
return_dists=True)[1][0]
if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
dist *= -1.
# Once we know the dipole is below the inner skull,
# let's check if its distance to the inner skull is at least
# min_dist_to_inner_skull. This can be enforced by adding a
    # constraint proportional to its distance.
dist -= min_dist_to_inner_skull
return dist
def _sphere_constraint(rd, r0, R_adj):
"""Sphere fitting constraint."""
return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
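# Both constraint helpers follow the fmin_cobyla convention used below: the
# returned value is positive when the candidate location is acceptable (inside
# the surface/sphere with the requested margin) and negative otherwise. A
# hedged toy check (values are illustrative only):
#
#     >>> d = _sphere_constraint(np.array([0., 0., 0.05]),
#     ...                        r0=np.zeros(3), R_adj=0.07)
#     >>> d > 0  # ~2 cm inside the adjusted radius
#     True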
def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener, fmin_cobyla, ori, rank):
"""Fit a single bit of data."""
B = np.dot(whitener, B_orig)
# make constraint function to keep the solver within the inner skull
if 'rr' in fwd_data['inner_skull']: # bem
surf = fwd_data['inner_skull']
constraint = partial(_surface_constraint, surf=surf,
min_dist_to_inner_skull=min_dist_to_inner_skull)
else: # sphere
surf = None
constraint = partial(
_sphere_constraint, r0=fwd_data['inner_skull']['r0'],
R_adj=fwd_data['inner_skull']['R'] - min_dist_to_inner_skull)
# Find a good starting point (find_best_guess in C)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
return np.zeros(3), 0, np.zeros(3), 0, B
idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)
for fi, fwd_svd in enumerate(guess_data['fwd_svd'])])
x0 = guess_rrs[idx]
lwork = _svd_lwork((3, B.shape[0]))
fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener,
lwork=lwork)
# Tested minimizers:
# Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
# Several were similar, but COBYLA won for having a handy constraint
# function we can use to ensure we stay inside the inner skull /
# smallest sphere
rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
rhobeg=5e-2, rhoend=5e-5, disp=False)
# simplex = _make_tetra_simplex() + x0
# _simplex_minimize(simplex, 1e-4, 2e-4, fun)
# rd_final = simplex[0]
# Compute the dipole moment at the final point
Q, gof, residual_noproj, n_comp = _fit_Q(
fwd_data, whitener, B, B2, B_orig, rd_final, ori=ori)
khi2 = (1 - gof) * B2
nfree = rank - n_comp
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)
msg = '---- Fitted : %7.1f ms' % (1000. * t)
if surf is not None:
dist_to_inner_skull = _compute_nearest(
surf['rr'], rd_final[np.newaxis, :], return_dists=True)[1][0]
msg += (", distance to inner skull : %2.4f mm"
% (dist_to_inner_skull * 1000.))
logger.info(msg)
return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj
def _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener,
fmin_cobyla, ori, rank):
"""Fit a data using a fixed position."""
B = np.dot(whitener, B_orig)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
return np.zeros(3), 0, np.zeros(3), 0, np.zeros(6)
# Compute the dipole moment
Q, gof, residual_noproj = _fit_Q(guess_data, whitener, B, B2, B_orig,
rd=None, ori=ori)[:3]
if ori is None:
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
else:
amp = np.dot(Q, ori)
rd_final = guess_rrs[0]
# This will be slow, and we don't use it anyway, so omit it for now:
# conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)
conf = khi2 = nfree = None
# No corresponding 'logger' message here because it should go *very* fast
return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj
@verbose
def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,
pos=None, ori=None, rank=None, verbose=None):
"""Fit a dipole.
Parameters
----------
evoked : instance of Evoked
The dataset to fit.
cov : str | instance of Covariance
The noise covariance.
bem : str | instance of ConductorModel
The BEM filename (str) or conductor model.
trans : str | None
The head<->MRI transform filename. Must be provided unless BEM
is a sphere model.
min_dist : float
Minimum distance (in millimeters) from the dipole to the inner skull.
Must be positive. Note that because this is a constraint passed to
a solver it is not strict but close, i.e. for a ``min_dist=5.`` the
fits could be 4.9 mm from the inner skull.
%(n_jobs)s
It is used in field computation and fitting.
pos : ndarray, shape (3,) | None
Position of the dipole to use. If None (default), sequential
fitting (different position and orientation for each time instance)
is performed. If a position (in head coords) is given as an array,
the position is fixed during fitting.
.. versionadded:: 0.12
ori : ndarray, shape (3,) | None
Orientation of the dipole to use. If None (default), the
orientation is free to change as a function of time. If an
orientation (in head coordinates) is given as an array, ``pos``
must also be provided, and the routine computes the amplitude and
goodness of fit of the dipole at the given position and orientation
for each time instant.
.. versionadded:: 0.12
%(rank_None)s
.. versionadded:: 0.20
%(verbose)s
Returns
-------
dip : instance of Dipole or DipoleFixed
The dipole fits. A :class:`mne.DipoleFixed` is returned if
``pos`` and ``ori`` are both not None, otherwise a
:class:`mne.Dipole` is returned.
residual : instance of Evoked
The M-EEG data channels with the fitted dipolar activity removed.
See Also
--------
mne.beamformer.rap_music
Dipole
DipoleFixed
read_dipole
Notes
-----
.. versionadded:: 0.9.0
"""
from scipy import linalg
# This could eventually be adapted to work with other inputs, these
# are what is needed:
evoked = evoked.copy()
# Determine if a list of projectors has an average EEG ref
if _needs_eeg_average_ref_proj(evoked.info):
raise ValueError('EEG average reference is mandatory for dipole '
'fitting.')
if min_dist < 0:
raise ValueError('min_dist should be positive. Got %s' % min_dist)
if ori is not None and pos is None:
raise ValueError('pos must be provided if ori is not None')
data = evoked.data
if not np.isfinite(data).all():
raise ValueError('Evoked data must be finite')
info = evoked.info
times = evoked.times.copy()
comment = evoked.comment
# Convert the min_dist to meters
min_dist_to_inner_skull = min_dist / 1000.
del min_dist
# Figure out our inputs
neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=[]))
if isinstance(bem, str):
bem_extra = bem
else:
bem_extra = repr(bem)
logger.info('BEM : %s' % bem_extra)
mri_head_t, trans = _get_trans(trans)
logger.info('MRI transform : %s' % trans)
bem = _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=False)
if not bem['is_sphere']:
# Find the best-fitting sphere
inner_skull = _bem_find_surface(bem, 'inner_skull')
inner_skull = inner_skull.copy()
R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
# r0 back to head frame for logging
r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
inner_skull['r0'] = r0
logger.info('Head origin : '
'%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
del R, r0
else:
r0 = bem['r0']
if len(bem.get('layers', [])) > 0:
R = bem['layers'][0]['rad']
kind = 'rad'
else: # MEG-only
# Use the minimum distance to the MEG sensors as the radius then
R = np.dot(np.linalg.inv(info['dev_head_t']['trans']),
np.hstack([r0, [1.]]))[:3] # r0 -> device
R = R - [info['chs'][pick]['loc'][:3]
for pick in pick_types(info, meg=True, exclude=[])]
if len(R) == 0:
raise RuntimeError('No MEG channels found, but MEG-only '
'sphere model used')
R = np.min(np.sqrt(np.sum(R * R, axis=1))) # use dist to sensors
kind = 'max_rad'
logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm, '
'%s = %6.1f mm'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], kind, R))
inner_skull = dict(R=R, r0=r0) # NB sphere model defined in head frame
del R, r0
accurate = False # can be an option later (shouldn't make big diff)
# Deal with DipoleFixed cases here
if pos is not None:
fixed_position = True
pos = np.array(pos, float)
if pos.shape != (3,):
raise ValueError('pos must be None or a 3-element array-like,'
' got %s' % (pos,))
logger.info('Fixed position : %6.1f %6.1f %6.1f mm'
% tuple(1000 * pos))
if ori is not None:
ori = np.array(ori, float)
if ori.shape != (3,):
                raise ValueError('ori must be None or a 3-element array-like,'
' got %s' % (ori,))
norm = np.sqrt(np.sum(ori * ori))
if not np.isclose(norm, 1):
raise ValueError('ori must be a unit vector, got length %s'
% (norm,))
            logger.info('Fixed orientation : %6.4f %6.4f %6.4f'
% tuple(ori))
else:
logger.info('Free orientation : <time-varying>')
fit_n_jobs = 1 # only use 1 job to do the guess fitting
else:
fixed_position = False
# Eventually these could be parameters, but they are just used for
# the initial grid anyway
guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf
guess_mindist = max(0.005, min_dist_to_inner_skull)
guess_exclude = 0.02
logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,))
if guess_mindist > 0.0:
logger.info('Guess mindist : %6.1f mm'
% (1000 * guess_mindist,))
if guess_exclude > 0:
logger.info('Guess exclude : %6.1f mm'
% (1000 * guess_exclude,))
logger.info('Using %s MEG coil definitions.'
% ("accurate" if accurate else "standard"))
fit_n_jobs = n_jobs
if isinstance(cov, str):
logger.info('Noise covariance : %s' % (cov,))
cov = read_cov(cov, verbose=False)
logger.info('')
_print_coord_trans(mri_head_t)
_print_coord_trans(info['dev_head_t'])
logger.info('%d bad channels total' % len(info['bads']))
# Forward model setup (setup_forward_model from setup.c)
ch_types = evoked.get_channel_types()
megcoils, compcoils, megnames, meg_info = [], [], [], None
eegels, eegnames = [], []
if 'grad' in ch_types or 'mag' in ch_types:
megcoils, compcoils, megnames, meg_info = \
_prep_meg_channels(info, exclude='bads',
accurate=accurate, verbose=verbose)
if 'eeg' in ch_types:
eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
verbose=verbose)
# Ensure that MEG and/or EEG channels are present
if len(megcoils + eegels) == 0:
raise RuntimeError('No MEG or EEG channels found.')
# Whitener for the data
logger.info('Decomposing the sensor noise covariance matrix...')
picks = pick_types(info, meg=True, eeg=True, ref_meg=False)
# In case we want to more closely match MNE-C for debugging:
# from .io.pick import pick_info
# from .cov import prepare_noise_cov
# info_nb = pick_info(info, picks)
# cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
# nzero = (cov['eig'] > 0)
# n_chan = len(info_nb['ch_names'])
# whitener = np.zeros((n_chan, n_chan), dtype=np.float64)
# whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
# whitener = np.dot(whitener, cov['eigvec'])
whitener, _, rank = compute_whitener(cov, info, picks=picks,
rank=rank, return_rank=True)
# Proceed to computing the fits (make_guess_data)
if fixed_position:
guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True]))
logger.info('Compute forward for dipole location...')
else:
logger.info('\n---- Computing the forward solution for the guesses...')
guess_src = _make_guesses(inner_skull, guess_grid, guess_exclude,
guess_mindist, n_jobs=n_jobs)[0]
# grid coordinates go from mri to head frame
transform_surface_to(guess_src, 'head', mri_head_t)
logger.info('Go through all guess source locations...')
# inner_skull goes from mri to head frame
if 'rr' in inner_skull:
transform_surface_to(inner_skull, 'head', mri_head_t)
if fixed_position:
if 'rr' in inner_skull:
check = _surface_constraint(pos, inner_skull,
min_dist_to_inner_skull)
else:
check = _sphere_constraint(
pos, inner_skull['r0'],
R_adj=inner_skull['R'] - min_dist_to_inner_skull)
if check <= 0:
raise ValueError('fixed position is %0.1fmm outside the inner '
'skull boundary' % (-1000 * check,))
# C code computes guesses w/sphere model for speed, don't bother here
fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
inner_skull=inner_skull)
# fwd_data['inner_skull'] in head frame, bem in mri, confusing...
_prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,
verbose=False)
guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards(
fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs)
# decompose ahead of time
guess_fwd_svd = [linalg.svd(fwd, full_matrices=False)
for fwd in np.array_split(guess_fwd,
len(guess_src['rr']))]
guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd,
fwd_orig=guess_fwd_orig, scales=guess_fwd_scales)
del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales # destroyed
logger.info('[done %d source%s]' % (guess_src['nuse'],
_pl(guess_src['nuse'])))
# Do actual fits
data = data[picks]
ch_names = [info['ch_names'][p] for p in picks]
proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
fun = _fit_dipole_fixed if fixed_position else _fit_dipole
out = _fit_dipoles(
fun, min_dist_to_inner_skull, data, times, guess_src['rr'],
guess_data, fwd_data, whitener, ori, n_jobs, rank)
assert len(out) == 8
if fixed_position and ori is not None:
# DipoleFixed
data = np.array([out[1], out[3]])
out_info = deepcopy(info)
loc = np.concatenate([pos, ori, np.zeros(6)])
out_info['chs'] = [
dict(ch_name='dip 01', loc=loc, kind=FIFF.FIFFV_DIPOLE_WAVE,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_AM,
coil_type=FIFF.FIFFV_COIL_DIPOLE,
unit_mul=0, range=1, cal=1., scanno=1, logno=1),
dict(ch_name='goodness', loc=np.full(12, np.nan),
kind=FIFF.FIFFV_GOODNESS_FIT, unit=FIFF.FIFF_UNIT_AM,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
coil_type=FIFF.FIFFV_COIL_NONE,
unit_mul=0, range=1., cal=1., scanno=2, logno=100)]
for key in ['hpi_meas', 'hpi_results', 'projs']:
out_info[key] = list()
for key in ['acq_pars', 'acq_stim', 'description', 'dig',
'experimenter', 'hpi_subsystem', 'proj_id', 'proj_name',
'subject_info']:
out_info[key] = None
out_info['bads'] = []
out_info._update_redundant()
out_info._check_consistency()
dipoles = DipoleFixed(out_info, data, times, evoked.nave,
evoked._aspect_kind, comment=comment)
else:
dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment,
out[4], out[5], out[6])
residual = evoked.copy().apply_proj() # set the projs active
residual.data[picks] = np.dot(proj_op, out[-1])
logger.info('%d time points fitted' % len(dipoles.times))
return dipoles, residual
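# A minimal usage sketch for fit_dipole (file names below are hypothetical
# placeholders, not shipped data):
#
#     >>> evoked = mne.read_evokeds('meas-ave.fif', condition=0)
#     >>> evoked.crop(0.08, 0.09)  # restrict to the peak of interest
#     >>> dip, residual = mne.fit_dipole(evoked, 'meas-cov.fif',
#     ...                                'subject-bem-sol.fif',
#     ...                                trans='subject-trans.fif')
#
# ``dip`` is a Dipole (or DipoleFixed when both ``pos`` and ``ori`` are given)
# and ``residual`` is an Evoked with the fitted dipolar field removed.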
def get_phantom_dipoles(kind='vectorview'):
"""Get standard phantom dipole locations and orientations.
Parameters
----------
kind : str
Get the information for the given system:
``vectorview`` (default)
The Neuromag VectorView phantom.
``otaniemi``
The older Neuromag phantom used at Otaniemi.
Returns
-------
pos : ndarray, shape (n_dipoles, 3)
The dipole positions.
ori : ndarray, shape (n_dipoles, 3)
The dipole orientations.
Notes
-----
The Elekta phantoms have a radius of 79.5mm, and HPI coil locations
in the XY-plane at the axis extrema (e.g., (79.5, 0), (0, -79.5), ...).
"""
_check_option('kind', kind, ['vectorview', 'otaniemi'])
if kind == 'vectorview':
# these values were pulled from a scanned image provided by
# Elekta folks
a = np.array([59.7, 48.6, 35.8, 24.8, 37.2, 27.5, 15.8, 7.9])
b = np.array([46.1, 41.9, 38.3, 31.5, 13.9, 16.2, 20.0, 19.3])
x = np.concatenate((a, [0] * 8, -b, [0] * 8))
y = np.concatenate(([0] * 8, -a, [0] * 8, b))
c = [22.9, 23.5, 25.5, 23.1, 52.0, 46.4, 41.0, 33.0]
d = [44.4, 34.0, 21.6, 12.7, 62.4, 51.5, 39.1, 27.9]
z = np.concatenate((c, c, d, d))
signs = ([1, -1] * 4 + [-1, 1] * 4) * 2
elif kind == 'otaniemi':
        # these values were pulled from a Neuromag manual
# (NM20456A, 13.7.1999, p.65)
a = np.array([56.3, 47.6, 39.0, 30.3])
b = np.array([32.5, 27.5, 22.5, 17.5])
c = np.zeros(4)
x = np.concatenate((a, b, c, c, -a, -b, c, c))
y = np.concatenate((c, c, -a, -b, c, c, b, a))
z = np.concatenate((b, a, b, a, b, a, a, b))
signs = [-1] * 8 + [1] * 16 + [-1] * 8
pos = np.vstack((x, y, z)).T / 1000.
# Locs are always in XZ or YZ, and so are the oris. The oris are
# also in the same plane and tangential, so it's easy to determine
# the orientation.
ori = list()
for pi, this_pos in enumerate(pos):
this_ori = np.zeros(3)
idx = np.where(this_pos == 0)[0]
# assert len(idx) == 1
idx = np.setdiff1d(np.arange(3), idx[0])
this_ori[idx] = (this_pos[idx][::-1] /
np.linalg.norm(this_pos[idx])) * [1, -1]
this_ori *= signs[pi]
# Now we have this quality, which we could uncomment to
# double-check:
# np.testing.assert_allclose(np.dot(this_ori, this_pos) /
# np.linalg.norm(this_pos), 0,
# atol=1e-15)
ori.append(this_ori)
ori = np.array(ori)
return pos, ori
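# A small usage sketch (illustrative only):
#
#     >>> pos, ori = get_phantom_dipoles('vectorview')
#     >>> pos.shape, ori.shape
#     ((32, 3), (32, 3))
#
# Positions are returned in meters; each orientation is a unit vector that is
# tangential at the corresponding position.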
def _concatenate_dipoles(dipoles):
"""Concatenate a list of dipoles."""
times, pos, amplitude, ori, gof = [], [], [], [], []
for dipole in dipoles:
times.append(dipole.times)
pos.append(dipole.pos)
amplitude.append(dipole.amplitude)
ori.append(dipole.ori)
gof.append(dipole.gof)
return Dipole(np.concatenate(times), np.concatenate(pos),
np.concatenate(amplitude), np.concatenate(ori),
np.concatenate(gof), name=None)
|
bsd-3-clause
|
etkirsch/scikit-learn
|
sklearn/linear_model/tests/test_theil_sen.py
|
234
|
9928
|
"""
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
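# A hedged usage sketch for the helper above (illustrative only): anything
# printed inside the block is silenced by redirecting both streams to
# os.devnull.
#
#     >>> with no_stdout_stderr():
#     ...     print("this output is discarded")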
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
    # Test a larger problem and check the exact solution in the 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
    # Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
|
bsd-3-clause
|
henridwyer/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
44
|
22866
|
# Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
    # by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
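# A minimal usage sketch; the shapes follow the defaults above (illustrative
# only):
#
#     >>> X, y, X_test, y_test = build_dataset()
#     >>> X.shape, y.shape
#     ((50, 200), (50,))
#
# Only the first 10 entries of the true coefficient vector are non-zero, which
# is what makes the problem well suited to L1-penalized estimators.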
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
    # for this we check that their positions in the grid of clf.alphas_
    # do not differ by more than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
|
bsd-3-clause
|
paladin74/neural-network-animation
|
matplotlib/backends/backend_qt5.py
|
10
|
29378
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import re
import signal
import sys
from six import unichr
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import FigureManagerBase
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.backend_bases import NavigationToolbar2
from matplotlib.backend_bases import cursors
from matplotlib.backend_bases import TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
try:
import matplotlib.backends.qt_editor.figureoptions as figureoptions
except ImportError:
figureoptions = None
from .qt_compat import QtCore, QtGui, QtWidgets, _getSaveFileName, __version__
from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
backend_version = __version__
# SPECIAL_KEYS are keys that do *not* return their unicode name
# instead they have manually specified names
SPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',
QtCore.Qt.Key_Shift: 'shift',
QtCore.Qt.Key_Alt: 'alt',
QtCore.Qt.Key_Meta: 'super',
QtCore.Qt.Key_Return: 'enter',
QtCore.Qt.Key_Left: 'left',
QtCore.Qt.Key_Up: 'up',
QtCore.Qt.Key_Right: 'right',
QtCore.Qt.Key_Down: 'down',
QtCore.Qt.Key_Escape: 'escape',
QtCore.Qt.Key_F1: 'f1',
QtCore.Qt.Key_F2: 'f2',
QtCore.Qt.Key_F3: 'f3',
QtCore.Qt.Key_F4: 'f4',
QtCore.Qt.Key_F5: 'f5',
QtCore.Qt.Key_F6: 'f6',
QtCore.Qt.Key_F7: 'f7',
QtCore.Qt.Key_F8: 'f8',
QtCore.Qt.Key_F9: 'f9',
QtCore.Qt.Key_F10: 'f10',
QtCore.Qt.Key_F11: 'f11',
QtCore.Qt.Key_F12: 'f12',
QtCore.Qt.Key_Home: 'home',
QtCore.Qt.Key_End: 'end',
QtCore.Qt.Key_PageUp: 'pageup',
QtCore.Qt.Key_PageDown: 'pagedown',
QtCore.Qt.Key_Tab: 'tab',
QtCore.Qt.Key_Backspace: 'backspace',
QtCore.Qt.Key_Enter: 'enter',
QtCore.Qt.Key_Insert: 'insert',
QtCore.Qt.Key_Delete: 'delete',
QtCore.Qt.Key_Pause: 'pause',
QtCore.Qt.Key_SysReq: 'sysreq',
QtCore.Qt.Key_Clear: 'clear', }
# define which modifier keys are collected on keyboard events.
# elements are (mpl names, Modifier Flag, Qt Key) tuples
SUPER = 0
ALT = 1
CTRL = 2
SHIFT = 3
MODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),
('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),
('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),
('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),
]
if sys.platform == 'darwin':
# in OSX, the control and super (aka cmd/apple) keys are switched, so
# switch them back.
SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'super', # cmd/apple key
QtCore.Qt.Key_Meta: 'control',
})
MODIFIER_KEYS[0] = ('super', QtCore.Qt.ControlModifier,
QtCore.Qt.Key_Control)
MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,
QtCore.Qt.Key_Meta)
def fn_name():
return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE: QtCore.Qt.SizeAllCursor,
cursors.HAND: QtCore.Qt.PointingHandCursor,
cursors.POINTER: QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
# make place holder
qApp = None
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
global qApp
if qApp is None:
if DEBUG:
print("Starting up QApplication")
app = QtWidgets.QApplication.instance()
if app is None:
# check for DISPLAY env variable on X11 build of Qt
if hasattr(QtGui, "QX11Info"):
display = os.environ.get('DISPLAY')
                if display is None or not re.search(r':\d', display):
raise RuntimeError('Invalid DISPLAY variable')
qApp = QtWidgets.QApplication([str(" ")])
qApp.lastWindowClosed.connect(qApp.quit)
else:
qApp = app
class Show(ShowBase):
def mainloop(self):
# allow KeyboardInterrupt exceptions to close the plot window.
signal.signal(signal.SIGINT, signal.SIG_DFL)
global qApp
qApp.exec_()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
thisFig = Figure(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQT(figure)
manager = FigureManagerQT(canvas, num)
return manager
class TimerQT(TimerBase):
'''
    Subclass of :class:`backend_bases.TimerBase` that uses Qt timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def __init__(self, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timeout() signal to the
# _on_timer method.
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._on_timer)
self._timer_set_interval()
def __del__(self):
        # Probably not necessary in practice, but it is good behavior to
# disconnect
try:
TimerBase.__del__(self)
self._timer.timeout.disconnect(self._on_timer)
except RuntimeError:
# Timer C++ object already deleted
pass
def _timer_set_single_shot(self):
self._timer.setSingleShot(self._single)
def _timer_set_interval(self):
self._timer.setInterval(self._interval)
def _timer_start(self):
self._timer.start()
def _timer_stop(self):
self._timer.stop()
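# A hedged usage sketch for TimerQT, normally obtained through
# FigureCanvasQT.new_timer below (update_plot is a hypothetical user callback):
#
#     timer = canvas.new_timer(interval=500)  # fire every 500 ms
#     timer.add_callback(update_plot)
#     timer.start()
#
# add_callback/remove_callback and the interval/single_shot attributes come
# from backend_bases.TimerBase, as described in the class docstring above.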
class FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):
# map Qt button codes to MouseEvent's ones:
buttond = {QtCore.Qt.LeftButton: 1,
QtCore.Qt.MidButton: 2,
QtCore.Qt.RightButton: 3,
# QtCore.Qt.XButton1: None,
# QtCore.Qt.XButton2: None,
}
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQt qt5: ', figure)
_create_qApp()
# NB: Using super for this call to avoid a TypeError:
# __init__() takes exactly 2 arguments (1 given) on QWidget
# PyQt5
super(FigureCanvasQT, self).__init__(figure=figure)
self.figure = figure
self.setMouseTracking(True)
self._idle = True
# hide until we can test and fix
# self.startTimer(backend_IdleEvent.milliseconds)
w, h = self.get_width_height()
self.resize(w, h)
def __timerEvent(self, event):
# hide until we can test and fix
self.mpl_idle_event(event)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
QtWidgets.QApplication.restoreOverrideCursor()
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent(self, event):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y, button)
if DEBUG:
print('button pressed:', event.button())
def mouseDoubleClickEvent(self, event):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y,
button, dblclick=True)
if DEBUG:
print('button doubleclicked:', event.button())
def mouseMoveEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event(self, x, y)
# if DEBUG: print('mouse move')
def mouseReleaseEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_release_event(self, x, y, button)
if DEBUG:
print('button released')
def wheelEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
# from QWheelEvent::delta doc
if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:
steps = event.angleDelta().y() / 120
else:
steps = event.pixelDelta().y()
if steps != 0:
FigureCanvasBase.scroll_event(self, x, y, steps)
if DEBUG:
print('scroll event: delta = %i, '
'steps = %i ' % (event.delta(), steps))
def keyPressEvent(self, event):
key = self._get_key(event)
if key is None:
return
FigureCanvasBase.key_press_event(self, key)
if DEBUG:
print('key press', key)
def keyReleaseEvent(self, event):
key = self._get_key(event)
if key is None:
return
FigureCanvasBase.key_release_event(self, key)
if DEBUG:
print('key release', key)
def resizeEvent(self, event):
w = event.size().width()
h = event.size().height()
if DEBUG:
print('resize (%d x %d)' % (w, h))
print("FigureCanvasQt.resizeEvent(%d, %d)" % (w, h))
dpival = self.figure.dpi
winch = w / dpival
hinch = h / dpival
self.figure.set_size_inches(winch, hinch)
FigureCanvasBase.resize_event(self)
self.draw()
self.update()
QtWidgets.QWidget.resizeEvent(self, event)
def sizeHint(self):
w, h = self.get_width_height()
return QtCore.QSize(w, h)
    def minimumSizeHint(self):
return QtCore.QSize(10, 10)
def _get_key(self, event):
if event.isAutoRepeat():
return None
event_key = event.key()
event_mods = int(event.modifiers()) # actually a bitmask
# get names of the pressed modifier keys
# bit twiddling to pick out modifier keys from event_mods bitmask,
# if event_key is a MODIFIER, it should not be duplicated in mods
mods = [name for name, mod_key, qt_key in MODIFIER_KEYS
if event_key != qt_key and (event_mods & mod_key) == mod_key]
try:
# for certain keys (enter, left, backspace, etc) use a word for the
# key, rather than unicode
key = SPECIAL_KEYS[event_key]
except KeyError:
# unicode defines code points up to 0x0010ffff
            # QT will use Key_Codes larger than that for keyboard keys that
            # are not unicode characters (like multimedia keys)
# skip these
# if you really want them, you should add them to SPECIAL_KEYS
MAX_UNICODE = 0x10ffff
if event_key > MAX_UNICODE:
return None
key = unichr(event_key)
# qt delivers capitalized letters. fix capitalization
# note that capslock is ignored
if 'shift' in mods:
mods.remove('shift')
else:
key = key.lower()
mods.reverse()
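        # compose the key string: modifier names come first, then the key
        # itself, yielding something like 'ctrl+alt+s' (or just 'a')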
return '+'.join(mods + [key])
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting
periodic events through the backend's native event
loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs)
will be executed by the timer every *interval*.
"""
return TimerQT(*args, **kwargs)
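    # Hedged usage sketch (the names `canvas` and `redraw` below are
    # illustrative, not part of this module):
    #     def redraw():
    #         canvas.draw_idle()
    #     timer = canvas.new_timer(interval=500,
    #                              callbacks=[(redraw, [], {})])
    #     timer.start()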
def flush_events(self):
global qApp
qApp.processEvents()
def start_event_loop(self, timeout):
FigureCanvasBase.start_event_loop_default(self, timeout)
start_event_loop.__doc__ = \
FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__ = FigureCanvasBase.stop_event_loop_default.__doc__
def draw_idle(self):
'update drawing area only if idle'
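        # coalesce rapid draw requests: only the first call while idle
        # schedules the single-shot timer below; subsequent calls are no-ops
        # until idle_draw runs and resets the flag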
d = self._idle
self._idle = False
def idle_draw(*args):
try:
self.draw()
finally:
self._idle = True
if d:
QtCore.QTimer.singleShot(0, idle_draw)
class MainWindow(QtWidgets.QMainWindow):
closing = QtCore.Signal()
def closeEvent(self, event):
self.closing.emit()
QtWidgets.QMainWindow.closeEvent(self, event)
class FigureManagerQT(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__(self, canvas, num):
if DEBUG:
print('FigureManagerQT.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.canvas = canvas
self.window = MainWindow()
self.window.closing.connect(canvas.close_event)
self.window.closing.connect(self._widgetclosed)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
self.window.setWindowIcon(QtGui.QIcon(image))
# Give the keyboard focus to the figure instead of the
# manager; StrongFocus accepts both tab and click to focus and
        # will enable the canvas to process events w/o clicking.
        # ClickFocus only takes the focus if the window has been clicked on.
        # See http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
# http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
self.canvas.setFocus()
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, self.window)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
self.toolbar.message.connect(self._show_message)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
# resize the main window so it will display the canvas with the
# requested size:
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height + sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
@QtCore.Slot()
def _show_message(self, s):
# Fixes a PySide segfault.
self.window.statusBar().showMessage(s)
def full_screen_toggle(self):
if self.window.isFullScreen():
self.window.showNormal()
else:
self.window.showFullScreen()
def _widgetclosed(self):
if self.window._destroying:
return
self.window._destroying = True
try:
Gcf.destroy(self.num)
except AttributeError:
pass
# It seems that when the python session is killed,
# Gcf can get destroyed before the Gcf.destroy
# line is run, leading to a useless AttributeError.
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height + self._status_and_tool_height)
def show(self):
self.window.show()
def destroy(self, *args):
# check for qApp first, as PySide deletes it in its atexit handler
if QtWidgets.QApplication.instance() is None:
return
if self.window._destroying:
return
self.window._destroying = True
self.window.destroyed.connect(self._widgetclosed)
if self.toolbar:
self.toolbar.destroy()
if DEBUG:
print("destroy figure manager")
self.window.close()
def get_window_title(self):
return str(self.window.windowTitle())
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):
message = QtCore.Signal(str)
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.parent = parent
self.coordinates = coordinates
self._actions = {}
"""A mapping of toolitem method names to their QActions"""
QtWidgets.QToolBar.__init__(self, parent)
NavigationToolbar2.__init__(self, canvas)
def _icon(self, name):
return QtGui.QIcon(os.path.join(self.basedir, name))
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.addSeparator()
else:
a = self.addAction(self._icon(image_file + '.png'),
text, getattr(self, callback))
self._actions[callback] = a
if callback in ['zoom', 'pan']:
a.setCheckable(True)
if tooltip_text is not None:
a.setToolTip(tooltip_text)
if figureoptions is not None:
a = self.addAction(self._icon("qt4_editor_options.png"),
'Customize', self.edit_parameters)
a.setToolTip('Edit curves line and axes parameters')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtWidgets.QLabel("", self)
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.locLabel.setSizePolicy(
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
if figureoptions is not None:
def edit_parameters(self):
allaxes = self.canvas.figure.get_axes()
if len(allaxes) == 1:
axes = allaxes[0]
else:
titles = []
for axes in allaxes:
title = axes.get_title()
ylabel = axes.get_ylabel()
label = axes.get_label()
if title:
fmt = "%(title)s"
if ylabel:
fmt += ": %(ylabel)s"
fmt += " (%(axes_repr)s)"
elif ylabel:
fmt = "%(axes_repr)s (%(ylabel)s)"
elif label:
fmt = "%(axes_repr)s (%(label)s)"
else:
fmt = "%(axes_repr)s"
titles.append(fmt % dict(title=title,
ylabel=ylabel, label=label,
axes_repr=repr(axes)))
item, ok = QtWidgets.QInputDialog.getItem(
self.parent, 'Customize', 'Select axes:', titles, 0, False)
if ok:
axes = allaxes[titles.index(six.text_type(item))]
else:
return
figureoptions.figure_edit(axes, self)
def _update_buttons_checked(self):
# sync button checkstates to match active mode
self._actions['pan'].setChecked(self._active == 'PAN')
self._actions['zoom'].setChecked(self._active == 'ZOOM')
def pan(self, *args):
super(NavigationToolbar2QT, self).pan(*args)
self._update_buttons_checked()
def zoom(self, *args):
super(NavigationToolbar2QT, self).zoom(*args)
self._update_buttons_checked()
def dynamic_update(self):
self.canvas.draw()
def set_message(self, s):
self.message.emit(s)
if self.coordinates:
self.locLabel.setText(s.replace(', ', '\n'))
def set_cursor(self, cursor):
if DEBUG:
print('Set cursor', cursor)
self.canvas.setCursor(cursord[cursor])
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.canvas.drawRectangle(rect)
def configure_subplots(self):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
dia = SubplotToolQt(self.canvas.figure, self.parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = list(six.iteritems(filetypes))
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
startpath = matplotlib.rcParams.get('savefig.directory', '')
startpath = os.path.expanduser(startpath)
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname, filter = _getSaveFileName(self.parent,
"Choose a filename to save to",
start, filters, selectedFilter)
if fname:
if startpath == '':
# explicitly missing key or empty str signals to use cwd
matplotlib.rcParams['savefig.directory'] = startpath
else:
# save dir for next time
savefig_dir = os.path.dirname(six.text_type(fname))
matplotlib.rcParams['savefig.directory'] = savefig_dir
try:
self.canvas.print_figure(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", str(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SubplotToolQt(SubplotTool, UiSubplotTool):
def __init__(self, targetfig, parent):
UiSubplotTool.__init__(self, None)
self.targetfig = targetfig
self.parent = parent
self.donebutton.clicked.connect(self.close)
self.resetbutton.clicked.connect(self.reset)
self.tightlayout.clicked.connect(self.functight)
# constraints
self.sliderleft.valueChanged.connect(self.sliderright.setMinimum)
self.sliderright.valueChanged.connect(self.sliderleft.setMaximum)
self.sliderbottom.valueChanged.connect(self.slidertop.setMinimum)
self.slidertop.valueChanged.connect(self.sliderbottom.setMaximum)
self.defaults = {}
for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace', ):
self.defaults[attr] = getattr(self.targetfig.subplotpars, attr)
slider = getattr(self, 'slider' + attr)
slider.setMinimum(0)
slider.setMaximum(1000)
slider.setSingleStep(5)
slider.valueChanged.connect(getattr(self, 'func' + attr))
self._setSliderPositions()
def _setSliderPositions(self):
for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace', ):
slider = getattr(self, 'slider' + attr)
slider.setSliderPosition(int(self.defaults[attr] * 1000))
def funcleft(self, val):
if val == self.sliderright.value():
val -= 1
val /= 1000.
self.targetfig.subplots_adjust(left=val)
self.leftvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funcright(self, val):
if val == self.sliderleft.value():
val += 1
val /= 1000.
self.targetfig.subplots_adjust(right=val)
self.rightvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funcbottom(self, val):
if val == self.slidertop.value():
val -= 1
val /= 1000.
self.targetfig.subplots_adjust(bottom=val)
self.bottomvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def functop(self, val):
if val == self.sliderbottom.value():
val += 1
val /= 1000.
self.targetfig.subplots_adjust(top=val)
self.topvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funcwspace(self, val):
val /= 1000.
self.targetfig.subplots_adjust(wspace=val)
self.wspacevalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funchspace(self, val):
val /= 1000.
self.targetfig.subplots_adjust(hspace=val)
self.hspacevalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def functight(self):
self.targetfig.tight_layout()
self._setSliderPositions()
self.targetfig.canvas.draw()
def reset(self):
self.targetfig.subplots_adjust(**self.defaults)
self._setSliderPositions()
self.targetfig.canvas.draw()
def error_msg_qt(msg, parent=None):
if not is_string_like(msg):
msg = ','.join(map(str, msg))
QtWidgets.QMessageBox.warning(None, "Matplotlib",
                                  msg, QtWidgets.QMessageBox.Ok)
def exception_handler(type, value, tb):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename is not None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror is not None:
msg += value.strerror
else:
msg += str(value)
if len(msg):
error_msg_qt(msg)
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
|
mit
|
Haunter17/MIR_SU17
|
exp11/exp11_NewDataChanges.py
|
1
|
28993
|
import numpy as np
import tensorflow as tf
import h5py
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import scipy.io as sp
from random import randint
# Functions for initializing neural nets parameters
def init_weight_variable(shape, nameIn):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial, name=nameIn)
def init_bias_variable(shape, nameIn):
initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
return tf.Variable(initial, name=nameIn)
def conv2d(x, W):
return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')
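# conv2d uses 'VALID' padding with stride 1, so each output spatial dimension
# shrinks by (filter_size - 1); the flattened layer sizes computed in
# Model.y_conv below rely on this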
def preprocessQ(Q, downsamplingRate):
'''
    Take in CQT matrix Q, log-compress its magnitudes, and downsample its columns,
    keeping one out of every downsamplingRate columns
'''
# take the abs to make it real, then log it
Q = np.log10(abs(Q))
# downsample
numCols = np.shape(Q)[1]
# keep only 1 out of every downRate cols
Q = Q[:,range(0, numCols, downsamplingRate)]
return Q
def loadData(filepath, datapath, downsamplingRate):
'''
    filepath: the path to a .mat file which holds a list of training and validation
    filenames, each of which points to a file containing one CQT matrix. It also
    holds the labels for each of these files.
    datapath: the path to the folder containing the raw CQT data; the filenames
    retrieved from the 'filepath' file are resolved relative to this folder.
    Load and return four variables from the file with path filepath:
X_train: input data for training, a stack of CQT matrices, already pre-processed
y_train: labels for X_train
X_val: input data for validation, a stack of CQT matrices, already pre-processed
y_val: labels for X_val
'''
print('==> Loading raw cqt data from {}'.format(filepath))
# benchmark
t_start = time.time()
# reading data
fileAndLabelData = sp.loadmat(filepath)
trainFileList = [str(i[0]) for i in fileAndLabelData['trainFileList'][0]] # un-nest it
valFileList = [str(i[0]) for i in fileAndLabelData['valFileList'][0]]
y_train = np.array(fileAndLabelData['trainLabels']).squeeze().reshape((-1,1)) # reshape into cols
y_val = np.array(fileAndLabelData['valLabels']).squeeze().reshape((-1,1))
# loop through each of the training and validation files and pull the CQT out
X_train = []
for i in range(len(trainFileList)):
print('Loading training data: file %d from %s'%(i, datapath + trainFileList[i]))
data = sp.loadmat(datapath + trainFileList[i])
rawQ = data['Q']['c'][0][0]
X_train.append(preprocessQ(rawQ, downsamplingRate)) # not sure why so nested
X_val = []
for i in range(len(valFileList)):
print('Loading validation data: file %d from %s'%(i, datapath + valFileList[i]))
data = sp.loadmat(datapath + valFileList[i])
rawQ = data['Q']['c'][0][0]
X_val.append(preprocessQ(rawQ, downsamplingRate))
t_end = time.time()
print('--Time elapsed for loading data: {t:.2f} \
seconds'.format(t = t_end - t_start))
print('-- Number of training songs: {}'.format(len(X_train)))
print('-- Number of validation songs: {}'.format(len(X_val)))
return [X_train, y_train, X_val, y_val]
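# Hedged usage sketch (the paths below are placeholders, not real files):
#   X_train, y_train, X_val, y_val = loadData('FilesAndLabels.mat',
#                                             './cqt_data/', 12)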
def loadReverbSamples(filepath):
data = sp.loadmat(filepath)
return data['reverbSamples']
def getMiniBatch(cqtList, labelMatrix, batch_size, batchWidth, makeNoisy, reverbMatrix=[]):
'''
Inputs
    cqtList: a list, where each element is a cqt matrix
    batch_size: the number of samples to draw in this batch
    labelMatrix: a one-hot encoding of the labels, where the nth row corresponds to the label
    for the nth cqt matrix in cqtList
    batchWidth: the number of columns (context frames) in each drawn sample
    makeNoisy: if True, add a random reverb column from reverbMatrix to each sample
    reverbMatrix: matrix of reverb samples to draw noise columns from when makeNoisy is True
'''
# pick batch_size random songs to sample one sample from (allow repeats)
songNums = np.random.randint(0, len(cqtList), size=batch_size)#[randint(0,len(cqtList) - 1) for i in range(batch_size)]
labels = (np.array(labelMatrix))[songNums, :] # grab the labels for each random song
batch = np.array([])
# for each song, pull out a single sample
for i in range(batch_size):
songNum = songNums[i]
curCQT = cqtList[songNum]
startCol = randint(0, np.shape(curCQT)[1] - batchWidth)
curSample = curCQT[:, startCol: startCol + batchWidth]
if makeNoisy:
curSample = addReverbNoise(curSample, reverbMatrix)
# make it have a global mean of 0 before appending
curSample = curSample - np.mean(curSample)
# string out the sample into a row and add it to the batch
curSample = np.reshape(curSample, (1,-1))
        batch = np.vstack((batch, curSample)) if batch.size else curSample # if/else to handle the first pass, when batch is still empty
return [batch, labels]
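# Hedged usage sketch (names are illustrative): drawing a noisy batch of 256
# windows, each num_frames columns wide, from the training CQT list:
#   batch, labels = getMiniBatch(X_train, y_trainOH, 256, num_frames,
#                                True, reverbSamples)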
def addReverbNoise(Q, reverbMatrix):
# pick a random column and add it to each column of Q
randCol = randint(0, np.shape(reverbMatrix)[1] - 1)
col = np.reshape(reverbMatrix[:, randCol], (-1, 1)) # reshape because it converts to a row
return Q + col
# set up property that makes it only be set once
# we'll use this to avoid adding tensors to the graph multiple times
import functools
def lazy_property(function):
attribute = '_cache_' + function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
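# Illustrative sketch of the caching behaviour (the class below is a toy
# example, not part of this experiment):
#   class _Toy:
#       @lazy_property
#       def value(self):
#           print('computed once')
#           return 42
#   t = _Toy()
#   t.value  # prints 'computed once' and caches 42
#   t.value  # returns the cached 42 without recomputing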
class Model:
def __init__(self, num_frames, X_train, y_train, X_val, y_val, reverbSamples, filter_row, filter_col, k1, learningRate, debug):
'''
Initializer for the model
'''
# store the data
self.X_train, self.y_train, self.X_val, self.y_val, self.reverbSamples = X_train, y_train, X_val, y_val, reverbSamples
# store the parameters sent to init that define our model
self.num_frames, self.filter_row, self.filter_col, self.k1, self.learningRate, self.debug = num_frames, filter_row, filter_col, k1, learningRate, debug
# find num_training_vec, total_features, num_frames, num_classes, and l from the shape of the data
# and store them
self.storeParamsFromData()
# Set-up and store the input and output placeholders
x = tf.placeholder(tf.float32, [None, self.total_features])
y_ = tf.placeholder(tf.float32, [None, self.num_classes])
self.x = x
self.y_ = y_
# Setup and store tensor that performs the one-hot encoding
y_train_OHEnc = tf.one_hot(self.y_train.copy(), self.num_classes)
y_val_OHEnc = tf.one_hot(self.y_val.copy(), self.num_classes)
self.y_train_OHEnc = y_train_OHEnc
self.y_val_OHEnc = y_val_OHEnc
# create each lazy_property
# each lazy_property will add tensors to the graph
self.y_conv
self.cross_entropy
self.train_step
self.accuracy
# properties for use in debugging
if self.debug:
self.grads_and_vars
# print to the user that the network has been set up, along with its properties
print("Setting up Single Conv Layer Neural net with %g x %g filters, k1 = %g, learningRate = %g"%(filter_row, filter_col, k1, learningRate))
def storeParamsFromData(self):
'''
Calculate and store parameters from the raw data
        total_features: The number of CQT coefficients total (includes all context frames)
num_training_vec: The number of training examples in your dataset
num_frames: The number of context frames in each training example (total_features / num_freq)
num_classes: The number of songs we're distinguishing between in our output
        l: The length of our second convolutional kernel - for now, it's equal to num_frames
'''
# Neural-network model set-up
# calculating some values which will be nice as we set up the model
num_freq = self.X_train[0].shape[0]
total_features = num_freq * self.num_frames
print('-- Num freq: {}'.format(num_freq))
num_classes = int(max(self.y_train.max(), self.y_val.max()) + 1)
# store what will be helpful later
self.num_freq = num_freq
self.total_features = total_features
self.num_classes = num_classes
@lazy_property
def y_conv(self):
# reshape the input into the form of a spectrograph
x_image = tf.reshape(self.x, [-1, self.num_freq, self.num_frames, 1])
x_image = tf.identity(x_image, name="x_image")
# first convolutional layer parameters
self.W_conv1 = init_weight_variable([self.filter_row, self.filter_col, 1, self.k1], "W_conv1")
self.b_conv1 = init_bias_variable([self.k1], "b_conv1")
# tensor that computes the output of the first convolutional layer
h_conv1 = tf.nn.relu(conv2d(x_image, self.W_conv1) + self.b_conv1)
h_conv1 = tf.identity(h_conv1, name="h_conv_1")
# flatten out the output of the first convolutional layer to pass to the softmax layer
h_conv1_flat = tf.reshape(h_conv1, [-1, (self.num_freq - self.filter_row + 1) * (self.num_frames - self.filter_col + 1) * self.k1])
h_conv1_flat = tf.identity(h_conv1_flat, name="h_conv1_flat")
# softmax layer parameters
self.W_sm = init_weight_variable([(self.num_freq - self.filter_row + 1) * (self.num_frames - self.filter_col + 1) * self.k1, self.num_classes], "W_sm")
self.b_sm = init_bias_variable([self.num_classes], "b_sm")
# the output of the layer - un-normalized and without a non-linearity
# since cross_entropy_with_logits takes care of that
y_conv = tf.matmul(h_conv1_flat, self.W_sm) + self.b_sm
y_conv = tf.identity(y_conv, name="y_conv")
return y_conv # would want to softmax it to get an actual prediction
@lazy_property
def cross_entropy(self):
'''
Create a tensor that computes the cross entropy cost
Use the placeholder y_ as the labels, with input y_conv
Note that softmax_cross_entropy_with_logits takes care of normalizing
y_conv to make it a probability distribution
This tensor can be accessed using: self.cross_entropy
'''
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
cross_entropy = tf.identity(cross_entropy, name="cross_entropy")
return cross_entropy
@lazy_property
def optimizer(self):
'''
Create a tensor that represents the optimizer. This tensor can
be accessed using: self.optimizer
'''
optimizer = tf.train.AdamOptimizer(learning_rate = self.learningRate)
return optimizer
@lazy_property
def train_step(self):
'''
Creates a tensor that represents a single training step. This tensor
can be passed a feed_dict that has x and y_, and it will compute the gradients
and perform a single step.
This tensor can be accessed using: self.train_step
'''
return self.optimizer.minimize(self.cross_entropy)
@lazy_property
def accuracy(self):
'''
Create a tensor that computes the accuracy, using the placeholder y_ as the labeled data
and y_conv for the predictions of the network.
This tensor can be accessed using: self.accuracy
'''
correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return accuracy
'''
Properties that we'll use for debugging
'''
@lazy_property
def grads_and_vars(self):
grads_and_vars = self.optimizer.compute_gradients(self.cross_entropy, tf.trainable_variables())
return grads_and_vars
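    # Hedged sketch of how these lazy properties are consumed once a session
    # is active (m is a Model instance; batch_data/batch_labels are
    # illustrative names):
    #   m.train_step.run(feed_dict={m.x: batch_data, m.y_: batch_labels})
    #   acc = m.accuracy.eval(feed_dict={m.x: batch_data, m.y_: batch_labels})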
def train(self, batch_size, num_batches, print_freq, debug_out='debug.txt'):
'''
Train the Network on the data that will have been loaded when the NN is initialized
        Trained on: self.X_train, and a one-hot encoding of self.y_train
        Trains on num_batches mini-batches, each of size batch_size
Debugging info is written to debug.txt (can add params to have more places to write out
to)
'''
print("==> Entered Training")
# Starting an interactive session and initializing the parameters
#sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# replace it with the one-hot encoded one --- should I replace?
y_trainOH = sess.run(self.y_train_OHEnc)[:, 0, :]
y_valOH = sess.run(self.y_val_OHEnc)[:, 0, :]
# lists to record accuracy at several points during training
train_acc_list = []
val_acc_list = []
train_acc_on_batch_list = []
# lists to record the error at several points during training
train_err_list = []
val_err_list = []
train_err_on_batch_list = []
# track which batches you record data during
batch_numbers = []
# record the start time
t_start = time.time()
# keep track of the best weights and biases
best_weights = [self.W_conv1.eval(), self.W_sm.eval()]
best_biases = [self.b_conv1.eval(), self.b_sm.eval()]
# validation batch size just to get statistics with
valStatisticBatchSize = 500
        [val_batch_data, val_batch_label] = getMiniBatch(self.X_val, y_valOH, valStatisticBatchSize, self.num_frames, False)
        bestValidationError = self.evalByBatch(self.cross_entropy, val_batch_data, val_batch_label, 5000)
for batchNum in range(num_batches):
batchStart = time.time()
fetchMiniStart = time.time()
            [train_batch_data, train_batch_label] = getMiniBatch(self.X_train, y_trainOH, batch_size, self.num_frames, True, self.reverbSamples)
fetchMiniEnd = time.time()
self.train_step.run(feed_dict={self.x: train_batch_data, self.y_: train_batch_label})
batchEnd = time.time()
# print and record data now that we've trained on our full training set
if (batchNum + 1) % print_freq == 0:
# timing for the measurements of cost and accuracy
evaluationStart = time.time()
# val batch for evaluation
                [val_batch_data, val_batch_label] = getMiniBatch(self.X_val, y_valOH, valStatisticBatchSize, self.num_frames, False)
# evaluate training error and accuracy only on the most recent batch
# start with accuracy
train_acc = self.evalByBatch(self.accuracy, train_batch_data, train_batch_label, 5000)
train_acc_list.append(train_acc)
val_acc = self.evalByBatch(self.accuracy, val_batch_data, val_batch_label, 5000)
val_acc_list.append(val_acc)
# Now we compute the error on each set:
train_err = self.evalByBatch(self.cross_entropy, train_batch_data, train_batch_label, 5000)
train_err_list.append(train_err)
val_err = self.evalByBatch(self.cross_entropy, val_batch_data, val_batch_label, 5000)
val_err_list.append(val_err)
# if the validation error is better then store the current weights
if (val_err < bestValidationError):
# update the best error, weights, and biases
                    print('new best =D ')
bestValidationError = val_err
best_weights = [self.W_conv1.eval(), self.W_sm.eval()]
best_biases = [self.b_conv1.eval(), self.b_sm.eval()]
# keep track of which epochs we have data for
batch_numbers += [batchNum]
# this marks the end of our evaluation
evaluationEnd = time.time()
# print a summary of our NN at this epoch
print("batch: %d, time (train, evaluation, fetchMini): (%g, %g, %g), t acc, v acc, t cost, v cost: %.5f, %.5f, %.5f, %.5f"%(batchNum+1, batchEnd - batchStart, evaluationEnd - evaluationStart, fetchMiniEnd - fetchMiniStart, train_acc, val_acc, train_err, val_err))
# debugging print outs
if self.debug:
# print out step / current value ratio for each parameter in our network
# based on training data from the most recent batch
# to the file with name debug_out
                    self.debug_WriteGradAndVar(train_batch_data, train_batch_label, batchNum, debug_out)
# record the total time spent training the neural network
t_end = time.time()
        print('--Time elapsed for training for %g batches: %g'%(num_batches, t_end - t_start))
# return the lists of logged data
        return [train_acc_list, val_acc_list, train_err_list, val_err_list, batch_numbers, best_weights, best_biases]
def evalByBatch(self, toEval, x, y_, batchSize):
weightedAvg = 0.0
for i in range(0, len(x), batchSize):
batch_end_point = min(i + batchSize, len(x))
batch_data = x[i : batch_end_point]
batch_label = y_[i : batch_end_point]
curAmount = toEval.eval(feed_dict={self.x: batch_data, self.y_: batch_label})
# weight by the length of the batch and keep adding on
weightedAvg = weightedAvg + curAmount * float(batch_end_point - i) / len(x)
return weightedAvg
def debug_WriteGradAndVar(self, xDebug, yDebug, epoch, debug_out):
'''
Helper function that prints the ratio of the training step that would be taken
on input data and labels xDebug and yDebug to the magnitude of each parameter
in the network. This gives us a sense of how much each parameter is changing.
Inputs:
xDebug: input data to calculate the gradient from
yDebug: labels for the input data
epoch: the number of the epoch (to print out to the file)
debug_out: the file to write to - if it doesn't exist it will be created
'''
file_object = open(debug_out, 'a+')
# record which epoch this is
file_object.write("Epoch: %d\n"%(epoch))
# find the current learning rate - this will be used with the gradient to find the step size
curLearningRate = self.optimizer._lr
# print each gradient and the variables they are associated with
# the gradients are stored in tuples, where the first element is a tensor
# that computes the gradient, and the second is the parameter that gradient
# is associated with
for gv in self.grads_and_vars:
curGrads = gv[0].eval(feed_dict={self.x: xDebug, self.y_: yDebug})
curSteps = curGrads * curLearningRate # scale down the graident by the learning rate
curVars = gv[1].eval()
# How much, compared to the magnitude of the weight, are we stepping
stepToVarRatio = np.absolute(np.divide(curSteps, curVars))
# print the name of the variable, then all the step ratios (step amount / current value)
# these values will have been averaged across the training examples
curName = gv[1].name
file_object.write("Variable: " + curName + "\n")
for index, step in np.ndenumerate(stepToVarRatio):
file_object.write(str(index) + ": " + str(step) + "\n")
# print summary statistics for this layer
maxVal = np.amax(stepToVarRatio)
thirdQuartile = np.percentile(stepToVarRatio, 75)
mean = np.mean(stepToVarRatio)
median = np.median(stepToVarRatio)
firstQuartile = np.percentile(stepToVarRatio, 25)
minVal = np.amin(stepToVarRatio)
file_object.write("Statistics: (%g, %g, %g, %g, %g, %g)\n"%(minVal, firstQuartile, median, mean, thirdQuartile, maxVal))
file_object.write("---------------------------------------\n")
# close the file
file_object.close()
def makeTrainingPlots(epochs, paramValues, trainingMetricLists, validationMetricLists, paramName, metricName, titles, filenames):
'''
Plots of the given training and validation metrics versus epoch number. One plot per list
in trainingMetricLists and validationMetricLists. Assume there will be the same number of sublists
in both those parameters. Titles will hold a list of strings that will be used for the titles
of the graphs. The last title will be for the plot with all the validation curves. Filenames is a list of filenames to save your plots to
Input:
epochs: a list of the epochs on which data was taken - assume all of them took
data at the same epoch numbers
paramValues: the values of the param that we were varying (to label the curves in our validation plot)
        trainingMetricLists: a list of lists, where each list tracks some metric on the training set over the course of training
        validationMetricLists: a list of lists, where each list tracks the same metric on the validation set over the course of training
paramName: name of the parameter you're varying (e.g. learningRate or kernel height)
metricName: the name of the metric (e.g. accuracy, or cross-entropy error), to be used on the y-axis
titles: titles for the graph (will include info on the params used).
*The last title will be for the validation plot
filename: the filenames to write the graphs to (will include info on the params used)
* the last filename will be for the validation plot
Output:
Write a png file for each list in trainingMetricLists/validationMetricLists with the desired plot
'''
# figure with all the validation curves
validationFig = plt.figure(figsize=(7, 4))
validationPlot = validationFig.add_subplot(111)
# go through each setup and make a plot for each
for i in range(len(trainingMetricLists)):
# pull out the list we're concerned with
trainingMetric = trainingMetricLists[i]
validationMetric = validationMetricLists[i]
# make the figure, add plots, axis lables, a title, and legend
fig = plt.figure(figsize=(7, 4))
myPlot = fig.add_subplot(111)
myPlot.plot(epochs, trainingMetric, '-', label="Training")
myPlot.plot(epochs, validationMetric, '-', label="Validation")
myPlot.set_xlabel("Epoch Number")
myPlot.set_ylabel(metricName)
myPlot.set_title(titles[i])
myPlot.legend(loc="best", frameon=False)
# Write the figure
fig.savefig(filenames[i])
# update the figure with all the validation curves
validationPlot.plot(epochs, validationMetric, '-', label=(paramName + " = " + str(paramValues[i])))
# finish labeling + write the validation plot
validationPlot.set_xlabel("Epoch Number")
validationPlot.set_ylabel(metricName)
validationPlot.set_title(titles[-1])
validationPlot.legend(loc="best", frameon=False)
validationFig.savefig(filenames[-1])
def makeBestResultPlot(paramValues, trainingMetricLists, validationMetricLists, paramName, metricName, title, filename):
'''
Plot the "best" value of the training and validation metric against the param that led to it
Best is assumed to be the largest value of the metric
Input:
        trainingMetricLists: a list of lists, where each list tracks some metric on the training set over the course of training
        validationMetricLists: a list of lists, where each list tracks the same metric on the validation set over the course of training
paramName:
metricName:
title: the title of the graph (will include info on the params used)
filename: the filename to write the graph to (will include info on the params used)
Output:
Write a png file with the desired plot
Is there a way to call the other one to do this? if didn't assume epoch number then yes - oh well
'''
bestTrainingMetrics = [max(curList) for curList in trainingMetricLists]
bestValidationMetrics = [max(curList) for curList in validationMetricLists]
# make the figure, add plots, axis lables, a title, and legend
fig = plt.figure(figsize=(7, 4))
myPlot = fig.add_subplot(111)
myPlot.plot(paramValues, bestTrainingMetrics, '-', label="Training")
myPlot.plot(paramValues, bestValidationMetrics, '-', label="Validation")
myPlot.set_xlabel(paramName)
myPlot.set_ylabel(metricName)
myPlot.set_title(title)
myPlot.legend(loc="best", frameon=False)
# Write the figure
fig.savefig(filename)
def makeEndResultPlot(paramValues, trainingMetricLists, validationMetricLists, paramName, metricName, title, filename):
'''
Plot the final value of the training and validation metric against the param that led to it
Input:
        trainingMetricLists: a list of lists, where each list tracks some metric on the training set over the course of training
        validationMetricLists: a list of lists, where each list tracks the same metric on the validation set over the course of training
paramName:
metricName:
title: the title of the graph (will include info on the params used)
filename: the filename to write the graph to (will include info on the params used)
Output:
Write a png file with the desired plot
Is there a way to call the other one to do this? if didn't assume epoch number then yes - oh well
'''
finalTrainingMetrics = [curList[-1] for curList in trainingMetricLists]
finalValidationMetrics = [curList[-1] for curList in validationMetricLists]
# make the figure, add plots, axis lables, a title, and legend
fig = plt.figure(figsize=(7, 4))
myPlot = fig.add_subplot(111)
myPlot.plot(paramValues, finalTrainingMetrics, '-', label="Training")
myPlot.plot(paramValues, finalValidationMetrics, '-', label="Validation")
myPlot.set_xlabel(paramName)
myPlot.set_ylabel(metricName)
myPlot.set_title(title)
myPlot.legend(loc="best", frameon=False)
# Write the figure
fig.savefig(filename)
'''
Our main, with 121x1 convolutional layer.
'''
# read in command line parameters
try:
# read in a list of the row numbers for the kernels
filterRowsString = sys.argv[1]
    filterRowsIn = list(map(int, filterRowsString.strip('[]').split(',')))
# read in a list of the col numbers for the kernels
filterColsString = sys.argv[2]
# map it from a string into a list of ints
    filterColsIn = list(map(int, filterColsString.strip('[]').split(',')))
    # read in a list of the k1 values (the number of first-layer kernels)
    k1sString = sys.argv[3]
    k1sIn = list(map(int, k1sString.strip('[]').split(',')))
numBatches = int(sys.argv[4])
finalPlotName = sys.argv[5]
except Exception as e:
print('-- {}'.format(e))
# filepath to the data you want to load
filepath = '/pylon2/ci560sp/cstrong/exp11/new_data/deathcabforcutie_out/FilesAndLabels.mat'
datapath = '/pylon2/ci560sp/cstrong/exp11/new_data/deathcabforcutie_out/'
reverbPath = '/pylon2/ci560sp/cstrong/exp11/new_data/deathcabforcutie_out/reverbSamples_dcfc.mat'
# define the configurations we're going to be looking at
# in this exp: just change the number of rows in a vertical kernel
filterCols = filterColsIn
filterRows = filterRowsIn
k1s = k1sIn
learningRates = [0.0005] * len(filterRows)
# set training parameters
batchSize = 256
print_freq = 5
# make lists to store data
train_acc_lists = []
val_acc_lists = []
train_err_lists = []
val_err_lists = []
epoch_number_lists = []
best_weights_lists = []
best_biases_lists = []
# load data
downsamplingRate = 12
sixSecondsNoDownsampling = 1449
print("==> Loading Training and Validation Data")
[X_train, y_train, X_val, y_val] = loadData(filepath, datapath, downsamplingRate)
print("==> Loading Reverb Data")
reverbSamples = loadReverbSamples(reverbPath)
# loop through the setups and make a model each time
for i in range(len(filterRows)):
# create the model - this will create the TF graph as well as load the data
    m = Model(int(np.floor(sixSecondsNoDownsampling / downsamplingRate)), X_train, y_train, X_val, y_val, reverbSamples, filterRows[i], filterCols[i], k1s[i], learningRates[i], False)
# actually train the model (on the data it already loaded)
[train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers, best_weights, best_biases] = m.train(batchSize, numBatches, print_freq)
# store the new data
train_acc_lists.append(train_acc_list)
val_acc_lists.append(val_acc_list)
train_err_lists.append(train_err_list)
val_err_lists.append(val_err_list)
epoch_number_lists.append(epoch_numbers)
best_weights_lists.append(best_weights)
best_biases_lists.append(best_biases)
# grab these to store when we save the model
num_freq = m.num_freq
num_frames = m.num_frames
del m # clear out the model to avoid huge buildup of memory
# printing
print("filterRows = %s"%(filterRows))
print("filterCols = %s"%(filterCols))
print("k1s = %s"%(k1s))
print("learningRates = %s"%(learningRates))
print("train_acc_lists = %s"%(str(train_acc_lists)))
print("val_acc_lists = %s"%(str(val_acc_lists)))
print("train_err_lists = %s"%(str(train_err_lists)))
print("val_err_lists = %s"%(str(val_err_lists)))
print("epoch_number_lists = %s"%(str(epoch_number_lists)))
print("best_weights_lists = %s"%(str(best_weights_lists)))
print("best_biases_lists = %s"%(str(best_biases_lists)))
bestValues = [min(curList) for curList in val_err_lists]
print("bestValues = %s"%str(bestValues))
modelFiles = ['exp9_Model_SingleLayerCNN_%gx%g_k1=%g_LR=%f_Epochs=%g'%(filterRows[i], filterCols[i], k1s[i], learningRates[i], numBatches) for i in range(len(filterRows))]
for i in range(len(filterRows)):
learningRate = learningRates[i]
# pull out the weights - separate into convolutional and softmax weights
curWeights = best_weights_lists[i]
curW_conv1 = np.squeeze(curWeights[0], axis=2)
curW_sm = np.squeeze(curWeights[1])
curBiases = best_biases_lists[i]
# just take the number of frames and frequencies from the most recent model, since they should all be the same
    sp.savemat(modelFiles[i], {'W_conv1': curW_conv1, 'b_conv1': curBiases[0], 'W_sm': curW_sm, 'b_sm': curBiases[1], 'rows_in_region': num_freq, 'cols_in_region' : num_frames, 'learning_rate': learningRate, 'epochs': numBatches, 'lowest_val_err': bestValues[i]})
# plotting
trainingPlotTitles = ['Single Layer CNN with %gx%g kernels and k1=%g, LR=%f'%(filterRows[i], filterCols[i], k1s[i], learningRates[i]) for i in range(len(filterRows))]
trainingPlotTitles.append('Exp 9 Single Layer CNN, Validation Cross-Entropy Cost vs. Epoch')
trainingPlotFiles = ['exp9_training_%gx%g_k1=%g_LR=%f_%gEpochs.png'%(filterRows[i], filterCols[i], k1s[i], learningRates[i], numBatches) for i in range(len(filterRows))]
trainingPlotFiles.append('exp9_validationCurves_%gEpochs'%(numBatches))
makeTrainingPlots(epoch_number_lists[0], zip(filterRows, filterCols), train_err_lists, val_err_lists, "Shape of Kernel", "Cross Entropy Cost", trainingPlotTitles, trainingPlotFiles)
|
mit
|
jreback/pandas
|
pandas/tests/scalar/timestamp/test_unary_ops.py
|
1
|
15498
|
from datetime import datetime
from dateutil.tz import gettz
import pytest
import pytz
from pytz import utc
from pandas._libs.tslibs import NaT, Timestamp, conversion, to_offset
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
import pandas.util._test_decorators as td
import pandas._testing as tm
class TestTimestampUnaryOps:
# --------------------------------------------------------------
# Timestamp.round
@pytest.mark.parametrize(
"timestamp, freq, expected",
[
("20130101 09:10:11", "D", "20130101"),
("20130101 19:10:11", "D", "20130102"),
("20130201 12:00:00", "D", "20130202"),
("20130104 12:00:00", "D", "20130105"),
("2000-01-05 05:09:15.13", "D", "2000-01-05 00:00:00"),
("2000-01-05 05:09:15.13", "H", "2000-01-05 05:00:00"),
("2000-01-05 05:09:15.13", "S", "2000-01-05 05:09:15"),
],
)
def test_round_frequencies(self, timestamp, freq, expected):
dt = Timestamp(timestamp)
result = dt.round(freq)
expected = Timestamp(expected)
assert result == expected
def test_round_tzaware(self):
dt = Timestamp("20130101 09:10:11", tz="US/Eastern")
result = dt.round("D")
expected = Timestamp("20130101", tz="US/Eastern")
assert result == expected
dt = Timestamp("20130101 09:10:11", tz="US/Eastern")
result = dt.round("s")
assert result == dt
def test_round_30min(self):
# round
dt = Timestamp("20130104 12:32:00")
result = dt.round("30Min")
expected = Timestamp("20130104 12:30:00")
assert result == expected
def test_round_subsecond(self):
# GH#14440 & GH#15578
result = Timestamp("2016-10-17 12:00:00.0015").round("ms")
expected = Timestamp("2016-10-17 12:00:00.002000")
assert result == expected
result = Timestamp("2016-10-17 12:00:00.00149").round("ms")
expected = Timestamp("2016-10-17 12:00:00.001000")
assert result == expected
ts = Timestamp("2016-10-17 12:00:00.0015")
for freq in ["us", "ns"]:
assert ts == ts.round(freq)
result = Timestamp("2016-10-17 12:00:00.001501031").round("10ns")
expected = Timestamp("2016-10-17 12:00:00.001501030")
assert result == expected
def test_round_nonstandard_freq(self):
with tm.assert_produces_warning(False):
Timestamp("2016-10-17 12:00:00.001501031").round("1010ns")
def test_round_invalid_arg(self):
stamp = Timestamp("2000-01-05 05:09:15.13")
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
stamp.round("foo")
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
[
("2117-01-01 00:00:45", "floor", "15s", "2117-01-01 00:00:45"),
("2117-01-01 00:00:45", "ceil", "15s", "2117-01-01 00:00:45"),
(
"2117-01-01 00:00:45.000000012",
"floor",
"10ns",
"2117-01-01 00:00:45.000000010",
),
(
"1823-01-01 00:00:01.000000012",
"ceil",
"10ns",
"1823-01-01 00:00:01.000000020",
),
("1823-01-01 00:00:01", "floor", "1s", "1823-01-01 00:00:01"),
("1823-01-01 00:00:01", "ceil", "1s", "1823-01-01 00:00:01"),
("NaT", "floor", "1s", "NaT"),
("NaT", "ceil", "1s", "NaT"),
],
)
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = Timestamp(test_input)
func = getattr(dt, rounder)
result = func(freq)
if dt is NaT:
assert result is NaT
else:
expected = Timestamp(expected)
assert result == expected
@pytest.mark.parametrize(
"test_input, freq, expected",
[
("2018-01-01 00:02:06", "2s", "2018-01-01 00:02:06"),
("2018-01-01 00:02:00", "2T", "2018-01-01 00:02:00"),
("2018-01-01 00:04:00", "4T", "2018-01-01 00:04:00"),
("2018-01-01 00:15:00", "15T", "2018-01-01 00:15:00"),
("2018-01-01 00:20:00", "20T", "2018-01-01 00:20:00"),
("2018-01-01 03:00:00", "3H", "2018-01-01 03:00:00"),
],
)
@pytest.mark.parametrize("rounder", ["ceil", "floor", "round"])
def test_round_minute_freq(self, test_input, freq, expected, rounder):
        # Ensure timestamps that shouldn't round don't!
# GH#21262
dt = Timestamp(test_input)
expected = Timestamp(expected)
func = getattr(dt, rounder)
result = func(freq)
assert result == expected
def test_ceil(self):
dt = Timestamp("20130101 09:10:11")
result = dt.ceil("D")
expected = Timestamp("20130102")
assert result == expected
def test_floor(self):
dt = Timestamp("20130101 09:10:11")
result = dt.floor("D")
expected = Timestamp("20130101")
assert result == expected
@pytest.mark.parametrize("method", ["ceil", "round", "floor"])
def test_round_dst_border_ambiguous(self, method):
# GH 18946 round near "fall back" DST
ts = Timestamp("2017-10-29 00:00:00", tz="UTC").tz_convert("Europe/Madrid")
#
result = getattr(ts, method)("H", ambiguous=True)
assert result == ts
result = getattr(ts, method)("H", ambiguous=False)
expected = Timestamp("2017-10-29 01:00:00", tz="UTC").tz_convert(
"Europe/Madrid"
)
assert result == expected
result = getattr(ts, method)("H", ambiguous="NaT")
assert result is NaT
msg = "Cannot infer dst time"
with pytest.raises(pytz.AmbiguousTimeError, match=msg):
getattr(ts, method)("H", ambiguous="raise")
@pytest.mark.parametrize(
"method, ts_str, freq",
[
["ceil", "2018-03-11 01:59:00-0600", "5min"],
["round", "2018-03-11 01:59:00-0600", "5min"],
["floor", "2018-03-11 03:01:00-0500", "2H"],
],
)
def test_round_dst_border_nonexistent(self, method, ts_str, freq):
# GH 23324 round near "spring forward" DST
ts = Timestamp(ts_str, tz="America/Chicago")
result = getattr(ts, method)(freq, nonexistent="shift_forward")
expected = Timestamp("2018-03-11 03:00:00", tz="America/Chicago")
assert result == expected
result = getattr(ts, method)(freq, nonexistent="NaT")
assert result is NaT
msg = "2018-03-11 02:00:00"
with pytest.raises(pytz.NonExistentTimeError, match=msg):
getattr(ts, method)(freq, nonexistent="raise")
@pytest.mark.parametrize(
"timestamp",
[
"2018-01-01 0:0:0.124999360",
"2018-01-01 0:0:0.125000367",
"2018-01-01 0:0:0.125500",
"2018-01-01 0:0:0.126500",
"2018-01-01 12:00:00",
"2019-01-01 12:00:00",
],
)
@pytest.mark.parametrize(
"freq",
[
"2ns",
"3ns",
"4ns",
"5ns",
"6ns",
"7ns",
"250ns",
"500ns",
"750ns",
"1us",
"19us",
"250us",
"500us",
"750us",
"1s",
"2s",
"3s",
"1D",
],
)
def test_round_int64(self, timestamp, freq):
# check that all rounding modes are accurate to int64 precision
# see GH#22591
dt = Timestamp(timestamp)
unit = to_offset(freq).nanos
# test floor
result = dt.floor(freq)
assert result.value % unit == 0, f"floor not a {freq} multiple"
assert 0 <= dt.value - result.value < unit, "floor error"
# test ceil
result = dt.ceil(freq)
assert result.value % unit == 0, f"ceil not a {freq} multiple"
assert 0 <= result.value - dt.value < unit, "ceil error"
# test round
result = dt.round(freq)
assert result.value % unit == 0, f"round not a {freq} multiple"
assert abs(result.value - dt.value) <= unit // 2, "round error"
if unit % 2 == 0 and abs(result.value - dt.value) == unit // 2:
# round half to even
assert result.value // unit % 2 == 0, "round half to even error"
# --------------------------------------------------------------
# Timestamp.replace
def test_replace_naive(self):
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00")
result = ts.replace(hour=0)
expected = Timestamp("2016-01-01 00:00:00")
assert result == expected
def test_replace_aware(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
# replacing datetime components with and w/o presence of a timezone
ts = Timestamp("2016-01-01 09:00:00", tz=tz)
result = ts.replace(hour=0)
expected = Timestamp("2016-01-01 00:00:00", tz=tz)
assert result == expected
def test_replace_preserves_nanos(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
result = ts.replace(hour=0)
expected = Timestamp("2016-01-01 00:00:00.000000123", tz=tz)
assert result == expected
def test_replace_multiple(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
# replacing datetime components with and w/o presence of a timezone
# test all
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
result = ts.replace(
year=2015,
month=2,
day=2,
hour=0,
minute=5,
second=5,
microsecond=5,
nanosecond=5,
)
expected = Timestamp("2015-02-02 00:05:05.000005005", tz=tz)
assert result == expected
def test_replace_invalid_kwarg(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
msg = r"replace\(\) got an unexpected keyword argument"
with pytest.raises(TypeError, match=msg):
ts.replace(foo=5)
def test_replace_integer_args(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
msg = "value must be an integer, received <class 'float'> for hour"
with pytest.raises(ValueError, match=msg):
ts.replace(hour=0.1)
def test_replace_tzinfo_equiv_tz_localize_none(self):
# GH#14621, GH#7825
# assert conversion to naive is the same as replacing tzinfo with None
ts = Timestamp("2013-11-03 01:59:59.999999-0400", tz="US/Eastern")
assert ts.tz_localize(None) == ts.replace(tzinfo=None)
@td.skip_if_windows
def test_replace_tzinfo(self):
# GH#15683
dt = datetime(2016, 3, 27, 1)
tzinfo = pytz.timezone("CET").localize(dt, is_dst=False).tzinfo
result_dt = dt.replace(tzinfo=tzinfo)
result_pd = Timestamp(dt).replace(tzinfo=tzinfo)
# datetime.timestamp() converts in the local timezone
with tm.set_timezone("UTC"):
assert result_dt.timestamp() == result_pd.timestamp()
assert result_dt == result_pd
assert result_dt == result_pd.to_pydatetime()
result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None)
result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None)
# datetime.timestamp() converts in the local timezone
with tm.set_timezone("UTC"):
assert result_dt.timestamp() == result_pd.timestamp()
assert result_dt == result_pd
assert result_dt == result_pd.to_pydatetime()
@pytest.mark.parametrize(
"tz, normalize",
[
(pytz.timezone("US/Eastern"), lambda x: x.tzinfo.normalize(x)),
(gettz("US/Eastern"), lambda x: x),
],
)
def test_replace_across_dst(self, tz, normalize):
# GH#18319 check that 1) timezone is correctly normalized and
# 2) that hour is not incorrectly changed by this normalization
ts_naive = Timestamp("2017-12-03 16:03:30")
ts_aware = conversion.localize_pydatetime(ts_naive, tz)
# Preliminary sanity-check
assert ts_aware == normalize(ts_aware)
# Replace across DST boundary
ts2 = ts_aware.replace(month=6)
# Check that `replace` preserves hour literal
assert (ts2.hour, ts2.minute) == (ts_aware.hour, ts_aware.minute)
# Check that post-replace object is appropriately normalized
ts2b = normalize(ts2)
assert ts2 == ts2b
def test_replace_dst_border(self):
# Gh 7825
t = Timestamp("2013-11-3", tz="America/Chicago")
result = t.replace(hour=3)
expected = Timestamp("2013-11-3 03:00:00", tz="America/Chicago")
assert result == expected
@pytest.mark.parametrize("fold", [0, 1])
@pytest.mark.parametrize("tz", ["dateutil/Europe/London", "Europe/London"])
def test_replace_dst_fold(self, fold, tz):
# GH 25017
d = datetime(2019, 10, 27, 2, 30)
ts = Timestamp(d, tz=tz)
result = ts.replace(hour=1, fold=fold)
expected = Timestamp(datetime(2019, 10, 27, 1, 30)).tz_localize(
tz, ambiguous=not fold
)
assert result == expected
# --------------------------------------------------------------
# Timestamp.normalize
@pytest.mark.parametrize("arg", ["2013-11-30", "2013-11-30 12:00:00"])
def test_normalize(self, tz_naive_fixture, arg):
tz = tz_naive_fixture
ts = Timestamp(arg, tz=tz)
result = ts.normalize()
expected = Timestamp("2013-11-30", tz=tz)
assert result == expected
def test_normalize_pre_epoch_dates(self):
# GH: 36294
result = Timestamp("1969-01-01 09:00:00").normalize()
expected = Timestamp("1969-01-01 00:00:00")
assert result == expected
# --------------------------------------------------------------
@td.skip_if_windows
def test_timestamp(self):
# GH#17329
# tz-naive --> treat it as if it were UTC for purposes of timestamp()
ts = Timestamp.now()
uts = ts.replace(tzinfo=utc)
assert ts.timestamp() == uts.timestamp()
tsc = Timestamp("2014-10-11 11:00:01.12345678", tz="US/Central")
utsc = tsc.tz_convert("UTC")
# utsc is a different representation of the same time
assert tsc.timestamp() == utsc.timestamp()
# datetime.timestamp() converts in the local timezone
with tm.set_timezone("UTC"):
# should agree with datetime.timestamp method
dt = ts.to_pydatetime()
assert dt.timestamp() == ts.timestamp()
@pytest.mark.parametrize("fold", [0, 1])
def test_replace_preserves_fold(fold):
# GH 37610. Check that replace preserves Timestamp fold property
tz = gettz("Europe/Moscow")
ts = Timestamp(year=2009, month=10, day=25, hour=2, minute=30, fold=fold, tzinfo=tz)
ts_replaced = ts.replace(second=1)
assert ts_replaced.fold == fold
|
bsd-3-clause
|
procoder317/scikit-learn
|
examples/linear_model/plot_theilsen.py
|
232
|
3615
|
"""
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. As a result, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
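# For larger problems, the docstring above suggests bounding the number of
# considered subsamples; a hedged sketch (not used in this example) would be:
#   TheilSenRegressor(max_subpopulation=int(1e4), random_state=42)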
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
|
bsd-3-clause
|
anntzer/scikit-learn
|
sklearn/cluster/_spectral.py
|
3
|
23383
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import _deprecate_positional_args
from ..utils.deprecation import deprecated
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph, NearestNeighbors
from ..manifold import spectral_embedding
from ._kmeans import k_means
@_deprecate_positional_args
def discretize(vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like of shape (n_samples, n_clusters)
The embedding space of the samples.
copy : bool, default=True
Whether to copy vectors, or perform in-place normalization.
    max_svd_restarts : int, default=30
        Maximum number of attempts to restart SVD if convergence fails.
    n_iter_max : int, default=20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached.
random_state : int, RandomState instance, default=None
Determines random number generation for rotation matrix initialization.
Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # Count a restart only when the SVD actually fails, matching
                # the documented meaning of ``max_svd_restarts``.
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
@_deprecate_positional_args
def spectral_clustering(affinity, *, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans',
verbose=False):
"""Apply clustering to a projection of the normalized Laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance, when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
----------
affinity : {array-like, sparse matrix} of shape (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
    n_clusters : int, default=8
Number of clusters to extract.
n_components : int, default=n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
random_state : int, RandomState instance, default=None
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
the K-Means initialization. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
n_init : int, default=10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default='kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
verbose : bool, default=False
Verbosity mode.
.. versionadded:: 0.24
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
# The first eigen vector is constant only for fully connected graphs
# and should be kept for spectral clustering (drop_first = False)
# See spectral_embedding documentation.
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if verbose:
print(f'Computing label assignment using {assign_labels}')
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init, verbose=verbose)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(ClusterMixin, BaseEstimator):
"""Apply clustering to a projection of the normalized Laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either
    a kernel function such as the Gaussian (aka RBF) kernel applied to the
    Euclidean distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
----------
n_clusters : int, default=8
The dimension of the projection subspace.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
n_components : int, default=n_clusters
Number of eigen vectors to use for the spectral embedding
random_state : int, RandomState instance, default=None
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when ``eigen_solver='amg'`` and by
the K-Means initialization. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
n_init : int, default=10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
gamma : float, default=1.0
Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
Ignored for ``affinity='nearest_neighbors'``.
affinity : str or callable, default='rbf'
How to construct the affinity matrix.
- 'nearest_neighbors' : construct the affinity matrix by computing a
graph of nearest neighbors.
- 'rbf' : construct the affinity matrix using a radial basis function
(RBF) kernel.
- 'precomputed' : interpret ``X`` as a precomputed affinity matrix.
- 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph
of precomputed nearest neighbors, and constructs the affinity matrix
by selecting the ``n_neighbors`` nearest neighbors.
- one of the kernels supported by
:func:`~sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
n_neighbors : int, default=10
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when ``eigen_solver='arpack'``.
assign_labels : {'kmeans', 'discretize'}, default='kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : dict of str to any, default=None
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, default=None
The number of parallel jobs to run when `affinity='nearest_neighbors'`
or `affinity='precomputed_nearest_neighbors'`. The neighbors search
will be done in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool, default=False
Verbosity mode.
.. versionadded:: 0.24
Attributes
----------
affinity_matrix_ : array-like of shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
labels_ : ndarray of shape (n_samples,)
Labels of each point
Examples
--------
>>> from sklearn.cluster import SpectralClustering
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = SpectralClustering(n_clusters=2,
... assign_labels="discretize",
... random_state=0).fit(X)
>>> clustering.labels_
array([1, 1, 1, 0, 0, 0])
>>> clustering
SpectralClustering(assign_labels='discretize', n_clusters=2,
random_state=0)
Notes
-----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements and high values mean
    very dissimilar elements, it can be transformed into a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::
        np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
    where ``delta`` is a free parameter representing the width of the Gaussian
    kernel.
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
@_deprecate_positional_args
def __init__(self, n_clusters=8, *, eigen_solver=None, n_components=None,
random_state=None, n_init=10, gamma=1., affinity='rbf',
n_neighbors=10, eigen_tol=0.0, assign_labels='kmeans',
degree=3, coef0=1, kernel_params=None, n_jobs=None,
verbose=False):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.n_components = n_components
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y=None):
"""Perform spectral clustering from features, or affinity matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
array-like of shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse matrix is
provided in a format other than ``csr_matrix``, ``csc_matrix``,
or ``coo_matrix``, it will be converted into a sparse
``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
"""
X = self._validate_data(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64, ensure_min_samples=2)
allow_squared = self.affinity in ["precomputed",
"precomputed_nearest_neighbors"]
if X.shape[0] == X.shape[1] and not allow_squared:
            warnings.warn("The spectral clustering API has changed. ``fit`` "
                          "now constructs an affinity matrix from data. To use"
                          " a custom affinity matrix, "
                          "set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
include_self=True,
n_jobs=self.n_jobs)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed_nearest_neighbors':
estimator = NearestNeighbors(n_neighbors=self.n_neighbors,
n_jobs=self.n_jobs,
metric="precomputed").fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode='connectivity')
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels,
verbose=self.verbose)
return self
def fit_predict(self, X, y=None):
"""Perform spectral clustering from features, or affinity matrix,
and return cluster labels.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
array-like of shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse matrix is
provided in a format other than ``csr_matrix``, ``csc_matrix``,
or ``coo_matrix``, it will be converted into a sparse
``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels.
"""
return super().fit_predict(X, y)
def _more_tags(self):
return {'pairwise': self.affinity in ["precomputed",
"precomputed_nearest_neighbors"]}
# TODO: Remove in 1.1
# mypy error: Decorated property not supported
@deprecated("Attribute _pairwise was deprecated in " # type: ignore
"version 0.24 and will be removed in 1.1 (renaming of 0.26).")
@property
def _pairwise(self):
return self.affinity in ["precomputed",
"precomputed_nearest_neighbors"]
|
bsd-3-clause
|
lukas/scikit-class
|
examples/scikit/classifier-svm.py
|
2
|
1245
|
import pandas as pd
import numpy as np
# Get a pandas DataFrame object of all the data in the csv file:
df = pd.read_csv('tweets.csv')
# Get pandas Series object of the "tweet text" column:
text = df['tweet_text']
# Get pandas Series object of the "emotion" column:
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
# The rows of the "emotion" column have one of three strings:
# 'Positive emotion'
# 'Negative emotion'
# 'No emotion toward brand or product'
# Remove the blank rows from the series:
target = target[pd.notnull(text)]
text = text[pd.notnull(text)]
# Perform feature extraction:
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer()
count_vect.fit(text)
counts = count_vect.transform(text)
# Train with this data with an svm:
from sklearn.svm import SVC
clf = SVC()
clf.fit(counts, target)
# Try the classifier
print(clf.predict(count_vect.transform(['i do not love my iphone'])))
# See what the classifier predicts for some new tweets:
#for tweet in ('I love my iphone!!!', 'iphone costs too much!!!', 'the iphone is not good', 'I like turtles'):
# print('Tweet: ' + tweet)
# print('Prediction: ' + str(clf.predict(count_vect.transform([tweet]))))
# print('\n')
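# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original script): the same
# vectorize-then-classify pattern as a scikit-learn Pipeline, scored on
# held-out data instead of the data it was trained on. The tiny in-memory
# dataset below is invented for this sketch so it does not depend on
# tweets.csv.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
toy_text = ['i love my iphone', 'great phone, love it',
            'i hate this product', 'worst phone ever',
            'no opinion about it', 'just a tweet about turtles']
toy_target = ['Positive emotion', 'Positive emotion',
              'Negative emotion', 'Negative emotion',
              'No emotion toward brand or product',
              'No emotion toward brand or product']
train_text, test_text, train_y, test_y = train_test_split(
    toy_text, toy_target, test_size=0.5, random_state=0)
pipeline = make_pipeline(CountVectorizer(), SVC())
pipeline.fit(train_text, train_y)
print('held-out accuracy on the toy data:', pipeline.score(test_text, test_y))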
|
gpl-2.0
|
winklerand/pandas
|
pandas/tests/io/json/test_compression.py
|
3
|
4621
|
import pytest
import moto
import pandas as pd
from pandas import compat
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_raises_regex
COMPRESSION_TYPES = [None, 'bz2', 'gzip', 'xz']
def decompress_file(path, compression):
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.GzipFile(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.open(path, 'rb')
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
result = f.read().decode('utf8')
f.close()
return result
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_compression_roundtrip(compression):
if compression == 'xz':
tm._skip_if_no_lzma()
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
assert_frame_equal(df, pd.read_json(path, compression=compression))
# explicitly ensure file was compressed.
uncompressed_content = decompress_file(path, compression)
assert_frame_equal(df, pd.read_json(uncompressed_content))
def test_compress_zip_value_error():
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
import zipfile
pytest.raises(zipfile.BadZipfile, df.to_json, path, compression="zip")
def test_read_zipped_json():
uncompressed_path = tm.get_data_path("tsframe_v012.json")
uncompressed_df = pd.read_json(uncompressed_path)
compressed_path = tm.get_data_path("tsframe_v012.json.zip")
compressed_df = pd.read_json(compressed_path, compression='zip')
assert_frame_equal(uncompressed_df, compressed_df)
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_with_s3_url(compression):
boto3 = pytest.importorskip('boto3')
pytest.importorskip('s3fs')
if compression == 'xz':
tm._skip_if_no_lzma()
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with moto.mock_s3():
conn = boto3.resource("s3", region_name="us-east-1")
bucket = conn.create_bucket(Bucket="pandas-test")
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
with open(path, 'rb') as f:
bucket.put_object(Key='test-1', Body=f)
roundtripped_df = pd.read_json('s3://pandas-test/test-1',
compression=compression)
assert_frame_equal(df, roundtripped_df)
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_lines_with_compression(compression):
if compression == 'xz':
tm._skip_if_no_lzma()
with tm.ensure_clean() as path:
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
df.to_json(path, orient='records', lines=True, compression=compression)
roundtripped_df = pd.read_json(path, lines=True,
compression=compression)
assert_frame_equal(df, roundtripped_df)
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_chunksize_with_compression(compression):
if compression == 'xz':
tm._skip_if_no_lzma()
with tm.ensure_clean() as path:
df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')
df.to_json(path, orient='records', lines=True, compression=compression)
roundtripped_df = pd.concat(pd.read_json(path, lines=True, chunksize=1,
compression=compression))
assert_frame_equal(df, roundtripped_df)
def test_write_unsupported_compression_type():
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with tm.ensure_clean() as path:
msg = "Unrecognized compression type: unsupported"
assert_raises_regex(ValueError, msg, df.to_json,
path, compression="unsupported")
def test_read_unsupported_compression_type():
with tm.ensure_clean() as path:
msg = "Unrecognized compression type: unsupported"
assert_raises_regex(ValueError, msg, pd.read_json,
path, compression="unsupported")
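# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original test module): a minimal,
# self-contained gzip round-trip of the kind exercised above. The file name
# is local to this sketch, and the leading underscore keeps pytest from
# collecting it as a test.
def _example_gzip_roundtrip(path='example.json.gz'):
    import gzip
    import os
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    df.to_json(path, compression='gzip')
    # The file on disk is really gzip-compressed, and both the file and its
    # decompressed text round-trip to the original frame.
    with gzip.GzipFile(path, 'rb') as f:
        uncompressed_content = f.read().decode('utf8')
    assert_frame_equal(df, pd.read_json(path, compression='gzip'))
    assert_frame_equal(df, pd.read_json(uncompressed_content))
    os.remove(path)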
|
bsd-3-clause
|
ivano666/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/_sklearn.py
|
1
|
6597
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
      if callable(value):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns:
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class _NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
  # Use an integer cutoff and give the test split the remaining indices.
  train_size = int(train_size * args[0].shape[0])
  np.random.seed(random_state)
  indices = np.random.permutation(args[0].shape[0])
  train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
NotFittedError = _NotFittedError
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
NotFittedError = _NotFittedError
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
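# ------------------------------------------------------------------------------
# Illustrative aside (not part of the original module): how the
# '<component>__<parameter>' convention handled by set_params above behaves
# for a nested estimator. _Inner and _Outer are throwaway names used only in
# this sketch.
def _example_nested_params():
  class _Inner(_BaseEstimator):
    def __init__(self):
      self.alpha = 1.0
  class _Outer(_BaseEstimator):
    def __init__(self):
      self.inner = _Inner()
      self.c = 10
  outer = _Outer()
  # 'inner__alpha' is routed to the contained _Inner instance; 'c' is set
  # directly on _Outer.
  outer.set_params(inner__alpha=0.5, c=3)
  return outer.get_params(deep=True)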
|
apache-2.0
|
kate-v-stepanova/scilifelab
|
tests/full/test_production.py
|
4
|
21220
|
import shutil
import os
import logbook
import re
import yaml
import unittest
import pandas as pd
import numpy as np
try:
import drmaa
except:
pass
from ..classes import SciLifeTest
from classes import PmFullTest
from cement.core import handler
from scilifelab.pm.core.production import ProductionController
from scilifelab.utils.misc import filtered_walk, opt_to_dict
from scilifelab.bcbio.run import find_samples, setup_sample, run_bcbb_command, setup_merged_samples, sample_table, get_vcf_files, validate_sample_directories, _group_samples
from scilifelab.bcbio import merge_sample_config
LOG = logbook.Logger(__name__)
filedir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
j_doe_00_01 = os.path.abspath(os.path.join(filedir, "data", "production", "J.Doe_00_01"))
j_doe_00_04 = os.path.abspath(os.path.join(filedir, "data", "production", "J.Doe_00_04"))
j_doe_00_05 = os.path.abspath(os.path.join(filedir, "data", "production", "J.Doe_00_05"))
ANALYSIS_TYPE = 'Align_standard_seqcap'
GALAXY_CONFIG = os.path.abspath(os.path.join(filedir, "data", "config"))
SAMPLES = ['P001_101_index3', 'P001_102_index6']
FLOWCELL = '120924_AC003CCCXX'
FINISHED = {
'J.Doe_00_01': {'P001_101_index3': os.path.join(filedir, "data", "production", "J.Doe_00_01", SAMPLES[0], "FINISHED_AND_DELIVERED"),
'P001_102_index6': os.path.join(filedir, "data", "production", "J.Doe_00_01", SAMPLES[1], "FINISHED_AND_DELIVERED")},
'J.Doe_00_04': {'P001_101_index3': os.path.join(filedir, "data", "production", "J.Doe_00_04", SAMPLES[0], "FINISHED_AND_DELIVERED"),
'P001_102_index6': os.path.join(filedir, "data", "production", "J.Doe_00_04", SAMPLES[1], "FINISHED_AND_DELIVERED")}
}
REMOVED = {
'J.Doe_00_01': {'P001_101_index3': os.path.join(filedir, "data", "production", "J.Doe_00_01", SAMPLES[0], "FINISHED_AND_REMOVED"),
'P001_102_index6': os.path.join(filedir, "data", "production", "J.Doe_00_01", SAMPLES[1], "FINISHED_AND_REMOVED")},
'J.Doe_00_04': {'P001_101_index3': os.path.join(filedir, "data", "production", "J.Doe_00_04", SAMPLES[0], "FINISHED_AND_REMOVED"),
'P001_102_index6': os.path.join(filedir, "data", "production", "J.Doe_00_04", SAMPLES[1], "FINISHED_AND_REMOVED")}
}
class ProductionConsoleTest(PmFullTest):
"""Class for testing functions without drmaa"""
def setUp(self):
pass
def test_compression_suite(self):
"""Test various combinations of compression, decompression, cleaning"""
self.app = self.make_app(argv = ['production', 'decompress', 'J.Doe_00_01', '--debug', '--force', '--fastq', '-n'])
handler.register(ProductionController)
self._run_app()
l1 = self.app._output_data["stderr"].getvalue()
self.app = self.make_app(argv = ['production', 'decompress', 'J.Doe_00_01', '-f', FLOWCELL, '--debug', '--force', '--fastq', '-n'])
handler.register(ProductionController)
self._run_app()
l2 = self.app._output_data["stderr"].getvalue()
self.assertTrue(len(l1) > len(l2))
os.chdir(filedir)
def test_run(self):
"""Test various combinations of compression, decompression, cleaning"""
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_01', '--debug', '--force', '--fastq', '-n'])
handler.register(ProductionController)
self._run_app()
l1 = self.app._output_data["stderr"].getvalue()
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_01', '-f', FLOWCELL, '--debug', '--force', '--fastq', '-n'])
handler.register(ProductionController)
self._run_app()
l2 = self.app._output_data["stderr"].getvalue()
self.assertTrue(len(l1) > len(l2))
os.chdir(filedir)
@unittest.skipIf(not os.getenv("MAILTO"), "not running production test: set $MAILTO environment variable to your mail address to test mailsend")
@unittest.skipIf(not os.getenv("DRMAA_LIBRARY_PATH"), "not running production test: no $DRMAA_LIBRARY_PATH")
class ProductionTest(PmFullTest):
@classmethod
def setUpClass(cls):
if not os.getcwd() == filedir:
os.chdir(filedir)
LOG.info("Copy tree {} to {}".format(j_doe_00_01, j_doe_00_04))
if not os.path.exists(j_doe_00_04):
shutil.copytree(j_doe_00_01, j_doe_00_04)
## Set P001_102_index6 to use devel partition and require mailto environment variable for test
pp = os.path.join(j_doe_00_04, SAMPLES[1], FLOWCELL, "{}-post_process.yaml".format(SAMPLES[1]))
with open(pp) as fh:
config = yaml.load(fh)
platform_args = config["distributed"]["platform_args"].split()
platform_args[platform_args.index("-t") + 1] = "00:10:00"
if not "--mail-user={}".format(os.getenv("MAILTO")) in platform_args:
platform_args.extend(["--mail-user={}".format(os.getenv("MAILTO"))])
if not "--mail-type=ALL" in platform_args:
platform_args.extend(["--mail-type=ALL"])
config["distributed"]["platform_args"] = " ".join(platform_args)
with open(pp, "w") as fh:
fh.write(yaml.safe_dump(config, default_flow_style=False, allow_unicode=True, width=1000))
for k in FINISHED.keys():
for v in FINISHED[k].values():
if os.path.exists(v):
os.unlink(v)
for v in REMOVED[k].values():
if os.path.exists(v):
os.unlink(v)
## FIXME: since we're submitting jobs to drmaa, data will be
## removed before the pipeline has finished. One solution would be
## to run on one of the module production datasets
# @classmethod
# def tearDownClass(cls):
# LOG.info("Removing directory tree {}".format(j_doe_00_04))
# os.chdir(filedir)
# shutil.rmtree(j_doe_00_04)
def test_production_setup(self):
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_04', '--debug', '--force', '--only_setup', '--restart', '--drmaa'], extensions = ['scilifelab.pm.ext.ext_distributed'])
handler.register(ProductionController)
self._run_app()
os.chdir(filedir)
def test_production(self):
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_04', '--debug', '--force', '--amplicon', '--restart'])
handler.register(ProductionController)
self._run_app()
os.chdir(filedir)
def test_platform_args(self):
"""Test the platform arguments for a run"""
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_04', '--debug', '--force', '--amplicon', '--restart', '--sample', SAMPLES[1], '--drmaa'], extensions=['scilifelab.pm.ext.ext_distributed'])
handler.register(ProductionController)
self._run_app()
os.chdir(filedir)
def test_batch_submission(self):
"""Test that adding --batch groups commands into a batch submission"""
pp = os.path.join(j_doe_00_04, SAMPLES[1], FLOWCELL, "{}-post_process.yaml".format(SAMPLES[1]))
with open(pp) as fh:
config = yaml.load(fh)
platform_args = config["distributed"]["platform_args"].split()
account = platform_args[platform_args.index("-A")+1]
self.app = self.make_app(argv = ['production', 'compress', 'J.Doe_00_04', '--debug', '--force', '--jobname', 'batchsubmission', '--drmaa', '--batch', '--partition', 'devel', '--time', '01:00:00', '-A', account], extensions=['scilifelab.pm.ext.ext_distributed'])
handler.register(ProductionController)
self._run_app()
def test_change_platform_args(self):
"""Test that passing --time actually changes platform
arguments. These arguments should have precedence over
whatever is written in the config file."""
self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_04', '--debug', '--force', '--amplicon', '--restart', '--sample', SAMPLES[1], '--drmaa', '--time', '00:01:00', '-n'], extensions=['scilifelab.pm.ext.ext_distributed'])
handler.register(ProductionController)
self._run_app()
os.chdir(filedir)
def test_casava_transfer(self):
"""Test transfer of casava data from production to project"""
self.app = self.make_app(argv = ['production', 'transfer', 'J.Doe_00_03', '--debug', '--force', '--quiet'], extensions=[])
handler.register(ProductionController)
self._run_app()
os.chdir(filedir)
j_doe_00_03 = os.path.abspath(os.path.join(filedir, "data", "projects", "j_doe_00_03"))
pattern = ".fastq(.gz)?$"
def fastq_filter(f):
return re.search(pattern, f) != None
fastq_files = filtered_walk(j_doe_00_03, fastq_filter)
self.assertEqual(len(fastq_files), 2)
def test_touch_finished(self):
"""Test touching finished files"""
self.app = self.make_app(argv = ['production', 'touch-finished', 'J.Doe_00_01', '--debug', '--force', '--sample', SAMPLES[0]], extensions=[])
handler.register(ProductionController)
self._run_app()
self.assertTrue(os.path.exists(FINISHED['J.Doe_00_01'][SAMPLES[0]]))
samplefile = os.path.join(filedir, "data", "production", "J.Doe_00_01", "finished_sample.txt")
with open(samplefile, "w") as fh:
fh.write(SAMPLES[0] + "\n")
fh.write(SAMPLES[1] + "\n")
self.app = self.make_app(argv = ['production', 'touch-finished', 'J.Doe_00_01', '--debug', '--force', '--sample', samplefile], extensions=[])
handler.register(ProductionController)
self._run_app()
self.assertTrue(os.path.exists(FINISHED['J.Doe_00_01'][SAMPLES[1]]))
## Make sure rsync fails
self.app = self.make_app(argv = ['production', 'touch-finished', 'J.Doe_00_01', '--debug', '--force', '--sample', samplefile], extensions=[])
handler.register(ProductionController)
try:
self.app.setup()
self.app.config.set("runqc", "root", self.app.config.get("runqc", "root").replace("production", "projects"))
with self.app.log.log_setup.applicationbound():
self.app.run()
self.app.render(self.app._output_data)
finally:
self.app.close()
def test_remove_finished(self):
self.app = self.make_app(argv = ['production', 'touch-finished', 'J.Doe_00_04', '--debug', '--force', '--sample', SAMPLES[1]], extensions=[])
handler.register(ProductionController)
self._run_app()
self.assertTrue(os.path.exists(FINISHED['J.Doe_00_04'][SAMPLES[1]]))
## Remove file, dry
self.app = self.make_app(argv = ['production', 'remove-finished', 'J.Doe_00_04', '--debug', '--force', '-n'], extensions=[])
handler.register(ProductionController)
self._run_app()
class UtilsTest(SciLifeTest):
@classmethod
def setUpClass(cls):
if not os.getcwd() == filedir:
os.chdir(filedir)
LOG.info("Copy tree {} to {}".format(j_doe_00_01, j_doe_00_05))
if not os.path.exists(j_doe_00_05):
shutil.copytree(j_doe_00_01, j_doe_00_05)
with open(os.path.join(j_doe_00_05, "samples.txt"), "w") as fh:
fh.write("\n\nP001_101_index3\nP001_104_index3")
with open(os.path.join(j_doe_00_05, "samples2.txt"), "w") as fh:
fh.write("\n\nP001_101_index3-bcbb-config.yaml")
@classmethod
def tearDownClass(cls):
LOG.info("Removing directory tree {}".format(j_doe_00_05))
os.chdir(filedir)
shutil.rmtree(j_doe_00_05)
def test_find_samples(self):
"""Test finding samples"""
flist = find_samples(j_doe_00_05)
self.assertIn(len(flist), [3,4])
flist = find_samples(j_doe_00_05, **{'only_failed':True})
self.assertIn(len(flist), [0,1])
def test_find_samples_from_file(self):
"""Find samples defined in file with empty lines and erroneous names"""
with open(os.path.join(j_doe_00_05, "P001_101_index3-bcbb-config.yaml"), "w") as fh:
fh.write("\n")
flist = find_samples(j_doe_00_05, sample=os.path.join(j_doe_00_05, "samples.txt"))
validate_sample_directories(flist, j_doe_00_05)
self.assertEqual(len(flist),2)
os.unlink(os.path.join(j_doe_00_05, "P001_101_index3-bcbb-config.yaml"))
def test_find_samples_from_file_with_yaml(self):
"""Find samples defined in file with empty lines and a bcbb-config.yaml file lying directly under root directory"""
flist = find_samples(j_doe_00_05, sample=os.path.join(j_doe_00_05, "samples2.txt"))
args = [flist, j_doe_00_05]
self.assertRaises(Exception, validate_sample_directories, *args)
def test_setup_merged_samples(self):
"""Test setting up merged samples"""
flist = find_samples(j_doe_00_05)
setup_merged_samples(flist, **{'dry_run':False})
with open(os.path.join(j_doe_00_05, "P001_101_index3", "TOTAL", "P001_101_index3-bcbb-config.yaml")) as fh:
conf = yaml.load(fh)
self.assertEqual(conf["details"][0]["files"][0], os.path.join(j_doe_00_05, "P001_101_index3", "TOTAL", "P001_101_index3_B002BBBXX_TGACCA_L001_R1_001.fastq.gz"))
def test_merge_sample_config(self):
"""Test merging sample configuration files"""
flist = find_samples(j_doe_00_05)
fdict = _group_samples(flist)
out_d = os.path.join(j_doe_00_05, "P001_101_index3", "TOTAL")
if not os.path.exists(out_d):
os.makedirs(out_d)
newconf = merge_sample_config(fdict["P001_101_index3"].values(), "P001_101_index3", out_d=out_d, dry_run=False)
self.assertTrue(os.path.exists(os.path.join(j_doe_00_05, "P001_101_index3", "TOTAL", "P001_101_index3_B002BBBXX_TGACCA_L001_R1_001.fastq.gz" )))
self.assertTrue(os.path.exists(os.path.join(j_doe_00_05, "P001_101_index3", "TOTAL", "P001_101_index3_C003CCCXX_TGACCA_L001_R1_001.fastq.gz" )))
def test_setup_samples(self):
"""Test setting up samples, changing genome to rn4"""
flist = find_samples(j_doe_00_05)
for f in flist:
setup_sample(f, **{'analysis':'Align_standard_seqcap', 'genome_build':'rn4', 'dry_run':False, 'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'num_cores':8, 'distributed':False})
for f in flist:
with open(f, "r") as fh:
config = yaml.load(fh)
if config["details"][0].get("multiplex", None):
self.assertEqual(config["details"][0]["multiplex"][0]["genome_build"], "rn4")
else:
self.assertEqual(config["details"][0]["genome_build"], "rn4")
with open(f.replace("-bcbb-config.yaml", "-post_process.yaml")) as fh:
config = yaml.load(fh)
self.assertEqual(config["custom_algorithms"][ANALYSIS_TYPE]["hybrid_bait"], 'rat_baits.interval_list')
self.assertEqual(config["custom_algorithms"][ANALYSIS_TYPE]["hybrid_target"], 'rat_targets.interval_list')
self.assertEqual(config["algorithm"]["num_cores"], 8)
for f in flist:
setup_sample(f, **{'analysis':ANALYSIS_TYPE, 'genome_build':'rn4', 'dry_run':False,
'no_only_run':True, 'google_report':True,
'dry_run':False, 'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'amplicon':True, 'num_cores':8, 'distributed':False})
with open(f, "r") as fh:
config = yaml.load(fh)
if config["details"][0].get("multiplex", None):
self.assertEqual(config["details"][0]["multiplex"][0]["genome_build"], "rn4")
else:
self.assertEqual(config["details"][0]["genome_build"], "rn4")
with open(f.replace("-bcbb-config.yaml", "-post_process.yaml")) as fh:
config = yaml.load(fh)
self.assertEqual(config["algorithm"]["mark_duplicates"], False)
self.assertEqual(config["custom_algorithms"][ANALYSIS_TYPE]["mark_duplicates"], False)
def test_remove_files(self):
"""Test removing files"""
keep_files = ["-post_process.yaml$", "-post_process.yaml.bak$", "-bcbb-config.yaml$", "-bcbb-config.yaml.bak$", "-bcbb-command.txt$", "-bcbb-command.txt.bak$", "_[0-9]+.fastq$", "_[0-9]+.fastq.gz$", "^[0-9][0-9]_.*.txt$"]
pattern = "|".join(keep_files)
def remove_filter_fn(f):
return re.search(pattern, f) == None
flist = find_samples(j_doe_00_05)
for f in flist:
workdir = os.path.dirname(f)
remove_files = filtered_walk(workdir, remove_filter_fn)
self.assertNotIn("01_analysis_start.txt", [os.path.basename(x) for x in remove_files])
def test_remove_dirs(self):
"""Test removing directories before rerunning pipeline"""
keep_files = ["-post_process.yaml$", "-post_process.yaml.bak$", "-bcbb-config.yaml$", "-bcbb-config.yaml.bak$", "-bcbb-command.txt$", "-bcbb-command.txt.bak$", "_[0-9]+.fastq$", "_[0-9]+.fastq.gz$"]
pattern = "|".join(keep_files)
def remove_filter_fn(f):
return re.search(pattern, f) == None
flist = find_samples(j_doe_00_05)
for f in flist:
workdir = os.path.dirname(f)
remove_dirs = filtered_walk(workdir, remove_filter_fn, get_dirs=True)
self.assertIn("fastqc", [os.path.basename(x) for x in remove_dirs])
def test_bcbb_command(self):
"""Test output from command, changing analysis to amplicon and
setting targets and baits"""
flist = find_samples(j_doe_00_05)
for f in flist:
setup_sample(f, **{'analysis':ANALYSIS_TYPE, 'genome_build':'rn4', 'dry_run':False,
'no_only_run':False, 'google_report':False,
'dry_run':False, 'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'amplicon':True, 'num_cores':8, 'distributed':False})
with open(f.replace("-bcbb-config.yaml", "-bcbb-command.txt")) as fh:
cl = fh.read().split()
(cl, platform_args) = run_bcbb_command(f)
self.assertIn("automated_initial_analysis.py",cl)
setup_sample(f, **{'analysis':ANALYSIS_TYPE, 'genome_build':'rn4', 'dry_run':False,
'no_only_run':False, 'google_report':False,
'dry_run':False, 'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'amplicon':True, 'num_cores':8, 'distributed':True})
with open(f.replace("-bcbb-config.yaml", "-bcbb-command.txt")) as fh:
cl = fh.read().split()
(cl, platform_args) = run_bcbb_command(f)
self.assertIn("distributed_nextgen_pipeline.py",cl)
def test_global_post_process(self):
"""Test that when using a "global" post_process, jobname,
output, error and output directory are updated.
"""
flist = find_samples(j_doe_00_05)
pp = os.path.join(j_doe_00_01, SAMPLES[1], FLOWCELL, "{}-post_process.yaml".format(SAMPLES[1]))
with open(pp) as fh:
postprocess = yaml.load(fh)
for f in flist:
(cl, platform_args) = run_bcbb_command(f, pp)
self.assertIn("--error", platform_args)
self.assertEqual(platform_args[platform_args.index("--error") + 1], f.replace("-bcbb-config.yaml", "-bcbb.err"))
@unittest.skipIf(not os.getenv("DRMAA_LIBRARY_PATH"), "not running UtilsTest.test_platform: no $DRMAA_LIBRARY_PATH")
def test_platform_args(self):
"""Test making platform args and changing them on the fly. """
from scilifelab.pm.ext.ext_distributed import make_job_template_args
pp = os.path.join(j_doe_00_05, SAMPLES[1], FLOWCELL, "{}-post_process.yaml".format(SAMPLES[1]))
with open(pp) as fh:
config = yaml.load(fh)
platform_args = config["distributed"]["platform_args"].split()
self.assertIn("core", platform_args)
pargs = opt_to_dict(platform_args)
self.assertEqual("P001_102_index6-bcbb.log", pargs['-o'])
kw = {'time':'00:01:00', 'jobname':'test', 'partition':'devel'}
pargs = make_job_template_args(pargs, **kw)
self.assertEqual("devel", pargs['partition'])
nativeSpec = "-t {time} -p {partition} -A {account}".format(**pargs)
self.assertEqual("00:01:00", nativeSpec[3:11])
def test_sample_table(self):
"""Test making a sample table"""
flist = find_samples(j_doe_00_01)
samples = sample_table(flist)
grouped = samples.groupby("sample")
self.assertEqual(len(grouped.groups["P001_101_index3"]), 2)
self.assertEqual(len(grouped.groups["P001_102_index6"]), 1)
def test_summarize_variants(self):
"""Test summarizing variants"""
flist = find_samples(j_doe_00_01)
vcf_d = get_vcf_files(flist)
|
mit
|
jreback/pandas
|
pandas/tests/indexes/multi/test_constructors.py
|
1
|
26477
|
from datetime import date, datetime
import itertools
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, Series, date_range
import pandas._testing as tm
def test_constructor_single_level():
result = MultiIndex(
levels=[["foo", "bar", "baz", "qux"]], codes=[[0, 1, 2, 3]], names=["first"]
)
assert isinstance(result, MultiIndex)
expected = Index(["foo", "bar", "baz", "qux"], name="first")
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ["first"]
def test_constructor_no_levels():
msg = "non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=[], codes=[])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=[])
with pytest.raises(TypeError, match=msg):
MultiIndex(codes=[])
def test_constructor_nonhashable_names():
# GH 20527
levels = [[1, 2], ["one", "two"]]
codes = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = (["foo"], ["bar"])
msg = r"MultiIndex\.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=levels, codes=codes, names=names)
# With .rename()
mi = MultiIndex(
levels=[[1, 2], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=("foo", "bar"),
)
renamed = [["foor"], ["barr"]]
with pytest.raises(TypeError, match=msg):
mi.rename(names=renamed)
# With .set_names()
with pytest.raises(TypeError, match=msg):
mi.set_names(names=renamed)
def test_constructor_mismatched_codes_levels(idx):
codes = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
msg = "Length of levels and codes must be the same"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=levels, codes=codes)
length_error = (
r"On level 0, code max \(3\) >= length of level \(1\)\. "
"NOTE: this index is in an inconsistent state"
)
label_error = r"Unequal code lengths: \[4, 2\]"
code_value_error = r"On level 0, code value \(-2\) < -1"
# important to check that it's looking at the right thing.
with pytest.raises(ValueError, match=length_error):
MultiIndex(levels=[["a"], ["b"]], codes=[[0, 1, 2, 3], [0, 3, 4, 1]])
with pytest.raises(ValueError, match=label_error):
MultiIndex(levels=[["a"], ["b"]], codes=[[0, 0, 0, 0], [0, 0]])
# external API
with pytest.raises(ValueError, match=length_error):
idx.copy().set_levels([["a"], ["b"]])
with pytest.raises(ValueError, match=label_error):
idx.copy().set_codes([[0, 0, 0, 0], [0, 0]])
# test set_codes with verify_integrity=False
# the setting should not raise any value error
idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]], verify_integrity=False)
# code value smaller than -1
with pytest.raises(ValueError, match=code_value_error):
MultiIndex(levels=[["a"], ["b"]], codes=[[0, -2], [0, 0]])
def test_na_levels():
# GH26408
# test if codes are re-assigned value -1 for levels
    # with missing values (NaN, NaT, None)
result = MultiIndex(
levels=[[np.nan, None, pd.NaT, 128, 2]], codes=[[0, -1, 1, 2, 3, 4]]
)
expected = MultiIndex(
levels=[[np.nan, None, pd.NaT, 128, 2]], codes=[[-1, -1, -1, -1, 3, 4]]
)
tm.assert_index_equal(result, expected)
result = MultiIndex(
levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[0, -1, 1, 2, 3, 4]]
)
expected = MultiIndex(
levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[-1, -1, 1, -1, 3, -1]]
)
tm.assert_index_equal(result, expected)
# verify set_levels and set_codes
result = MultiIndex(
levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]
).set_levels([[np.nan, "s", pd.NaT, 128, None]])
tm.assert_index_equal(result, expected)
result = MultiIndex(
levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[1, 2, 2, 2, 2, 2]]
).set_codes([[0, -1, 1, 2, 3, 4]])
tm.assert_index_equal(result, expected)
def test_copy_in_constructor():
levels = np.array(["a", "b", "c"])
codes = np.array([1, 1, 2, 0, 0, 1, 1])
val = codes[0]
mi = MultiIndex(levels=[levels, levels], codes=[codes, codes], copy=True)
assert mi.codes[0][0] == val
codes[0] = 15
assert mi.codes[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
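# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original test module): how ``levels``
# and ``codes`` relate in the constructors exercised above -- each code is a
# positional index into the corresponding level, and -1 marks a missing
# value. The helper name is local to this sketch and is not collected by
# pytest.
def _example_levels_and_codes():
    mi = MultiIndex(
        levels=[["a", "b"], [1, 2]],
        codes=[[0, 0, 1, -1], [0, 1, 0, 1]],
    )
    # Codes are decoded against the levels; the -1 code becomes NaN.
    assert mi[0] == ("a", 1)
    assert mi[2] == ("b", 1)
    return mi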
# ----------------------------------------------------------------------------
# from_arrays
# ----------------------------------------------------------------------------
def test_from_arrays(idx):
arrays = [
np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)
]
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp("20130101")], ["a", "b"]])
assert result.levels[0].equals(Index([Timestamp("20130101")]))
assert result.levels[1].equals(Index(["a", "b"]))
def test_from_arrays_iterator(idx):
# GH 18434
arrays = [
np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)
]
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=idx.names)
tm.assert_index_equal(result, idx)
# invalid iterator input
msg = "Input must be a list / sequence of array-likes."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(0)
def test_from_arrays_tuples(idx):
arrays = tuple(
tuple(np.asarray(lev).take(level_codes))
for lev, level_codes in zip(idx.levels, idx.codes)
)
# tuple of tuples as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
def test_from_arrays_index_series_datetimetz():
idx1 = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
idx2 = date_range("2015-01-01 10:00", freq="H", periods=3, tz="Asia/Tokyo")
result = MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta():
idx1 = pd.timedelta_range("1 days", freq="D", periods=3)
idx2 = pd.timedelta_range("2 hours", freq="H", periods=3)
result = MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period():
idx1 = pd.period_range("2011-01-01", freq="D", periods=3)
idx2 = pd.period_range("2015-01-01", freq="H", periods=3)
result = MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed():
idx1 = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
idx2 = date_range("2015-01-01 10:00", freq="H", periods=3)
idx3 = pd.timedelta_range("1 days", freq="D", periods=3)
idx4 = pd.period_range("2011-01-01", freq="D", periods=3)
result = MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = MultiIndex.from_arrays(
[Series(idx1), Series(idx2), Series(idx3), Series(idx4)]
)
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical():
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=True)
result = MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=["A"])
assert isinstance(result, MultiIndex)
expected = Index([], name="A")
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ["A"]
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list("ABC")[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, codes=[[]] * N, names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"invalid_sequence_of_arrays",
[
1,
[1],
[1, 2],
[[1], 2],
[1, [2]],
"a",
["a"],
["a", "b"],
[["a"], "b"],
(1,),
(1, 2),
([1], 2),
(1, [2]),
"a",
("a",),
("a", "b"),
(["a"], "b"),
[(1,), 2],
[1, (2,)],
[("a",), "b"],
((1,), 2),
(1, (2,)),
(("a",), "b"),
],
)
def test_from_arrays_invalid_input(invalid_sequence_of_arrays):
msg = "Input must be a list / sequence of array-likes"
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays)
@pytest.mark.parametrize(
"idx1, idx2", [([1, 2, 3], ["a", "b"]), ([], ["a", "b"]), ([1, 2, 3], [])]
)
def test_from_arrays_different_lengths(idx1, idx2):
# see gh-13599
msg = "^all arrays must be same length$"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays([idx1, idx2])
def test_from_arrays_respects_none_names():
# GH27292
a = Series([1, 2, 3], name="foo")
b = Series(["a", "b", "c"], name="bar")
result = MultiIndex.from_arrays([a, b], names=None)
expected = MultiIndex(
levels=[[1, 2, 3], ["a", "b", "c"]], codes=[[0, 1, 2], [0, 1, 2]], names=None
)
tm.assert_index_equal(result, expected)
# ----------------------------------------------------------------------------
# from_tuples
# ----------------------------------------------------------------------------
def test_from_tuples():
msg = "Cannot infer number of levels from empty list"
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples([])
expected = MultiIndex(
levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"]
)
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=["a", "b"])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator():
# GH 18434
# input iterator for tuples
expected = MultiIndex(
levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"]
)
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=["a", "b"])
tm.assert_index_equal(result, expected)
# input non-iterables
msg = "Input must be a list / sequence of tuple-likes."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples(0)
def test_from_tuples_empty():
# GH 16777
result = MultiIndex.from_tuples([], names=["a", "b"])
expected = MultiIndex.from_arrays(arrays=[[], []], names=["a", "b"])
tm.assert_index_equal(result, expected)
def test_from_tuples_index_values(idx):
result = MultiIndex.from_tuples(idx)
assert (result.values == idx.values).all()
def test_tuples_with_name_string():
# GH 15110 and GH 14848
li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
Index(li, name="abc")
with pytest.raises(ValueError, match=msg):
Index(li, name="a")
def test_from_tuples_with_tuple_label():
# GH 15457
expected = pd.DataFrame(
[[2, 1, 2], [4, (1, 2), 3]], columns=["a", "b", "c"]
).set_index(["a", "b"])
idx = MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=("a", "b"))
result = pd.DataFrame([2, 3], columns=["c"], index=idx)
tm.assert_frame_equal(expected, result)
# ----------------------------------------------------------------------------
# from_product
# ----------------------------------------------------------------------------
def test_from_product_empty_zero_levels():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_product([])
def test_from_product_empty_one_level():
result = MultiIndex.from_product([[]], names=["A"])
expected = Index([], name="A")
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ["A"]
@pytest.mark.parametrize(
"first, second", [([], []), (["foo", "bar", "baz"], []), ([], ["a", "b", "c"])]
)
def test_from_product_empty_two_levels(first, second):
names = ["A", "B"]
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second], codes=[[], []], names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("N", list(range(4)))
def test_from_product_empty_three_levels(N):
# GH12258
names = ["A", "B", "C"]
lvl2 = list(range(N))
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []], codes=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"invalid_input", [1, [1], [1, 2], [[1], 2], "a", ["a"], ["a", "b"], [["a"], "b"]]
)
def test_from_product_invalid_input(invalid_input):
msg = r"Input must be a list / sequence of iterables|Input must be list-like"
with pytest.raises(TypeError, match=msg):
MultiIndex.from_product(iterables=invalid_input)
def test_from_product_datetimeindex():
dt_index = date_range("2000-01-01", periods=2)
mi = MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike(
[
(1, Timestamp("2000-01-01")),
(1, Timestamp("2000-01-02")),
(2, Timestamp("2000-01-01")),
(2, Timestamp("2000-01-02")),
]
)
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_rangeindex():
# RangeIndex is preserved by factorize, so preserved in levels
rng = Index(range(5))
other = ["a", "b"]
mi = MultiIndex.from_product([rng, other])
tm.assert_index_equal(mi._levels[0], rng, exact=True)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize("f", [lambda x: x, lambda x: Series(x), lambda x: x.values])
def test_from_product_index_series_categorical(ordered, f):
# GH13743
first = ["foo", "bar"]
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=ordered)
expected = pd.CategoricalIndex(
list("abcaab") + list("abcaab"), categories=list("bac"), ordered=ordered
)
result = MultiIndex.from_product([first, f(idx)])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_from_product():
first = ["foo", "bar", "buz"]
second = ["a", "b", "c"]
names = ["first", "second"]
result = MultiIndex.from_product([first, second], names=names)
tuples = [
("foo", "a"),
("foo", "b"),
("foo", "c"),
("bar", "a"),
("bar", "b"),
("bar", "c"),
("buz", "a"),
("buz", "b"),
("buz", "c"),
]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator():
# GH 18434
first = ["foo", "bar", "buz"]
second = ["a", "b", "c"]
names = ["first", "second"]
tuples = [
("foo", "a"),
("foo", "b"),
("foo", "c"),
("bar", "a"),
("bar", "b"),
("bar", "c"),
("buz", "a"),
("buz", "b"),
("buz", "c"),
]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
msg = "Input must be a list / sequence of iterables."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_product(0)
@pytest.mark.parametrize(
"a, b, expected_names",
[
(
Series([1, 2, 3], name="foo"),
Series(["a", "b"], name="bar"),
["foo", "bar"],
),
(Series([1, 2, 3], name="foo"), ["a", "b"], ["foo", None]),
([1, 2, 3], ["a", "b"], None),
],
)
def test_from_product_infer_names(a, b, expected_names):
# GH27292
result = MultiIndex.from_product([a, b])
expected = MultiIndex(
levels=[[1, 2, 3], ["a", "b"]],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=expected_names,
)
tm.assert_index_equal(result, expected)
def test_from_product_respects_none_names():
# GH27292
a = Series([1, 2, 3], name="foo")
b = Series(["a", "b"], name="bar")
result = MultiIndex.from_product([a, b], names=None)
expected = MultiIndex(
levels=[[1, 2, 3], ["a", "b"]],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=None,
)
tm.assert_index_equal(result, expected)
def test_from_product_readonly():
# GH#15286 passing read-only array to from_product
a = np.array(range(3))
b = ["a", "b"]
expected = MultiIndex.from_product([a, b])
a.setflags(write=False)
result = MultiIndex.from_product([a, b])
tm.assert_index_equal(result, expected)
def test_create_index_existing_name(idx):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
index = idx
index.names = ["foo", "bar"]
result = Index(index)
expected = Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
)
)
tm.assert_index_equal(result, expected)
result = Index(index, name="A")
expected = Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
name="A",
)
tm.assert_index_equal(result, expected)
# ----------------------------------------------------------------------------
# from_frame
# ----------------------------------------------------------------------------
def test_from_frame():
# GH 22420
df = pd.DataFrame(
[["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], columns=["L1", "L2"]
)
expected = MultiIndex.from_tuples(
[("a", "a"), ("a", "b"), ("b", "a"), ("b", "b")], names=["L1", "L2"]
)
result = MultiIndex.from_frame(df)
tm.assert_index_equal(expected, result)
@pytest.mark.parametrize(
"non_frame",
[
Series([1, 2, 3, 4]),
[1, 2, 3, 4],
[[1, 2], [3, 4], [5, 6]],
Index([1, 2, 3, 4]),
np.array([[1, 2], [3, 4], [5, 6]]),
27,
],
)
def test_from_frame_error(non_frame):
# GH 22420
with pytest.raises(TypeError, match="Input must be a DataFrame"):
MultiIndex.from_frame(non_frame)
def test_from_frame_dtype_fidelity():
# GH 22420
df = pd.DataFrame(
{
"dates": date_range("19910905", periods=6, tz="US/Eastern"),
"a": [1, 1, 1, 2, 2, 2],
"b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
"c": ["x", "x", "y", "z", "x", "y"],
}
)
original_dtypes = df.dtypes.to_dict()
expected_mi = MultiIndex.from_arrays(
[
date_range("19910905", periods=6, tz="US/Eastern"),
[1, 1, 1, 2, 2, 2],
pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
["x", "x", "y", "z", "x", "y"],
],
names=["dates", "a", "b", "c"],
)
mi = MultiIndex.from_frame(df)
mi_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)}
tm.assert_index_equal(expected_mi, mi)
assert original_dtypes == mi_dtypes
@pytest.mark.parametrize(
"names_in,names_out", [(None, [("L1", "x"), ("L2", "y")]), (["x", "y"], ["x", "y"])]
)
def test_from_frame_valid_names(names_in, names_out):
# GH 22420
df = pd.DataFrame(
[["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]],
columns=MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]),
)
mi = MultiIndex.from_frame(df, names=names_in)
assert mi.names == names_out
@pytest.mark.parametrize(
"names,expected_error_msg",
[
("bad_input", "Names should be list-like for a MultiIndex"),
(["a", "b", "c"], "Length of names must match number of levels in MultiIndex"),
],
)
def test_from_frame_invalid_names(names, expected_error_msg):
# GH 22420
df = pd.DataFrame(
[["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]],
columns=MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]),
)
with pytest.raises(ValueError, match=expected_error_msg):
MultiIndex.from_frame(df, names=names)
def test_index_equal_empty_iterable():
# #16844
a = MultiIndex(levels=[[], []], codes=[[], []], names=["a", "b"])
b = MultiIndex.from_arrays(arrays=[[], []], names=["a", "b"])
tm.assert_index_equal(a, b)
def test_raise_invalid_sortorder():
    # Test that the MultiIndex constructor raises when an incorrect sortorder is given
# GH#28518
levels = [[0, 1], [0, 1, 2]]
# Correct sortorder
MultiIndex(
levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], sortorder=2
)
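    # In the failing cases below the codes are not lexsorted to the claimed
    # depth: the second-level codes are unsorted within a first-level group
    # (lexsort_depth 1), or the first-level codes themselves are unsorted
    # (lexsort_depth 0).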
with pytest.raises(ValueError, match=r".* sortorder 2 with lexsort_depth 1.*"):
MultiIndex(
levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=2
)
with pytest.raises(ValueError, match=r".* sortorder 1 with lexsort_depth 0.*"):
MultiIndex(
levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=1
)
def test_datetimeindex():
idx1 = pd.DatetimeIndex(
["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, tz="Asia/Tokyo"
)
idx2 = date_range("2010/01/01", periods=6, freq="M", tz="US/Eastern")
idx = MultiIndex.from_arrays([idx1, idx2])
expected1 = pd.DatetimeIndex(
["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"], tz="Asia/Tokyo"
)
tm.assert_index_equal(idx.levels[0], expected1)
tm.assert_index_equal(idx.levels[1], idx2)
# from datetime combos
# GH 7888
date1 = np.datetime64("today")
date2 = datetime.today()
date3 = Timestamp.today()
for d1, d2 in itertools.product([date1, date2, date3], [date1, date2, date3]):
index = MultiIndex.from_product([[d1], [d2]])
assert isinstance(index.levels[0], pd.DatetimeIndex)
assert isinstance(index.levels[1], pd.DatetimeIndex)
# but NOT date objects, matching Index behavior
date4 = date.today()
index = MultiIndex.from_product([[date4], [date2]])
assert not isinstance(index.levels[0], pd.DatetimeIndex)
assert isinstance(index.levels[1], pd.DatetimeIndex)
def test_constructor_with_tz():
index = pd.DatetimeIndex(
["2013/01/01 09:00", "2013/01/02 09:00"], name="dt1", tz="US/Pacific"
)
columns = pd.DatetimeIndex(
["2014/01/01 09:00", "2014/01/02 09:00"], name="dt2", tz="Asia/Tokyo"
)
result = MultiIndex.from_arrays([index, columns])
assert result.names == ["dt1", "dt2"]
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
result = MultiIndex.from_arrays([Series(index), Series(columns)])
assert result.names == ["dt1", "dt2"]
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
def test_multiindex_inference_consistency():
# check that inference behavior matches the base class
v = date.today()
arr = [v, v]
idx = Index(arr)
assert idx.dtype == object
mi = MultiIndex.from_arrays([arr])
lev = mi.levels[0]
assert lev.dtype == object
mi = MultiIndex.from_product([arr])
lev = mi.levels[0]
assert lev.dtype == object
mi = MultiIndex.from_tuples([(x,) for x in arr])
lev = mi.levels[0]
assert lev.dtype == object
|
bsd-3-clause
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/sklearn/metrics/tests/test_classification.py
|
10
|
62931
|
from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import MockDataFrame
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.exceptions import UndefinedMetricWarning
from scipy.spatial.distance import hamming as sp_hamming
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
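        # F-beta with beta=2: (1 + beta**2) * p * r / (beta**2 * p + r)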
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
    # Test that the average_precision_score function raises an error when
    # trying to compute it for a multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require different processing
    # than when computing the AUC of a ROC, because the precision-recall
    # curve is a decreasing curve.
    # The following situation corresponds to a perfect
    # test statistic: the average_precision_score should be 1.
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
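    # every positive sample scores at least 0.5 while every negative scores at
    # most 0.4, so precision is 1 at every recall level and the AP is 1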
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here, if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've correctly
    # sorted our classifications. But in fact the first two values
    # have the same score (0.5), so the first two values could be
    # swapped around, creating an imperfect sorting. This imperfection
    # should come through in the end score, making it less than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='binary')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_precision_recall_f_unused_pos_label():
    # Check the warning that pos_label is ignored when set to a non-default
    # value but average != 'binary', even if the data is binary.
assert_warns_message(UserWarning,
"Note that pos_label (set to 2) is "
"ignored when average != 'binary' (got 'macro'). You "
"may use labels=[pos_label] to specify a single "
"positive class.", precision_recall_fscore_support,
[1, 2, 1], [1, 2, 2], pos_label=2, average='macro')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
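        # with cm = [[22, 3], [8, 17]]: num = 22 * 17 - 3 * 8 = 350 and
        # den = sqrt(25 * 30 * 20 * 25) ~ 612.4, so the MCC is ~ 0.57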
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
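    # observed agreement p_o = 70 / 100 = 0.70; chance agreement
    # p_e = (40 * 30 + 60 * 70) / 100 ** 2 = 0.54, giving
    # kappa = (0.70 - 0.54) / (1 - 0.54) ~ 0.348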
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
# Weighting example: none, linear, quadratic.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 50 + [1] * 40 + [2] * 10)
assert_almost_equal(cohen_kappa_score(y1, y2), .9315, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="linear"), .9412, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="quadratic"), .9541, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_matthews_corrcoef_against_numpy_corrcoef():
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
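    # for binary 0/1 labels the MCC coincides with the Pearson correlation
    # coefficient of the two label vectors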
assert_almost_equal(matthews_corrcoef(y_true, y_pred),
np.corrcoef(y_true, y_pred)[0, 1], 10)
def test_matthews_corrcoef_against_jurman():
# Check that the multiclass matthews_corrcoef agrees with the definition
# presented in Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC
# and CEN Error Measures in MultiClass Prediction
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
sample_weight = rng.rand(20)
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
N = len(C)
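    # generalized MCC: covariance between the true and predicted assignments
    # divided by the geometric mean of their variances, all derived from the
    # (sample-weighted) confusion matrix C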
cov_ytyp = sum([
C[k, k] * C[m, l] - C[l, k] * C[k, m]
for k in range(N) for m in range(N) for l in range(N)
])
cov_ytyt = sum([
C[:, k].sum() *
np.sum([C[g, f] for f in range(N) for g in range(N) if f != k])
for k in range(N)
])
cov_ypyp = np.sum([
C[k, :].sum() *
np.sum([C[f, g] for f in range(N) for g in range(N) if f != k])
for k in range(N)
])
mcc_jurman = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
mcc_ours = matthews_corrcoef(y_true, y_pred, sample_weight)
assert_almost_equal(mcc_ours, mcc_jurman, 10)
def test_matthews_corrcoef():
rng = np.random.RandomState(0)
y_true = ["a" if i == 0 else "b" for i in rng.randint(0, 2, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# corrcoef, when the two vectors are opposites of each other, should be -1
y_true_inv = ["b" if i == "a" else "a" for i in y_true]
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv), -1)
y_true_inv2 = label_binarize(y_true, ["a", "b"])
y_true_inv2 = np.where(y_true_inv2, 'a', 'b')
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv2), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, [0, 0, 0, 0], [0, 0, 0, 0])
    # But it will output 0
assert_almost_equal(mcc, 0.)
# And also for any other vector with 0 variance
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, ['a'] * len(y_true))
    # But it will output 0
assert_almost_equal(mcc, 0.)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
y_2 = [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
# Check that sample weight is able to selectively exclude
mask = [1] * 10 + [0] * 10
    # Now only the first half of the vector elements are given a weight of 1,
    # and hence the mcc will not be a perfect 0 as in the previous case
assert_raises(AssertionError, assert_almost_equal,
matthews_corrcoef(y_1, y_2, sample_weight=mask), 0.)
def test_matthews_corrcoef_multiclass():
rng = np.random.RandomState(0)
ord_a = ord('a')
n_classes = 4
y_true = [chr(ord_a + i) for i in rng.randint(0, n_classes, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# with multiclass > 2 it is not possible to achieve -1
y_true = [0, 0, 1, 1, 2, 2]
y_pred_bad = [2, 2, 0, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_true, y_pred_bad), -.5)
# Maximizing false positives and negatives minimizes the MCC
    # The minimum will be different depending on the input
y_true = [0, 0, 1, 1, 2, 2]
y_pred_min = [1, 1, 0, 0, 0, 0]
assert_almost_equal(matthews_corrcoef(y_true, y_pred_min),
-12 / np.sqrt(24 * 16))
    # Zero variance will result in an MCC of zero and a RuntimeWarning
y_true = [0, 1, 2]
y_pred = [3, 3, 3]
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, y_pred)
assert_almost_equal(mcc, 0.0)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [0, 1, 2, 0, 1, 2, 0, 1, 2]
y_2 = [1, 1, 1, 2, 2, 2, 0, 0, 0]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
# We can test that binary assumptions hold using the multiclass computation
# by masking the weight of samples not in the first two classes
# Masking the last label should let us get an MCC of -1
y_true = [0, 0, 1, 1, 2]
y_pred = [1, 1, 0, 0, 2]
sample_weight = [1, 1, 1, 1, 0]
assert_almost_equal(matthews_corrcoef(y_true, y_pred, sample_weight), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
y_true = [0, 0, 1, 2]
y_pred = [0, 0, 1, 2]
sample_weight = [1, 1, 0, 0]
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, y_pred,
sample_weight)
    # But it will output 0
assert_almost_equal(mcc, 0.)
def test_matthews_corrcoef_overflow():
# https://github.com/scikit-learn/scikit-learn/issues/9622
rng = np.random.RandomState(20170906)
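    # reference implementation that works with normalized rates rather than
    # raw counts, so intermediate products stay small even for large n_points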
def mcc_safe(y_true, y_pred):
conf_matrix = confusion_matrix(y_true, y_pred)
true_pos = conf_matrix[1, 1]
false_pos = conf_matrix[1, 0]
false_neg = conf_matrix[0, 1]
n_points = len(y_true)
pos_rate = (true_pos + false_neg) / n_points
activity = (true_pos + false_pos) / n_points
mcc_numerator = true_pos / n_points - pos_rate * activity
mcc_denominator = activity * pos_rate * (1 - activity) * (1 - pos_rate)
return mcc_numerator / np.sqrt(mcc_denominator)
def random_ys(n_points): # binary
x_true = rng.random_sample(n_points)
x_pred = x_true + 0.2 * (rng.random_sample(n_points) - 0.5)
y_true = (x_true > 0.5)
y_pred = (x_pred > 0.5)
return y_true, y_pred
for n_points in [100, 10000, 1000000]:
arr = np.repeat([0., 1.], n_points) # binary
assert_almost_equal(matthews_corrcoef(arr, arr), 1.0)
arr = np.repeat([0., 1., 2.], n_points) # multiclass
assert_almost_equal(matthews_corrcoef(arr, arr), 1.0)
y_true, y_pred = random_ys(n_points)
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
assert_almost_equal(matthews_corrcoef(y_true, y_pred),
mcc_safe(y_true, y_pred))
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_recall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_binary_averaged():
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
ps, rs, fs, _ = precision_recall_fscore_support(y_true, y_pred,
average=None)
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='macro')
assert_equal(p, np.mean(ps))
assert_equal(r, np.mean(rs))
assert_equal(f, np.mean(fs))
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='weighted')
support = np.bincount(y_true)
assert_equal(p, np.average(ps, weights=support))
assert_equal(r, np.average(rs, weights=support))
assert_equal(f, np.average(fs, weights=support))
def test_zero_precision_recall():
    # Check that pathological cases do not produce NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='macro'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='macro'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='macro'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_sample_weight():
"""Test confusion matrix - case with sample_weight"""
y_true, y_pred, _ = make_prediction(binary=False)
weights = [.1] * 25 + [.2] * 25 + [.3] * 25
cm = confusion_matrix(y_true, y_pred, sample_weight=weights)
true_cm = (.1 * confusion_matrix(y_true[:25], y_pred[:25]) +
.2 * confusion_matrix(y_true[25:50], y_pred[25:50]) +
.3 * confusion_matrix(y_true[50:], y_pred[50:]))
assert_array_almost_equal(cm, true_cm)
assert_raises(
ValueError, confusion_matrix, y_true, y_pred,
sample_weight=weights[:-1])
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
# a label not in y_true should result in zeros for that row/column
extra_label = np.max(y_true) + 1
cm = confusion_matrix(y_true, y_pred, labels=[2, extra_label])
assert_array_equal(cm, [[18, 0],
[0, 0]])
# check for exception when none of the specified labels are in y_true
assert_raises(ValueError, confusion_matrix, y_true, y_pred,
labels=[extra_label, extra_label + 1])
def test_confusion_matrix_dtype():
y = [0, 1, 1]
weight = np.ones(len(y))
# confusion_matrix returns int64 by default
cm = confusion_matrix(y, y)
assert_equal(cm.dtype, np.int64)
# The dtype of confusion_matrix is always 64 bit
for dtype in [np.bool_, np.int32, np.uint64]:
cm = confusion_matrix(y, y, sample_weight=weight.astype(dtype))
assert_equal(cm.dtype, np.int64)
for dtype in [np.float32, np.float64, None, object]:
cm = confusion_matrix(y, y, sample_weight=weight.astype(dtype))
assert_equal(cm.dtype, np.float64)
# np.iinfo(np.uint32).max should be accumulated correctly
weight = np.ones(len(y), dtype=np.uint32) * 4294967295
cm = confusion_matrix(y, y, sample_weight=weight)
assert_equal(cm[0, 0], 4294967295)
assert_equal(cm[1, 1], 8589934590)
# np.iinfo(np.int64).max should cause an overflow
weight = np.ones(len(y), dtype=np.int64) * 9223372036854775807
cm = confusion_matrix(y, y, sample_weight=weight)
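    # cm[0, 0] accumulates a single int64-max weight, while cm[1, 1] sums two
    # of them, which wraps around to -2 in signed 64-bit arithmetic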
assert_equal(cm[0, 0], 9223372036854775807)
assert_equal(cm[1, 1], -2)
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_long_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array(["blue", "green"*5, "red"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
greengreengreengreengreen 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_labels_target_names_unequal_length():
y_true = [0, 0, 2, 0, 0]
y_pred = [0, 2, 2, 0, 0]
target_names = ['class 0', 'class 1', 'class 2']
assert_warns_message(UserWarning,
"labels size, 2, does not "
"match size of target_names, 3",
classification_report,
y_true, y_pred, target_names=target_names)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
w = np.array([1, 3])
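    # unweighted: 1 mismatched entry out of 6 label slots -> 1 / 6; with
    # sample_weight [1, 3] the weighted loss is (1 * 1 + 3 * 0) / ((1 + 3) * 3)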
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12)
assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12)
assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3)
# sp_hamming only works with 1-D arrays
assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0]))
assert_warns(DeprecationWarning, hamming_loss, y1, y2, classes=[0, 1])
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
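    # mean of the per-sample Jaccard scores: (1 / 2 + 2 / 2) / 2 = 0.75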
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
# single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='binary')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='binary')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_binary_data_non_binary():
# Error if user does not explicitly set non-binary average mode
y_true_mc = [1, 2, 3, 3]
y_pred_mc = [1, 2, 3, 1]
y_true_ind = np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])
y_pred_ind = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
for y_true, y_pred, y_type in [
(y_true_mc, y_pred_mc, 'multiclass'),
(y_true_ind, y_pred_ind, 'multilabel-indicator'),
]:
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
assert_raise_message(ValueError,
"Target is %s but average='binary'. Please "
"choose another average setting." % y_type,
metric, y_true, y_pred)
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Classification metrics can't handle a mix of {0} and {1} "
"targets".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test__check_targets_multiclass_with_both_y_true_and_y_pred_binary():
# https://github.com/scikit-learn/scikit-learn/issues/8098
y_true = [0, 1]
y_pred = [0, -1]
assert_equal(_check_targets(y_true, y_pred)[0], 'multiclass')
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
# decision functions is not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
# test labels option
y_true = [2, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5]]
y_score = np.array([[0.1, 0.9], [0.1, 0.9]])
error_str = ('y_true contains only one label (2). Please provide '
'the true labels explicitly through the labels argument.')
assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred)
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.2, 0.3]]
error_str = ('Found input variables with inconsistent numbers of samples: '
'[3, 2]')
assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred)
# works when the labels argument is used
true_log_loss = -np.mean(np.log(y_score[:, 1]))
calculated_log_loss = log_loss(y_true, y_score, labels=[1, 2])
assert_almost_equal(calculated_log_loss, true_log_loss)
# ensure labels work when len(np.unique(y_true)) != y_pred.shape[1]
y_true = [1, 2, 2]
y_score2 = [[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]]
loss = log_loss(y_true, y_score2, labels=[1, 2, 3])
assert_almost_equal(loss, 1.0630345, decimal=6)
def test_log_loss_pandas_input():
# case when input is a pandas series and dataframe gh-5715
y_tr = np.array(["ham", "spam", "spam", "ham"])
y_pr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TrueInputType, PredInputType in types:
# y_pred dataframe, y_true series
y_true, y_pred = TrueInputType(y_tr), PredInputType(y_pr)
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
# calculate even if only single class in y_true (#6980)
assert_almost_equal(brier_score_loss([0], [0.5]), 0.25)
assert_almost_equal(brier_score_loss([1], [0.5]), 0.25)
|
mit
|
bobbymckinney/seebeck_measurement
|
old versions/program_roomtemp/RT_Seebeck_Processing_v1.py
|
2
|
15941
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 18 16:07:35 2014
@author: Benjamin Kostreva ([email protected])
__Title__
Description:
Interpolates all of the data from the Seebeck files and calculates dT and
corrected voltages.
Comments:
- The linear interpolation calculates what the data in between time stamps
should be.
Edited by Bobby McKinney 2015-01-12
"""
import numpy as np # for linear fits
# for saving plots
import matplotlib.pyplot as plt
# for creating new folders
import os
###############################################################################
class Process_Data:
''' Interpolates the data in order to get a common timestamp and outputs
time, dT, lowV, highV corrected lists with common timestamps on each
line.
'''
#--------------------------------------------------------------------------
def __init__(self, directory, fileName):
self.directory = directory
filePath = directory + '/'+ fileName
ttempA, tempA, ttempB, tempB, tVlow, Vlow, tVhigh, Vhigh, tVhigh2, Vhigh2, tVlow2, Vlow2, ttempB2, tempB2, ttempA2, tempA2 = extract_Data(filePath)
print('extracted data from raw file')
self.ttempA = ttempA # first time stamp in a line
self.ttempA2 = ttempA2 # last time stamp in a line
# This will be the common time stamp for each line after interpolation:
self.t = [None]*(len(ttempA)-1)
for x in xrange(1, len(ttempA)):
self.t[x-1] = (self.ttempA[x] + self.ttempA2[x-1])/2
print('find dT')
# Finding dT (after interpolation, at common time):
tempA_int = self.interpolate(ttempA, tempA)
tempA2_int = self.interpolate(ttempA2, tempA2)
for x in xrange(len(tempA_int)):
tempA_int[x] = (tempA_int[x] + tempA2_int[x])/2
tempB_int = self.interpolate(ttempB, tempB)
tempB2_int = self.interpolate(ttempB2, tempB2)
for x in xrange(len(tempA_int)):
tempB_int[x] = (tempB_int[x] + tempB2_int[x])/2
self.dT = [None]*len(tempA_int)
for x in xrange(len(tempA_int)):
self.dT[x] = tempA_int[x] - tempB_int[x]
# Finding avg T:
self.avgT = [None]*len(tempA_int)
for x in xrange(len(tempA_int)):
self.avgT[x] = (tempA_int[x] + tempB_int[x])/2
print('find corrected voltage')
# Voltage Corrections (after interpolation, at common time):
Vlow_int = self.interpolate(tVlow, Vlow)
Vlow2_int = self.interpolate(tVlow2, Vlow2)
for x in xrange(len(Vlow_int)):
Vlow_int[x] = (Vlow_int[x] + Vlow2_int[x])/2
self.Vlow_int_corrected = self.voltage_Correction(Vlow_int,'low')
Vhigh_int = self.interpolate(tVhigh, Vhigh)
Vhigh2_int = self.interpolate(tVhigh2, Vhigh2)
for x in xrange(len(Vhigh_int)):
Vhigh_int[x] = (Vhigh_int[x] + Vhigh2_int[x])/2
self.Vhigh_int_corrected = self.voltage_Correction(Vhigh_int,'high')
print('calculate seebeck')
# Complete linear fits to the data to find Seebeck coefficients:
low_seebeck, high_seebeck = self.calculate_seebeck()
print "low_seebeck: ",low_seebeck
print "high_seebeck: ", high_seebeck
print('extracting data from fits')
# Extract out the data from the fits in order to write to file later:
self.mlow, self.blow, self.rlow = self.extract_seebeck_elements(low_seebeck)
self.mhigh, self.bhigh, self.rhigh = self.extract_seebeck_elements(high_seebeck)
#end init
#--------------------------------------------------------------------------
def interpolate(self, tdata, data):
''' Interpolates the data in order to achieve a single time-stamp
on each data line.
'''
y0 = data[0]
t0 = tdata[0]
y = [None]*(len(data)-1)
for x in xrange(1, len(data)):
y1 = data[x]
t1 = tdata[x]
t_term = (self.t[x-1] - t0)/(t1 - t0)
# Linear interpolation:
y[x-1] = y0*(1 - t_term) + y1*t_term
y0 = data[x]
            t0 = tdata[x]
#end for
return y
#end def
#--------------------------------------------------------------------------
def voltage_Correction(self, raw_data, side):
        ''' raw_data must be in uV; corrects the voltage measurements for the
            thermocouple contribution.
        '''
# Kelvin conversion for polynomial correction.
avgT_Kelvin = [None]*len(self.avgT)
for x in xrange(len(self.avgT)):
avgT_Kelvin[x] = self.avgT[x] + 273.15
# Correction for effect from Thermocouple Seebeck
v_corrected = [None]*len(avgT_Kelvin)
for x in xrange(len(avgT_Kelvin)):
v_corrected[x] = self.alphacalc(avgT_Kelvin[x],side)*self.dT[x] - raw_data[x]
return v_corrected
#end def
#--------------------------------------------------------------------------
def alphacalc(self, x, side):
''' x = avgT
alpha in uV/K
'''
### If Chromel, taken from Chromel_Seebeck.txt
if side == 'high':
if ( x >= 270 and x < 700):
alpha = -2467.61114613*x**0 + 55.6028987953*x**1 + \
-0.552110359087*x**2 + 0.00320554346691*x**3 + \
-1.20477254034e-05*x**4 + 3.06344710205e-08*x**5 + \
-5.33914758601e-11*x**6 + 6.30044607727e-14*x**7 + \
-4.8197269477e-17*x**8 + 2.15928374212e-20*x**9 + \
-4.30421084091e-24*x**10
#end if
elif ( x >= 700 and x < 1599):
alpha = 1165.13254764*x**0 + -9.49622421414*x**1 + \
0.0346344390853*x**2 + -7.27785048931e-05*x**3 + \
9.73981855547e-08*x**4 + -8.64369652227e-11*x**5 + \
5.10080771762e-14*x**6 + -1.93318725171e-17*x**7 + \
4.27299905603e-21*x**8 + -4.19761748937e-25*x**9
#end if
else:
print "Error in voltage correction, out of range."
#end if (Chromel)
### If Alumel, taken from Alumel_Seebeck.txt
elif side == 'low':
if ( x >= 270 and x < 570):
alpha = -3465.28789643*x**0 + 97.4007289124*x**1 + \
-1.17546754681*x**2 + 0.00801252041119*x**3 + \
-3.41263237031e-05*x**4 + 9.4391002358e-08*x**5 + \
-1.69831949233e-10*x**6 + 1.91977765586e-13*x**7 + \
-1.2391854625e-16*x**8 + 3.48576207577e-20*x**9
#end if
elif ( x >= 570 and x < 1599):
alpha = 254.644633774*x**0 + -2.17639940109*x**1 + \
0.00747127856327*x**2 + -1.41920634198e-05*x**3 + \
1.61971537881e-08*x**4 + -1.14428153299e-11*x**5 + \
4.969263632e-15*x**6 + -1.27526741699e-18*x**7 + \
1.80403838088e-22*x**8 + -1.23699936952e-26*x**9
#end if
else:
print "Error in voltage correction, out of range."
#end if (Alumel)
else:
print "Error in voltage correction."
#end if (K-type)
return alpha
#end def
#--------------------------------------------------------------------------
def calculate_seebeck(self):
'''
Calculates Seebeck for each measurement by finding the slope of a
linear fit to the corrected voltage and dT.
'''
x = self.dT
y_Vlow = self.Vlow_int_corrected
y_Vhigh = self.Vhigh_int_corrected
Vlow_fit = self.polyfit(x,y_Vlow,1)
Vhigh_fit = self.polyfit(x,y_Vhigh, 1)
self.create_plot(x, y_Vlow, Vlow_fit, title='low seebeck plot')
self.create_plot(x, y_Vhigh, Vhigh_fit, title='high seebeck plot')
return Vlow_fit, Vhigh_fit
#end def
#--------------------------------------------------------------------------
def polyfit(self, x, y, degree):
        '''
        Returns the polynomial fit of the given degree for x and y, along with
        the r^2 value, all in dictionary form.
        '''
results = {}
coeffs = np.polyfit(x, y, degree)
# Polynomial Coefficients
results['polynomial'] = coeffs.tolist()
# Calculate coefficient of determination (r-squared):
p = np.poly1d(coeffs)
# fitted values:
yhat = p(x) # or [p(z) for z in x]
# mean of values:
ybar = np.sum(y)/len(y) # or sum(y)/len(y)
# regression sum of squares:
ssreg = np.sum((yhat-ybar)**2) # or sum([ (yihat - ybar)**2 for yihat in yhat])
# total sum of squares:
sstot = np.sum((y - ybar)**2) # or sum([ (yi - ybar)**2 for yi in y])
results['r-squared'] = ssreg / sstot
return results
#end def
#--------------------------------------------------------------------------
def create_plot(self, x, y, fit, title):
dpi = 400
plt.ioff()
# Create Plot:
fig = plt.figure(dpi=dpi)
ax = fig.add_subplot(111)
ax.grid()
ax.set_title(title)
ax.set_xlabel("dT (K)")
ax.set_ylabel("dV (uV)")
# Plot data points:
ax.scatter(x, y, color='r', marker='.', label="Voltage")
# Overlay linear fits:
coeffs = fit['polynomial']
p = np.poly1d(coeffs)
xp = np.linspace(min(x), max(x), 5000)
eq = 'dV = %.2f*(dT) + %.2f' % (coeffs[0], coeffs[1])
ax.plot(xp, p(xp), '-', c='g', label="Voltage Fit\n %s" % eq)
ax.legend(loc='upper left', fontsize='10')
# Save:
fig.savefig('%s.png' % (self.directory +'/'+ title) , dpi=dpi)
plt.close()
#end def
#--------------------------------------------------------------------------
def extract_seebeck_elements(self, definitions):
'''
Extracts the data from the Seebeck fits in order to write to file later.
        definitions - one of the fit dictionaries returned by self.calculate_seebeck()
'''
m = definitions['polynomial'][0]
b = definitions['polynomial'][1]
r = definitions['r-squared']
return m, b, r
#end def
#--------------------------------------------------------------------------
def return_output(self):
return self.t, self.avgT, self.dT, self.Vlow_int_corrected, self.Vhigh_int_corrected
#end def
#--------------------------------------------------------------------------
def return_seebeck(self):
# temps are the same for both
temp = sum(self.avgT)/len(self.avgT)
return temp, self.mlow, self.blow, self.rlow, self.mhigh, self.bhigh, self.rhigh
#end def
#end class
###############################################################################
#--------------------------------------------------------------------------
def extract_Data(filePath):
f = open(filePath)
loadData = f.read()
f.close()
loadDataByLine = loadData.split('\n')
numericData = loadDataByLine[5:]
    # Create lists that are shorter than the total number of lines;
    # this avoids errors from an incomplete final line:
length = len(numericData)-2
ttempA = [None]*length
tempA = [None]*length
ttempB = [None]*length
tempB = [None]*length
tVlow = [None]*length
Vlow = [None]*length
tVhigh = [None]*length
Vhigh = [None]*length
tVhigh2 = [None]*length
Vhigh2 = [None]*length
tVlow2 = [None]*length
Vlow2 = [None]*length
ttempB2 = [None]*length
tempB2 = [None]*length
ttempA2 = [None]*length
tempA2 = [None]*length
print('Successfully loaded data by line')
for x in xrange(length):
line = numericData[x].split(',')
ttempA[x] = float(line[0])
tempA[x] = float(line[1])
ttempB[x] = float(line[2])
tempB[x] = float(line[3])
tVlow[x] = float(line[4])
Vlow[x] = float(line[5])
tVhigh[x] = float(line[6])
Vhigh[x] = float(line[7])
tVhigh2[x] = float(line[8])
Vhigh2[x] = float(line[9])
tVlow2[x] = float(line[10])
Vlow2[x] = float(line[11])
ttempB2[x] = float(line[12])
tempB2[x] = float(line[13])
ttempA2[x] = float(line[14])
tempA2[x] = float(line[15])
#end for
print('Successfully split each line of data')
return ttempA, tempA, ttempB, tempB, tVlow, Vlow, tVhigh, Vhigh, tVhigh2, Vhigh2, tVlow2, Vlow2, ttempB2, tempB2, ttempA2, tempA2
#end def
#--------------------------------------------------------------------------
def create_processed_files(directory, fileName):
'''
    Writes the output from the Process_Data object into separate files.
'''
# Make a new folder:
print 'start processing'
print 'directory: '+ directory
print 'fileName: ' + fileName
Post_Process = Process_Data(directory, fileName)
print('data processed')
### Write processed data to a new file:
outFile = directory + '/Processed_Data.csv'
file = outFile # creates a data file
myfile = open(outFile, 'w') # opens file for writing/overwriting
myfile.write('Time (s),Average T (C),dT (K),Low V Corrected (uV),High V Corrected (uV)\n')
time, avgT, dT, Vlow, Vhigh = Post_Process.return_output()
for x in xrange(len(time)):
myfile.write('%.2f,%f,%f,%f,%f\n' % (time[x], avgT[x], dT[x], Vlow[x], Vhigh[x]))
myfile.close()
### Write linear fits and calculated Seebeck coefficients to a new file:
seebeck_file = directory + '/Seebeck.csv'
file = seebeck_file
myfile = open(seebeck_file, 'w')
myfile.write('Linear Fit: seebeck*x + offset\n')
myfile.write('\n')
myfile.write('Low (i.e. Alumel):,,,,,High (i.e. Chromel):\n')
table_header = 'Temp (C),Seebeck (uV/K),offset,r^2'
myfile.write('%s,,%s\n' % (table_header,table_header))
temp, low_m, low_b, low_r, high_m, high_b, high_r = Post_Process.return_seebeck()
myfile.write('%f,%f,%f,%f,,%f,%f,%f,%f\n' % (temp, low_m, low_b, low_r, temp, high_m, high_b, high_r))
myfile.close()
#end def
#==============================================================================
def main():
inFile = 'Data.csv'
directory = '../Google\ Drive/rwm-tobererlab/Seebeck Data 2015-05-25 20.03.24'
    create_processed_files(directory, inFile)
#end def
if __name__ == '__main__':
main()
#end if
|
gpl-3.0
|
h2oai/h2o-3
|
h2o-py/tests/testdir_algos/xgboost/pyunit_xgboost_zero_nan_handling_native_comparison.py
|
2
|
3486
|
import sys
sys.path.insert(1,"../../../")
import pandas as pd
import numpy as np
from h2o.estimators.xgboost import *
from tests import pyunit_utils
from scipy.sparse import csr_matrix
def xgboost_categorical_zero_nan_handling_sparse_vs_native():
if sys.version.startswith("2"):
print("native XGBoost tests only supported on python3")
return
import xgboost as xgb
assert H2OXGBoostEstimator.available() is True
# Artificial data to be used throughout the test, very simple
raw_training_data = {'col1': [0, float('NaN'), 1],
'response': [20, 30, 40]}
raw_prediction_data = {'col1': [0, float('NaN'), 1]}
prediction_frame= pd.DataFrame(data=raw_prediction_data)
# Native XGBoost training data
col1 = np.matrix([[0],[float('NaN')],[1]])
training_data_csr = csr_matrix(col1);
training_data_label = [20, 30, 40]
predict_test_data_csr = csr_matrix(col1)
    # Native XGBoost model trained first
dtrain = xgb.DMatrix(data=training_data_csr, label=training_data_label)
dtest = xgb.DMatrix(data=predict_test_data_csr)
runSeed = 10
ntrees = 1
h2oParamsS = {"ntrees":ntrees, "max_depth":4, "seed":runSeed, "learn_rate":1.0, "col_sample_rate_per_tree" : 1.0,
"min_rows": 1, "score_tree_interval": ntrees + 1, "dmatrix_type": "sparse", "tree_method": "exact",
"backend": "cpu", "reg_lambda": 0.0}
nativeParam = {'colsample_bytree': h2oParamsS["col_sample_rate_per_tree"],
'tree_method': 'exact',
'seed': h2oParamsS["seed"],
'booster': 'gbtree',
'objective': 'reg:linear',
'eta': h2oParamsS["learn_rate"],
'grow_policy': 'depthwise',
'alpha': 0.0,
'lambda': h2oParamsS["reg_lambda"],
'subsample': 1.0,
'colsample_bylevel': 1.0,
'max_delta_step': 0.0,
'min_child_weight': h2oParamsS["min_rows"],
'gamma': 0.0,
'max_depth': h2oParamsS["max_depth"]}
bst = xgb.train(params=nativeParam, dtrain=dtrain)
native_prediction = bst.predict(data=dtest)
pandas_training_frame = pd.DataFrame(data=raw_training_data)
training_frame = h2o.H2OFrame(pandas_training_frame)
training_frame['col1'] = training_frame['col1'].asnumeric()
training_frame['response'] = training_frame['response'].asnumeric()
prediction_frame = h2o.H2OFrame(prediction_frame)
prediction_frame['col1'] = prediction_frame['col1'].asnumeric()
sparse_trained_model = H2OXGBoostEstimator(**h2oParamsS)
sparse_trained_model.train(x=['col1'], y='response', training_frame=training_frame)
sparse_based_prediction = sparse_trained_model.predict(prediction_frame['col1'])
#Prediction should be equal. In both cases, 0 and NaN should be treated the same (default direction for NaN)
print(native_prediction)
print(sparse_based_prediction)
assert sparse_based_prediction['predict'][0,0] == sparse_based_prediction['predict'][1,0]
assert native_prediction[0].item() == native_prediction[1].item()
assert sparse_based_prediction['predict'][2,0] == native_prediction[2].item()
if __name__ == "__main__":
pyunit_utils.standalone_test(xgboost_categorical_zero_nan_handling_sparse_vs_native)
else:
xgboost_categorical_zero_nan_handling_sparse_vs_native()
|
apache-2.0
|
ocefpaf/iris
|
docs/iris/example_code/Meteorology/TEC.py
|
2
|
1054
|
"""
Ionosphere space weather
========================
This space weather example plots a filled contour of rotated pole point
data with a shaded relief image underlay. The plot shows aggregated
vertical electron content in the ionosphere.
The plot exhibits an interesting outline effect due to excluding data
values below a certain threshold.
"""
import matplotlib.pyplot as plt
import numpy.ma as ma
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# Load the "total electron content" cube.
filename = iris.sample_data_path("space_weather.nc")
cube = iris.load_cube(filename, "total electron content")
# Explicitly mask negative electron content.
cube.data = ma.masked_less(cube.data, 0)
# Plot the cube using one hundred colour levels.
qplt.contourf(cube, 100)
plt.title("Total Electron Content")
plt.xlabel("longitude / degrees")
plt.ylabel("latitude / degrees")
plt.gca().stock_img()
plt.gca().coastlines()
iplt.show()
if __name__ == "__main__":
main()
|
lgpl-3.0
|
Cronjaeger/coalescent-simulations
|
coalPlot.py
|
1
|
11503
|
'''
This is a collection of scripts intended to plot and display coalescents.
'''
import dendropy as dp
from Bio import Phylo
#import networkx
import numpy as np
import finiteSitesModell_investigations as fsmi
from cStringIO import StringIO
import matplotlib.pyplot as plt
class tree_node(object):
def __init__(self, data = None, label = None, parent = None, children = list(), Tree = None, dist = 1.0):
if parent is not None and type(parent) is not tree_node:
raise ValueError('Invalid parent-argument')
if Tree is not None and type(Tree) is not rooted_tree:
raise ValueError('Invalid Tree-argument passed.')
if any(type(c) is not tree_node for c in children):
raise ValueError('Invalid: Some children passed which were not of type tree_node')
self.data = data
self.label = label
self.Tree = Tree
self.parent = parent
self.distanceToParent = dist
self.children = list(children)
def __str__(self):
        return str(self.label)
def root(self):
node = self
while node.parent is not None:
node = node.parent
return node
def leaves(self):
'''
returns a list of all leaves subtending the node.
        if the node itself is a leaf, returns [node].
        '''
        output = []
        if len(self.children) == 0:
output.append(self)
else:
for c in self.children:
output.extend(c.leaves())
return output
# class tree_edge(object):
# def __init__(self, origin, terminus, data = None, label = None, length = 1, Tree = None):
# self.origin = origin
# self.terminus = terminus
# self.data = data
# self.label = label
# self.length = length
# self.Tree = Tree
def node_to_string(node):
return str(node.label)
def node_to_newick(node, label_internal_nodes = False, initialCall = True):
'''
prints a newick representation of the phylogeny rooted at 'node'.
'''
    # we call the function on all children and aggregate the results
out_str = ','.join([node_to_newick(child, label_internal_nodes, False) for child in node.children])
if len(node.children) > 0:
out_str = '(%s)'%out_str
if label_internal_nodes:
out_str += node_to_string(node)
else:
out_str = node_to_string(node)
if initialCall:
return '%s;'%out_str
else:
return '%s:%f'%(out_str,float(node.distanceToParent))
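# A minimal usage sketch of node_to_newick (hedged; the three nodes below are
# hypothetical and not produced by any simulation):
#
#     root = tree_node(label='R')
#     a = tree_node(label='A', parent=root)
#     b = tree_node(label='B', parent=root)
#     root.children = [a, b]
#     node_to_newick(root, label_internal_nodes=True)  # -> '(A:1.000000,B:1.000000)R;'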
class rooted_tree(object):
def __init__(self, root, root_key = 'root'):
'''
initialize a rooted tree. The argument root must be of type node.
'''
if type(root) is not tree_node:
raise ValueError('argument \'root\' must have type tree_node')
root.Tree = self
self.root = root
self.nodes = [root]
self.nodes_dict = {root_key:root}
def add_node(self, new_node, parent = None, key = None):
if new_node in self.nodes:
            raise ValueError('Error: attempt to add a node which is already in the tree!')
if parent is not None:
if parent.Tree is not self:
raise ValueError('cannot add node to Tree (specified parent is not in it)')
new_node.Tree = self
if parent is not None:
new_node.parent = parent
parent.children.append(new_node)
self.nodes.append(new_node)
        if key is None: ## we were not provided with a key for the node.
            i = len(self.nodes) #first attempt: the nth node to be added is given the label n
while self.nodes_dict.has_key(i): # avoid label-collisions.
i += 1
#new_node.label = i
key = i
self.nodes_dict[key] = new_node
def subdivide_edge(self, origin, terminus, new_node, d_origin_to_new = 1.0, d_new_to_terminus = 1.0, new_node_key = None):
'''
Will insert new_node between origin and terminus
'''
try:
float(d_origin_to_new)
float(d_new_to_terminus)
except TypeError:
raise TypeError('Invalid distances passed.')
self.add_node(new_node, key = new_node_key)
#new node replaces terminus in list of children of origin
i = origin.children.index(terminus)
origin.children[i] = new_node
new_node.distanceToParent = d_origin_to_new
#new node is new parent of terminus
terminus.parent = new_node
new_node.children.append(terminus)
terminus.distanceToParent = d_new_to_terminus
def str_newick(self,label_internal = True):
return node_to_newick(self.root,label_internal)
def str_nexus(self,label_internal = False):
preamble_str = '#NEXUS\n'
taxa = [str(node.label) for node in self.nodes]
taxa_str = '\nBEGIN TAXA;\n TaxLabels %s;\nEND;\n'%' '.join(taxa)
tree_str = '\nBEGIN TREES;\n Tree phylogeny=%s\nEND;\n'%self.str_newick(label_internal)
return preamble_str + taxa_str + tree_str
def sim_to_tree(sim, mutation_filter = lambda x: True):
'''
Sim should be of the class fsmi.simulator_KingmanFiniteSites
mutation m will be dropped if mutation_filter(m)==False
'''
if type(sim) is not fsmi.simulator_KingmanFiniteSites:
        raise TypeError('currently only implemented for finite sites coalescent (in other cases, mutations must be handled differently than this method does.)')
coal = sim.coal
mutation_events = coal.getMutations()
mutation_events.sort(reverse = True) #sort with highest time-index first (closest to MRCA)
mutation_events = filter(mutation_filter,mutation_events)
merger_events = coal.getChain_with_times()
merger_events.sort(reverse = True)
#print merger_events
# events = coal.getChain_all_events()
root = merger_events[0]
root_data = { 't':root[0],
'block':root[1].blocks[0],
'type':'lineage',
'sequence':np.array(sim.MRCA_seq)}
root_node = tree_node(data = root_data, label='MRCA', dist = None)
tree = rooted_tree(root_node, root_key = tuple(root[1].blocks[0]))
# for event in events:
# if type(event[1]) is libcoal.partition:
# pass
# elif type(event[1]) is np.int64:
# pass
# else
# raise Warning('Unknown event-type: %s for event %s'%(str(type(event[1])),str(event)))
t_old = root[0]
blocks = [list(root[1].blocks[0])]
for merger_event in merger_events[1:]:
# print 'event: t=%f, %s'%(merger_event[0],str(merger_event[1]))
t_new = merger_event[0]
blocks_old = list(blocks)
blocks = list(merger_event[1].blocks)
blocks_new = [b for b in blocks if b not in blocks_old]
removed_block = [b for b in blocks_old if b not in blocks][0]
try:
assert len(blocks_new) == 2
assert set(removed_block) == set(blocks_new[0]).union(set(blocks_new[1]))
except AssertionError:
print 'blocks: ',blocks
print 'blocks_new: ',blocks_new
print 'blocks_old: ',blocks_old
raise AssertionError()
parent_node = tree.nodes_dict[tuple(removed_block)]
t_old = parent_node.data['t']
assert type(t_old) is not tuple
block_data_basic = {'t':t_new,'type':'lineage', 'sequence':np.array(parent_node.data['sequence'])}
for b in blocks_new:
block_data = dict(block_data_basic)
block_data['block'] = b
block_label = label_block(b)
block_node = tree_node( data = block_data, label = block_label, parent = parent_node, Tree = tree, dist = t_old - t_new)
tree.add_node(block_node, parent_node, key = tuple(b))
# we update times in all blocks
for b in blocks:
node = tree.nodes_dict[tuple(b)]
node.data['t'] = t_new
node.distanceToParent = node.parent.data['t'] - t_new
for mutation in filter(lambda x: t_new < x[0] and x[0] <= t_old, mutation_events):
# Extract relevent information from the entry
t_mutation = mutation[0]
lineage = mutation[1]
site = mutation[2]
shift = mutation[3]
mutation_index = mutation[4]
# Determine the parent and child nodes of the lineages in question
node_below = tree.nodes_dict[tuple(blocks[lineage])]
node_above = node_below.parent
# determine the length of the new edges
t_above = node_above.data['t']
t_below = node_below.data['t']
delta_above = t_above - t_mutation
delta_below = t_mutation - t_below
#determine how the mutation changes the sequence
sequence = np.array(node_above.data['sequence'])
assert all(sequence == node_below.data['sequence'])
allele_before = sequence[site]
sequence[site] += shift
sequence[site] %= 4
allele_after = sequence[site]
#update the sequence of the child node
node_below.data['sequence'] = np.array(sequence)
node_data = {'t':t_mutation,
'type':'mutation',
'sequence':sequence,
'site':site,
'shift':shift,
'from_to':(allele_before, allele_after)}
label = label_mutation(mutation, site, allele_before, allele_after)
mutation_node = tree_node(data = node_data, label = label)
mutation_key = 'm%i'%mutation_index
tree.subdivide_edge( origin = node_above,
terminus = node_below,
new_node = mutation_node,
d_origin_to_new = delta_above,
d_new_to_terminus = delta_below,
new_node_key = mutation_key)
return tree
def site_list_to_filter(sites_to_keep):
return lambda m: m[2] in sites_to_keep
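# Hedged usage sketch: keep only the mutations at sites 0 and 3 when building a
# tree (assumes `sim` is a simulator_KingmanFiniteSites instance, as in test()):
#
#     tree = sim_to_tree(sim, mutation_filter=site_list_to_filter([0, 3]))
#     print tree.str_newick(True)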
def label_mutation(mutation, site, allele_before, allele_after):
#return '%i@%i'%(allele_after,site)
return '%ito%i@%i'%(allele_before,allele_after,site)
# def label_mutation(m):
# return 'x+%i\%4_@site_%i'&(m[4],m[3])
def label_block(b):
# return '-'.join(map(str,b))
if len(b) > 1:
return ''
else:
return b[0]
def label_leaf(l):
return str(l)
def test(n = 10, theta = 1.0, L = 10):
sim = fsmi.simulator_KingmanFiniteSites(n,theta,L)
myTree = sim_to_tree(sim)
# str_newick_test = '(A,(B,C)D);'
# dpTree_test = dp.Tree.get(data = str_newick_test, schema = 'newick')
# dpTree_test.print_plot()
str_newick_sim = myTree.str_newick(True)
print str_newick_sim
dpTree_sim = dp.Tree.get(data = str_newick_sim, schema = 'newick')
dpTree_sim.print_plot()
phyloTree = Phylo.read(StringIO(str_newick_sim),'newick')
print phyloTree
plt.figure()
Phylo.draw(phyloTree)
phyloTree.rooted = True
plt.figure()
Phylo.draw_graphviz(phyloTree, prog = 'dot')
plt.draw()
# str_nexus_sim = tree.str_nexus(True)
# dpTree_sim_nx = dp.Tree.get(data = str_nexus_sim, schema = 'nexus')
# dpTree_sim_nx.print_plot()
if __name__ == '__main__':
#test()
pass
|
gpl-2.0
|
nkarast/Snippets
|
Python/gaussian_distro.py
|
1
|
3119
|
import numpy as np
import numba as nb
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
class Particles:
def __init__(self, mean=[0,0,0], cov=[[1,0,0], [0,1,0], [0,0,0.1]], N=1000):
self.mean = mean
self.cov = cov
self.N = N
        self.generate_Particles()  # also stores reset_x/reset_y/reset_z used by reset_distribution()
def generate_Particles(self):
self.x , self.y, self.z = np.random.multivariate_normal(self.mean, self.cov, self.N).T
self.reset_x, self.reset_y, self.reset_z = self.x , self.y, self.z
def plot_distribution(self):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(self.x,self.y,self.z, c = 'red', alpha=0.3)
ax.set_xlabel('X [a.u.]')
ax.set_ylabel('Y [a.u.]')
ax.set_zlabel('Z [a.u.]')
ax.set_xlim(-5,5)
ax.set_ylim(-5,5)
ax.set_zlim(-5,5)
plt.grid(True)
plt.show()
def move_distribution(self,t):
self.x += np.sqrt(3.75)*np.cos(10*t)
self.y += np.sqrt(3.75)*np.cos(20*t)
self.z += 2*t #100+np.sqrt(3.75)*np.cos(0.5*t)
def reset_distribution(self):
self.x = self.reset_x
self.y = self.reset_y
self.z = self.reset_z
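# Hedged usage sketch of the Particles class above (not executed here):
#
#     p = Particles(N=100)
#     p.plot_distribution()       # static 3D scatter of the initial cloud
#     p.move_distribution(1.0)    # displace the cloud along its trajectory at t=1
#     p.reset_distribution()      # restore the initial positions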
## Mean values and Covariance matrix for gaussian
# mean = [0,0,0]
# cov = [[1,0,0], [0,1,0], [0,0,0.1]]
# x, y, z = np.random.multivariate_normal(mean, cov, 1000).T
#print x, "\n\n", y, "\n\n", z
#plt.ion()
distr = Particles(N=10)
fig = plt.figure(figsize = (7,7))
axes = fig.add_subplot(111, projection='3d')
time_template = 't = %.2f'
time_text = axes.text(0.05, 0.9, 0.9,'', transform=axes.transAxes)
def animate(coords):
time_text.set_text(time_template % coords[3])
axes.set_xlabel('X [a.u.]')
axes.set_ylabel('Y [a.u.]')
axes.set_zlabel('Z [a.u.]')
axes.set_xlim(-2,10)
axes.set_ylim(-2,10)
axes.set_zlim(-2,5000)
plt.grid(True)
return axes.scatter([coords[0]],[coords[1]], [coords[2]], c='red', alpha=0.2),time_text
def frames():
for t in np.arange(0,50,1):
distr.move_distribution(t)
yield distr.x, distr.y, distr.z, t
anim = animation.FuncAnimation(fig, animate,
frames=frames, interval=100, blit=False, repeat=False)
#plt.show()
#plt.clf()
## ----------------------------------
distr.reset_distribution()
fig2 = plt.figure(figsize = (7,7))
axes2 = fig2.add_subplot(111)
def animate2(coords):
time_text.set_text(time_template % coords[2])
axes2.set_xlim(-2,10)
axes2.set_ylim(-2,10)
axes2.set_xlabel("X")
axes2.set_ylabel("Y")
plt.grid(True)
return plt.scatter([coords[0]],[coords[1]], c='g', marker='^', alpha=0.5),time_text
def frames2():
for t in np.arange(0,100,1):
distr.move_distribution(t)
yield distr.x, distr.y, t
ani2 = animation.FuncAnimation(fig2, animate2,
frames=frames2, interval=100, blit=False, repeat=False)
plt.show()
# print distr.x.shape
# distr.plot_distribution()
# distr.move_distribution(20)
# distr.plot_distribution()
# for t in np.arange(0,5,1):
# distr.move_distribution(t)
# distr.plot_distribution()
|
gpl-3.0
|
matthew-tucker/mne-python
|
examples/inverse/plot_dics_beamformer.py
|
18
|
2905
|
"""
======================================
Compute DICS beamformer on evoked data
======================================
Compute a Dynamic Imaging of Coherent Sources (DICS) beamformer from single
trial activity in a time-frequency window to estimate source time courses based
on evoked data.
The original reference for DICS is:
Gross et al. Dynamic imaging of coherent sources: Studying neural interactions
in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
"""
# Author: Roman Goj <[email protected]>
#
# License: BSD (3-clause)
import mne
import matplotlib.pyplot as plt
import numpy as np
from mne.io import Raw
from mne.datasets import sample
from mne.time_frequency import compute_epochs_csd
from mne.beamformer import dics
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'
###############################################################################
# Read raw data
raw = Raw(raw_fname)
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
# Set picks
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
# Read epochs
event_id, tmin, tmax = 1, -0.2, 0.5
events = mne.read_events(event_fname)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12))
evoked = epochs.average()
# Read forward operator
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Computing the data and noise cross-spectral density matrices
# The time-frequency window was chosen on the basis of spectrograms from
# example time_frequency/plot_time_frequency.py
data_csd = compute_epochs_csd(epochs, mode='multitaper', tmin=0.04, tmax=0.15,
fmin=6, fmax=10)
noise_csd = compute_epochs_csd(epochs, mode='multitaper', tmin=-0.11, tmax=0.0,
fmin=6, fmax=10)
evoked = epochs.average()
# Compute DICS spatial filter and estimate source time courses on evoked data
stc = dics(evoked, forward, noise_csd, data_csd)
plt.figure()
ts_show = -30  # show the 30 largest responses
plt.plot(1e3 * stc.times,
stc.data[np.argsort(stc.data.max(axis=1))[ts_show:]].T)
plt.xlabel('Time (ms)')
plt.ylabel('DICS value')
plt.title('DICS time course of the 30 largest sources.')
plt.show()
# Plot brain in 3D with PySurfer if available
brain = stc.plot(hemi='rh', subjects_dir=subjects_dir)
brain.set_data_time_index(180)
brain.show_view('lateral')
# Uncomment to save image
# brain.save_image('DICS_map.png')
|
bsd-3-clause
|
shyamalschandra/scikit-learn
|
sklearn/manifold/tests/test_mds.py
|
324
|
1862
|
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
|
bsd-3-clause
|
carlvlewis/bokeh
|
bokeh/models/sources.py
|
7
|
11155
|
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import HasProps
from ..properties import Any, Int, String, Instance, List, Dict, Either, Bool, Enum
from ..validation.errors import COLUMN_LENGTHS
from .. import validation
from ..util.serialization import transform_column_source_data
from .actions import Callback
from bokeh.deprecate import deprecated
class DataSource(PlotObject):
""" A base class for data source types. ``DataSource`` is
not generally useful to instantiate on its own.
"""
column_names = List(String, help="""
An list of names for all the columns in this DataSource.
""")
selected = Dict(String, Dict(String, Any), default={
'0d': {'flag': False, 'indices': []},
'1d': {'indices': []},
'2d': {'indices': []}
}, help="""
A dict to indicate selected indices on different dimensions on this DataSource. Keys are:
    - 0d: indicates whether a Line or Patch glyph has been hit. Value is a
      dict with the following keys:
        - flag (boolean): true if the glyph was hit, false otherwise
        - indices (list): indices hit (if applicable)
    - 1d: indicates whether any other glyph (except [multi]lines or
      patches) was hit:
        - indices (list): indices that were hit/selected
    - 2d: indicates whether a [multi]line or patches glyph was hit:
        - indices (list(list)): indices of the lines/patches that were
          hit/selected
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the selection is changed.
""")
def columns(self, *columns):
""" Returns a ColumnsRef object for a column or set of columns
on this data source.
Args:
*columns
Returns:
ColumnsRef
"""
return ColumnsRef(source=self, columns=list(columns))
class ColumnsRef(HasProps):
""" A utility object to allow referring to a collection of columns
from a specified data source, all together.
"""
source = Instance(DataSource, help="""
A data source to reference.
""")
columns = List(String, help="""
A list of column names to reference from ``source``.
""")
class ColumnDataSource(DataSource):
""" Maps names of columns to sequences or arrays.
If the ColumnDataSource initializer is called with a single argument that
is a dict or pandas.DataFrame, that argument is used as the value for the
"data" attribute. For example::
ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)
ColumnDataSource(df) # same as ColumnDataSource(data=df)
.. note::
        There is an implicit assumption that all the columns in a
        given ColumnDataSource have the same length.
"""
data = Dict(String, Any, help="""
Mapping of column names to sequences of data. The data can be, e.g,
Python lists or tuples, NumPy arrays, etc.
""")
def __init__(self, *args, **kw):
""" If called with a single argument that is a dict or
pandas.DataFrame, treat that implicitly as the "data" attribute.
"""
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
import pandas as pd
if isinstance(raw_data, pd.DataFrame):
raw_data = self._data_from_df(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
for name, data in raw_data.items():
self.add(data, name)
super(ColumnDataSource, self).__init__(**kw)
@staticmethod
def _data_from_df(df):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
df (DataFrame) : data to convert
Returns:
dict(str, list)
"""
index = df.index
new_data = {}
for colname in df:
new_data[colname] = df[colname].tolist()
if index.name:
new_data[index.name] = index.tolist()
elif index.names and not all([x is None for x in index.names]):
new_data["_".join(index.names)] = index.tolist()
else:
new_data["index"] = index.tolist()
return new_data
@classmethod
@deprecated("Bokeh 0.9.3", "ColumnDataSource initializer")
def from_df(cls, data):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
data (DataFrame) : data to convert
Returns:
dict(str, list)
"""
import warnings
warnings.warn("Method deprecated in Bokeh 0.9.3")
return cls._data_from_df(data)
def to_df(self):
""" Convert this data source to pandas dataframe.
If ``column_names`` is set, use those. Otherwise let Pandas
infer the column names. The ``column_names`` property can be
used both to order and filter the columns.
Returns:
DataFrame
"""
import pandas as pd
if self.column_names:
return pd.DataFrame(self.data, columns=self.column_names)
else:
return pd.DataFrame(self.data)
def add(self, data, name=None):
""" Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
                If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
"""
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.column_names.append(name)
self.data[name] = data
return name
def vm_serialize(self, changed_only=True):
attrs = super(ColumnDataSource, self).vm_serialize(changed_only=changed_only)
if 'data' in attrs:
attrs['data'] = transform_column_source_data(attrs['data'])
return attrs
def remove(self, name):
""" Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
"""
try:
self.column_names.remove(name)
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
def push_notebook(self):
""" Update date for a plot in the IPthon notebook in place.
This function can be be used to update data in plot data sources
in the IPython notebook, without having to use the Bokeh server.
Returns:
None
.. warning::
The current implementation leaks memory in the IPython notebook,
due to accumulating JS code. This function typically works well
with light UI interactions, but should not be used for continuously
updating data. See :bokeh-issue:`1732` for more details and to
track progress on potential fixes.
"""
from IPython.core import display
from bokeh.protocol import serialize_json
id = self.ref['id']
model = self.ref['type']
json = serialize_json(self.vm_serialize())
js = """
var ds = Bokeh.Collections('{model}').get('{id}');
var data = {json};
ds.set(data);
""".format(model=model, id=id, json=json)
display.display_javascript(js, raw=True)
@validation.error(COLUMN_LENGTHS)
def _check_column_lengths(self):
lengths = set(len(x) for x in self.data.values())
if len(lengths) > 1:
return str(self)
class RemoteSource(DataSource):
data_url = String(help="""
The URL to the endpoint for the data.
""")
data = Dict(String, Any, help="""
Additional data to include directly in this data source object. The
columns provided here are merged with those from the Bokeh server.
""")
polling_interval = Int(help="""
polling interval for updating data source in milliseconds
""")
class AjaxDataSource(RemoteSource):
method = Enum('POST', 'GET', help="http method - GET or POST")
mode = Enum("replace", "append", help="""
Whether to append new data to existing data (up to ``max_size``),
or to replace existing data entirely.
""")
max_size = Int(help="""
    Maximum size of the data array to keep after each pull request.
    Beyond that size, the data will be right-shifted (the oldest values
    are dropped).
""")
if_modified = Bool(False, help="""
Whether to include an ``If-Modified-Since`` header in AJAX requests
to the server. If this header is supported by the server, then only
new data since the last request will be returned.
""")
class BlazeDataSource(RemoteSource):
#blaze parts
expr = Dict(String, Any(), help="""
blaze expression graph in json form
""")
namespace = Dict(String, Any(), help="""
namespace in json form for evaluating blaze expression graph
""")
local = Bool(help="""
Whether this data source is hosted by the bokeh server or not.
""")
def from_blaze(self, remote_blaze_obj, local=True):
from blaze.server import to_tree
# only one Client object, can hold many datasets
assert len(remote_blaze_obj._leaves()) == 1
leaf = remote_blaze_obj._leaves()[0]
blaze_client = leaf.data
json_expr = to_tree(remote_blaze_obj, {leaf : ':leaf'})
self.data_url = blaze_client.url + "/compute.json"
self.local = local
self.expr = json_expr
def to_blaze(self):
from blaze.server.client import Client
from blaze.server import from_tree
from blaze import Data
# hacky - blaze urls have `compute.json` in it, but we need to strip it off
# to feed it into the blaze client lib
c = Client(self.data_url.rsplit('compute.json', 1)[0])
d = Data(c)
return from_tree(self.expr, {':leaf' : d})
class ServerDataSource(BlazeDataSource):
""" A data source that referes to data located on a Bokeh server.
The data from the server is loaded on-demand by the client.
"""
    # Parameters of data transformation operations
    # The 'Any' is used to pass primitives around.
    # TODO: (jc) Find/create a property type for 'any primitive/atomic value'
    transform = Dict(String,Either(Instance(PlotObject), Any), help="""
    Parameters of the data transformation operations.
    The associated value is minimally a tag that says which downsample routine
to use. For some downsamplers, parameters are passed this way too.
""")
|
bsd-3-clause
|
great-expectations/great_expectations
|
great_expectations/render/renderer/v3/suite_profile_notebook_renderer.py
|
1
|
6852
|
from typing import Any, Dict, List, Union
import nbformat
from great_expectations import DataContext
from great_expectations.core.batch import BatchRequest
from great_expectations.render.renderer.suite_edit_notebook_renderer import (
SuiteEditNotebookRenderer,
)
class SuiteProfileNotebookRenderer(SuiteEditNotebookRenderer):
def __init__(
self,
context: DataContext,
expectation_suite_name: str,
batch_request: Union[str, Dict[str, Union[str, int, Dict[str, Any]]]],
):
super().__init__(context=context)
if batch_request is None:
batch_request = {}
self.batch_request = batch_request
self.validator = context.get_validator(
batch_request=BatchRequest(**batch_request),
expectation_suite_name=expectation_suite_name,
)
self.expectation_suite_name = self.validator.expectation_suite_name
# noinspection PyMethodOverriding
def add_header(self):
self.add_markdown_cell(
markdown=f"""# Initialize a new Expectation Suite by profiling a batch of your data.
This process helps you avoid writing lots of boilerplate when authoring suites by allowing you to select columns and other factors that you care about and letting a profiler write some candidate expectations for you to adjust.
**Expectation Suite Name**: `{self.expectation_suite_name}`
"""
)
self.add_code_cell(
code=f"""\
import datetime
import pandas as pd
import great_expectations as ge
import great_expectations.jupyter_ux
from great_expectations.core.batch import BatchRequest
from great_expectations.profile.user_configurable_profiler import UserConfigurableProfiler
from great_expectations.checkpoint import SimpleCheckpoint
from great_expectations.exceptions import DataContextError
context = ge.data_context.DataContext()
batch_request = {self.batch_request}
expectation_suite_name = "{self.expectation_suite_name}"
validator = context.get_validator(
batch_request=BatchRequest(**batch_request),
expectation_suite_name=expectation_suite_name
)
column_names = [f'"{{column_name}}"' for column_name in validator.columns()]
print(f"Columns: {{', '.join(column_names)}}.")
validator.head(n_rows=5, fetch_all=False)
""",
lint=True,
)
def _add_available_columns_list(self):
column_names: List[str]
column_name: str
column_names = [
f' "{column_name}"\n,' for column_name in self.validator.columns()
]
code: str = f'ignored_columns = [\n{"".join(column_names)}]'
self.add_code_cell(code=code, lint=True)
def add_footer(self):
self.add_markdown_cell(
markdown="""# Save & review your new Expectation Suite
Let's save the draft expectation suite as a JSON file in the
`great_expectations/expectations` directory of your project and rebuild the Data
Docs site to make it easy to review your new suite."""
)
code_cell: str = """\
print(validator.get_expectation_suite(discard_failed_expectations=False))
validator.save_expectation_suite(discard_failed_expectations=False)
checkpoint_config = {
"class_name": "SimpleCheckpoint",
"validations": [
{
"batch_request": batch_request,
"expectation_suite_name": expectation_suite_name
}
]
}
checkpoint = SimpleCheckpoint(
f"_tmp_checkpoint_{expectation_suite_name}",
context,
**checkpoint_config
)
checkpoint_result = checkpoint.run()
context.build_data_docs()
validation_result_identifier = checkpoint_result.list_validation_result_identifiers()[0]
context.open_data_docs(resource_identifier=validation_result_identifier)
"""
self.add_code_cell(code=code_cell, lint=True)
self.add_markdown_cell(
markdown=f"""## Next steps
After you review this initial Expectation Suite in Data Docs you
should edit this suite to make finer grained adjustments to the expectations.
This can be done by running `great_expectations suite edit {self.expectation_suite_name}`."""
)
# noinspection PyMethodOverriding
def render(self) -> nbformat.NotebookNode:
self._notebook = nbformat.v4.new_notebook()
self.add_header()
self.add_markdown_cell(
markdown="""# Select columns
Select the columns on which you would like to set expectations and those which you would like to ignore.
Great Expectations will choose which expectations might make sense for a column based on the **data type** and **cardinality** of the data in each selected column.
Simply comment out columns that are important and should be included. You can select multiple lines and
use a jupyter keyboard shortcut to toggle each line: **Linux/Windows**:
`Ctrl-/`, **macOS**: `Cmd-/`"""
)
self._add_available_columns_list()
self.add_markdown_cell(
markdown="""# Run the data profiler
The suites generated here are **not meant to be production suites** -- they are **a starting point to build upon**.
**To get to a production-grade suite, you will definitely want to [edit this
suite](https://docs.greatexpectations.io/en/latest/guides/how_to_guides/creating_and_editing_expectations/how_to_edit_an_expectation_suite_using_a_disposable_notebook.html?utm_source=notebook&utm_medium=profile_based_expectations)
after this initial step gets you started on the path towards what you want.**
This is highly configurable depending on your goals.
You can ignore columns or exclude certain expectations, specify a threshold for creating value set expectations, or even specify semantic types for a given column.
You can find more information about [how to configure this profiler, including a list of the expectations that it uses, here.](https://docs.greatexpectations.io/en/latest/guides/how_to_guides/creating_and_editing_expectations/how_to_create_an_expectation_suite_with_the_user_configurable_profiler.html)
"""
)
self._add_profiler_cell()
self.add_footer()
return self._notebook
# noinspection PyMethodOverriding
def render_to_disk(self, notebook_file_path: str):
"""
Render a notebook to disk from an expectation suite.
"""
self.render()
self.write_notebook_to_disk(
notebook=self._notebook, notebook_file_path=notebook_file_path
)
def _add_profiler_cell(self):
self.add_code_cell(
code=f"""\
profiler = UserConfigurableProfiler(
profile_dataset=validator,
excluded_expectations=None,
ignored_columns=ignored_columns,
not_null_only=False,
primary_or_compound_key=False,
semantic_types_dict=None,
table_expectations_only=False,
value_set_threshold="MANY",
)
suite = profiler.build_suite()""",
lint=True,
)
|
apache-2.0
|
rrohan/scikit-learn
|
examples/manifold/plot_lle_digits.py
|
59
|
8576
|
"""
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
                     discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
|
bsd-3-clause
|
bioinformatics-centre/AsmVar
|
src/AsmvarVarScore/AGE_Stat.Old.py
|
2
|
7162
|
"""
========================================================
Statistics of SVs after the AGE process
========================================================
Author : Shujia Huang
Date : 2014-03-07
"""
import sys
import re
import os
import string
import numpy as np
import matplotlib.pyplot as plt
def DrawFig( figureFile, distance, nr, aa, bb ) :
fig = plt.figure( num=None, figsize=(16, 18), facecolor='w', edgecolor='k' )
plt.subplot(221)
"""
from matplotlib.colors import LogNorm
plt.hist2d(test[:,4], test[:,5], bins=50, norm=LogNorm())
plt.plot(test[:,0], test[:,1], 'co')
"""
plt.title('Distance distribution', fontsize=16)
plt.plot(distance[:,0] , 100 * distance[:,1]/np.sum(distance[:,1]) , 'ro-' )
    plt.xlabel('Breakpoint position of variants on the assembled sequence (%)', fontsize=16)
plt.ylabel('% of Number', fontsize=16)
plt.subplot(222)
plt.title('N Ratio', fontsize=16)
plt.plot(nr[:,0], nr[:,2]/np.sum(nr[:,1]), 'yo-' )
plt.axis([0,5,0.0,1.0])
    plt.xlabel('N Ratio of variants\' regions (>=%)', fontsize=16)
plt.ylabel('% of Accumulate', fontsize=16)
plt.subplot(223)
plt.plot(aa[:,0], aa[:,2]/np.sum(aa[:,1]), 'mo-' )
plt.axis([0,100,0.0,1.0])
plt.xlabel('Perfect Depth(<=)', fontsize=12)
plt.ylabel('% of Accumulate', fontsize=16)
plt.subplot(224)
plt.plot(bb[:,0], bb[:,2]/np.sum(bb[:,1]), 'ko-' )
plt.axis([0,100,0.0,1.0])
plt.xlabel('Both ImPerfect Depth(<=)', fontsize=12)
plt.ylabel('% of Accumulate', fontsize=16)
fig.savefig(figureFile + '.png')
fig.savefig(figureFile + '.pdf')
def Accum ( data, isBig = False) :
tmpD= data
k = sorted( tmpD.keys(), key = lambda d: float(d) )
dat = []
for i in range( len(k) ) :
if isBig :
for j in range(i,len(k)) : tmpD[k[i]][1] += tmpD[k[j]][0]
else :
for j in range(i+1) : tmpD[k[i]][1] += tmpD[k[j]][0]
dat.append( [ float(k[i]), float(tmpD[k[i]][0]), float(tmpD[k[i]][1]) ] )
return dat
def SampleFaLen ( faLenFile ) :
if faLenFile[-3:] == '.gz' : I = os.popen( 'gzip -dc %s' % faLenFile )
else : I = open( faLenFile )
data = {}
while 1 :
lines = I.readlines ( 100000 )
if not lines : break
for line in lines :
col = line.strip('\n').split()
data[col[0]] = string.atoi( col[1] )
I.close()
return data
def LoadFaLen ( faLenLstFile ) :
data = {}
I = open (faLenLstFile)
for line in I.readlines() :
        if len( line.strip('\n').split() ) != 2: raise ValueError('[ERROR] The format of the Fa length list may not be right. It should be "sample FaLengthFile", but found', line)
sampleId, fileName = line.strip('\n').split()
if sampleId not in data : data[sampleId] = {}
data[sampleId] = SampleFaLen( fileName )
I.close()
return data
def main ( argv ) :
qFaLen = LoadFaLen( argv[1] )
figPrefix = 'test'
if len(argv) > 2 : figPrefix = argv[2]
if argv[0][-3:] == '.gz' :
I = os.popen( 'gzip -dc %s' % argv[0] )
else :
I = open ( argv[0] )
data, distance, nr, aa, bb = [],{},{},{},{}
s = set()
print '#Chr\tPosition\tDistance\tLeftIden\tRightIden\tAveIden\tN-Ratio\tAA'
while 1 : # VCF format
lines = I.readlines( 100000 )
if not lines : break
for line in lines :
col = line.strip('\n').split()
if re.search( r'^#CHROM', line ) : col2sam = { i+9:sam for i,sam in enumerate(col[9:]) }
if re.search(r'^#', line) : continue
key = col[0] + ':' + col[1]
if key in s : continue
s.add(key)
#if re.search(r'^PASS', col[6] ) : continue
if not re.search(r'^PASS', col[6] ) : continue
#if not re.search(r'_TRAIN_SITE', col[7]) : continue
fmat = { k:i for i,k in enumerate( col[8].split(':') ) }
if 'VS' not in fmat or 'QR' not in fmat: continue
annotations = []
for i, sample in enumerate ( col[9:] ) :
sampleId = col2sam[9+i]
if len( sample.split(':')[fmat['AA']].split(',') ) != 4 :
print >> sys.stderr,'[WARNING] %s\n%s' % (line, sample.split(':')[fmat['AA']])
continue
qr = sample.split(':')[fmat['QR']].split(',')[-1]
if qr == '.' : continue
qId, qSta, qEnd = qr.split('-')
qSta = string.atoi(qSta)
qEnd = string.atoi(qEnd)
                if sampleId not in qFaLen : raise ValueError ('[ERROR] The sample name %s (in vcf) is not in the Fa list.' % sampleId )
                if qId not in qFaLen[sampleId] : raise ValueError ('[ERROR] %s was not found in the Fa length file for sample %s.' % (qId, sampleId))
qSta= int( qSta * 100 / qFaLen[sampleId][qId] + 0.5 )
qEnd= int( qEnd * 100 / qFaLen[sampleId][qId] + 0.5 )
if qSta > 100 or qEnd > 100 : raise ValueError ('[ERROR] Query size Overflow! sample : %s; scaffold : %s' % (sampleId, qId) )
leg = qSta
if 100 - qEnd < qSta : leg = qEnd
nn = string.atof(sample.split(':')[fmat['FN']])
n = int(1000 * nn + 0.5) / 10.0
alt = string.atoi( sample.split(':')[fmat['AA']].split(',')[1] ) # Alternate perfect
bot = string.atoi( sample.split(':')[fmat['AA']].split(',')[3] ) # Both imperfect
pro = string.atoi( sample.split(':')[fmat['RP']].split(',')[0] ) # Proper Pair
ipr = string.atoi( sample.split(':')[fmat['RP']].split(',')[1] ) # ImProper Pair
annotations.append( [leg, n , alt, bot, pro, ipr] )
leg, n, alt, bot, pro, ipr = np.median( annotations, axis = 0 )
if leg not in distance : distance[leg] = [0,0]
if n not in nr : nr[n] = [0,0]
if alt not in aa : aa[alt] = [0,0]
if bot not in bb : bb[bot] = [0,0]
distance[leg][0] += 1
nr[n][0] += 1
aa[alt][0] += 1
bb[bot][0] += 1
data.append([leg, alt, pro, ipr, n, bot])
I.close()
data = np.array(data)
print >> sys.stderr, '\nPosition\tALTernatePerfect\tLeftIdentity\tRightIdentity\tAveIden\tNRatio\tBothImperfect'
print >> sys.stderr, 'Means: ', data.mean(axis=0), '\nstd : ', data.std(axis=0), '\nMedian: ', np.median( data, axis=0 )
print >> sys.stderr, '25 Percentile:', np.percentile(data, 25,axis=0), '\n50 Percentile:', np.percentile(data, 50,axis=0), '\n75 Percentile:', np.percentile(data, 75,axis=0)
DrawFig( figPrefix, \
np.array (Accum( distance )), \
np.array (Accum( nr, True )), \
np.array (Accum( aa )), \
np.array (Accum( bb )) )
if __name__ == '__main__' :
main(sys.argv[1:])
|
mit
|
jairideout/scikit-bio
|
skbio/diversity/beta/__init__.py
|
6
|
7576
|
"""
Beta diversity measures (:mod:`skbio.diversity.beta`)
=====================================================
.. currentmodule:: skbio.diversity.beta
This package contains helper functions for working with scipy's pairwise
distance (``pdist``) functions in scikit-bio, and will eventually be expanded
to contain pairwise distance/dissimilarity methods that are not implemented
(or planned to be implemented) in scipy.
The functions in this package currently support applying ``pdist`` functions
to all pairs of samples in a sample by observation count or abundance matrix
and returning an ``skbio.DistanceMatrix`` object. This application is
illustrated below for a few different forms of input.
Functions
---------
.. autosummary::
:toctree: generated/
pw_distances
pw_distances_from_table
unweighted_unifrac
weighted_unifrac
Examples
--------
Create a table containing 7 OTUs and 6 samples:
.. plot::
:context:
>>> from skbio.diversity.beta import pw_distances
>>> import numpy as np
>>> data = [[23, 64, 14, 0, 0, 3, 1],
... [0, 3, 35, 42, 0, 12, 1],
... [0, 5, 5, 0, 40, 40, 0],
... [44, 35, 9, 0, 1, 0, 0],
... [0, 2, 8, 0, 35, 45, 1],
... [0, 0, 25, 35, 0, 19, 0]]
>>> ids = list('ABCDEF')
Compute Bray-Curtis distances between all pairs of samples and return a
``DistanceMatrix`` object:
>>> bc_dm = pw_distances("braycurtis", data, ids)
>>> print(bc_dm)
6x6 distance matrix
IDs:
'A', 'B', 'C', 'D', 'E', 'F'
Data:
[[ 0. 0.78787879 0.86666667 0.30927835 0.85714286 0.81521739]
[ 0.78787879 0. 0.78142077 0.86813187 0.75 0.1627907 ]
[ 0.86666667 0.78142077 0. 0.87709497 0.09392265 0.71597633]
[ 0.30927835 0.86813187 0.87709497 0. 0.87777778 0.89285714]
[ 0.85714286 0.75 0.09392265 0.87777778 0. 0.68235294]
[ 0.81521739 0.1627907 0.71597633 0.89285714 0.68235294 0. ]]
Compute weighted UniFrac distances between all pairs of samples and return a
``DistanceMatrix`` object. Because weighted UniFrac is a phylogenetic beta
diversity metric, we'll need to create a ``skbio.TreeNode`` object that
contains all of the tips in the tree, and pass that along with the ids of
the OTUs corresponding to the counts in ``data``.
>>> from skbio import TreeNode
>>> from io import StringIO
>>> tree = TreeNode.read(StringIO(
... '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
... '(OTU4:0.75,(OTU5:0.5,(OTU6:0.5,OTU7:0.5):0.5):0.5'
... '):1.25):0.0)root;'))
>>> otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5', 'OTU6', 'OTU7']
>>> wu_dm = pw_distances("weighted_unifrac", data, ids, tree=tree,
... otu_ids=otu_ids)
>>> print(wu_dm)
6x6 distance matrix
IDs:
'A', 'B', 'C', 'D', 'E', 'F'
Data:
[[ 0. 2.77549923 3.82857143 0.42512039 3.8547619 3.10937312]
[ 2.77549923 0. 2.26433692 2.98435423 2.24270353 0.46774194]
[ 3.82857143 2.26433692 0. 3.95224719 0.16025641 1.86111111]
[ 0.42512039 2.98435423 3.95224719 0. 3.98796148 3.30870431]
[ 3.8547619 2.24270353 0.16025641 3.98796148 0. 1.82967033]
[ 3.10937312 0.46774194 1.86111111 3.30870431 1.82967033 0. ]]
Determine if the resulting distance matrices are significantly correlated
by computing the Mantel correlation between them. Then determine if the
p-value is significant based on an alpha of 0.05:
>>> from skbio.stats.distance import mantel
>>> r, p_value, n = mantel(wu_dm, bc_dm)
>>> print(r)
0.922404392093
>>> print(p_value < 0.05)
True
Compute PCoA for both distance matrices, and then find the Procrustes
M-squared value that results from comparing the coordinate matrices.
>>> from skbio.stats.ordination import pcoa
>>> bc_pc = pcoa(bc_dm)
>>> wu_pc = pcoa(wu_dm)
>>> from skbio.stats.spatial import procrustes
>>> print(procrustes(bc_pc.samples.values, wu_pc.samples.values)[2])
0.096574934963
All of this only gets interesting in the context of sample metadata, so
let's define some:
>>> import pandas as pd
>>> sample_md = [
... ('A', ['gut', 's1']),
... ('B', ['skin', 's1']),
... ('C', ['tongue', 's1']),
... ('D', ['gut', 's2']),
... ('E', ['tongue', 's2']),
... ('F', ['skin', 's2'])]
>>> sample_md = pd.DataFrame.from_items(
... sample_md, columns=['body_site', 'subject'], orient='index')
>>> sample_md
body_site subject
A gut s1
B skin s1
C tongue s1
D gut s2
E tongue s2
F skin s2
Now let's plot our PCoA results, coloring each sample by the subject it
was taken from:
>>> fig = wu_pc.plot(sample_md, 'subject',
... axis_labels=('PC 1', 'PC 2', 'PC 3'),
... title='Samples colored by subject', cmap='jet', s=50)
.. plot::
:context:
We don't see any clustering/grouping of samples. If we were to instead color
the samples by the body site they were taken from, we see that the samples
form three separate groups:
>>> import matplotlib.pyplot as plt
>>> plt.close('all') # not necessary for normal use
>>> fig = wu_pc.plot(sample_md, 'body_site',
... axis_labels=('PC 1', 'PC 2', 'PC 3'),
... title='Samples colored by body site', cmap='jet', s=50)
Ordination techniques, such as PCoA, are useful for exploratory analysis. The
next step is to quantify the strength of the grouping/clustering that we see in
ordination plots. There are many statistical methods available to accomplish
this; many operate on distance matrices. Let's use ANOSIM to quantify the
strength of the clustering we see in the ordination plots above, using our
weighted UniFrac distance matrix and sample metadata.
First test the grouping of samples by subject:
>>> from skbio.stats.distance import anosim
>>> results = anosim(wu_dm, sample_md, column='subject', permutations=999)
>>> results['test statistic']
-0.33333333333333331
>>> results['p-value'] < 0.1
False
The negative value of ANOSIM's R statistic indicates anti-clustering and the
p-value is insignificant at an alpha of 0.1.
Now let's test the grouping of samples by body site:
>>> results = anosim(wu_dm, sample_md, column='body_site', permutations=999)
>>> results['test statistic']
1.0
>>> results['p-value'] < 0.1
True
The R statistic of 1.0 indicates strong separation of samples based on body
site. The p-value is significant at an alpha of 0.1.
References
----------
.. [1] http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.util import TestRunner
from ._base import pw_distances, pw_distances_from_table
from ._unifrac import unweighted_unifrac, weighted_unifrac
__all__ = ["pw_distances", "pw_distances_from_table", "unweighted_unifrac",
"weighted_unifrac"]
test = TestRunner(__file__).test
|
bsd-3-clause
|
yungyuc/solvcon
|
sandbox/gas/tube/sodtube1d/handler_plot.py
|
1
|
5850
|
#
# handler_plot.py
#
# Description:
# provide many helper functions to plot and show the input solutions.
#
import sys
import scipy.optimize as so
import matplotlib.pyplot as plt
# a tolerance used to decide whether two floating-point values are equal.
delta_precision = 0.0000000000001
def show_mesh_physical_model(bound=1, tube_radius=10, show_diaphragm=False, show_mesh=False):
"""
Show how 1D Sod tube may look like.
TODO:
    1. make an interface for solution input
2. do not run this function immediately when importing this model. Too slow.
"""
from mpl_toolkits.mplot3d import axes3d
import numpy as np
# how many points you are going to use
# to visualize the model alone each axis
model_point_number = 30
# change unit
tube_radius = tube_radius*0.1
fig = plt.figure()
ax = axes3d.Axes3D(fig,azim=30,elev=30)
# build the tube
# generate mesh points
x_tube = np.linspace(-tube_radius, tube_radius, model_point_number)
y_tube = np.linspace(-1, 1, model_point_number)
x_tube_mesh, y_tube_mesh = np.meshgrid(x_tube ,y_tube)
# build the tube as a cylinder
z_tube_mesh = np.sqrt(tube_radius**2 - x_tube_mesh**2)
# show the tube as a wireframe
ax.plot_wireframe(x_tube_mesh ,y_tube_mesh , z_tube_mesh)
ax.plot_wireframe(x_tube_mesh ,y_tube_mesh ,-z_tube_mesh)
if show_diaphragm:
# build the diaphragm
x_diaphragm = np.linspace(-tube_radius, tube_radius, model_point_number)
z_diaphragm = np.linspace(-tube_radius, tube_radius, model_point_number)
x_diaphragm_mesh, z_diaphragm_mesh = \
np.meshgrid(x_diaphragm ,z_diaphragm)
y_diaphragm_mesh = \
np.zeros(shape=(model_point_number, model_point_number))
#ax.plot_surface(x_diaphragm_mesh, y_diaphragm_mesh, z_diaphragm_mesh)
ax.plot_wireframe(x_diaphragm_mesh,
y_diaphragm_mesh,
z_diaphragm_mesh,
color='red')
if show_mesh:
# mark the CESE mesh points
x_solution = np.zeros(shape=1)
y_solution = np.linspace(-1, 1, model_point_number)
x_solution_mesh, y_solution_mesh = np.meshgrid(x_solution, y_solution)
ax.scatter(x_solution_mesh,
y_solution_mesh,
x_solution_mesh,
color='green',
marker="o")
ax.set_xbound(lower=-bound, upper=bound)
ax.set_zbound(lower=-bound, upper=bound)
ax.set_xlabel('x-axis')
ax.set_ylabel('y-axis')
ax.set_zlabel('z-axis')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
#ax.set_axis_off()
def interact_with_mesh_physical_model():
"""
build an interactive bar for users to zoom in and out
the mesh physical model.
"""
from IPython.html.widgets import interact
interact(show_mesh_physical_model, bound=(1, 10), tube_radius=(1, 10))
class PlotManager():
"""
Manage how to show the data generated by SodTube.
Roughly speaking, it is a wrapper of matplotlib
"""
def __init__(self):
pass
def plot_mesh(self, mesh):
pass
def plot_solution(self):
pass
def show_solution_comparison(self):
plt.show()
def get_plot_solutions_fig_rho(self,
solution_a,
solution_b,
solution_a_label="series 1",
solution_b_label="series 2"):
return self.get_plot_solutions_fig(solution_a,
solution_b,
1,
solution_a_label,
solution_b_label)
def get_plot_solutions_fig_v(self,
solution_a,
solution_b,
solution_a_label="series 1",
solution_b_label="series 2"):
return self.get_plot_solutions_fig(solution_a,
solution_b,
2,
solution_a_label,
solution_b_label)
def get_plot_solutions_fig_p(self,
solution_a,
solution_b,
solution_a_label="series 1",
solution_b_label="series 2"):
return self.get_plot_solutions_fig(solution_a,
solution_b,
3,
solution_a_label,
solution_b_label)
def get_plot_solutions_fig(self,
solution_a,
solution_b,
item,
solution_a_label="series 1",
solution_b_label="series 2"):
ax = self.get_solution_value_list(solution_a, 0)
ay = self.get_solution_value_list(solution_a, item)
bx = self.get_solution_value_list(solution_b, 0)
by = self.get_solution_value_list(solution_b, item)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title(solution_a_label + " v.s. " + solution_b_label)
ax1.scatter(ax, ay, s=10, c='b', marker="s", label=solution_a_label)
ax1.scatter(bx, by, s=10, c='r', marker="o", label=solution_b_label)
plt.legend(loc='upper left')
return fig
def get_solution_value_list(self, solution, item):
solution_item_list = []
for i in solution:
solution_item_list.append(i[item])
return solution_item_list
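# --- Usage sketch (added; not part of the original module) ---
# A minimal, hypothetical example of driving PlotManager. It assumes each
# solution is an iterable of records laid out as (x, rho, v, p), which is
# inferred from get_plot_solutions_fig_rho/_v/_p above; the synthetic step
# profiles below are made up purely for illustration.
if __name__ == '__main__':
    import numpy as np
    xs = np.linspace(-0.5, 0.5, 50)
    solution_a = [(x, 1.0 - 0.5 * (x > 0), 0.0, 1.0 - 0.9 * (x > 0)) for x in xs]
    solution_b = [(x, 1.0 - 0.4 * (x > 0), 0.0, 1.0 - 0.8 * (x > 0)) for x in xs]
    pm = PlotManager()
    # compare the density profiles of the two made-up solutions
    pm.get_plot_solutions_fig_rho(solution_a, solution_b,
                                  solution_a_label="analytic",
                                  solution_b_label="numerical")
    pm.show_solution_comparison()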
|
bsd-3-clause
|
google/decoupled_gaussian_process
|
utils/numpy_data_interface.py
|
1
|
7072
|
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class for processing and supplying data in numpy array format.
Features include, for example, generating random data, whitening data, sampling
minibatches of data, and computing hyperparameters based on heuristics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from decoupled_gaussian_process import utils
from decoupled_gaussian_process.utils import minibatch_index_manager
import numpy as np
import sklearn.preprocessing
import tensorflow as tf
Dataset = collections.namedtuple('Dataset', ['training', 'test'])
DataPoints = collections.namedtuple('DataPoints', ['x', 'y'])
NormalizationOps = collections.namedtuple(
'NormalizationOps',
['normalize_x', 'inv_normalize_x', 'normalize_y', 'inv_normalize_y'])
class NumpyDataInterface(object):
"""Data related stuff.
This class has functions for processing data, for example, generating random
data, whitening data, sampling minibatch of data, and computing
hyperparameters based on heuristics.
"""
def __init__(self, seed=0):
self.data = Dataset(DataPoints(None, None), DataPoints(None, None))
    self.normalized_data = Dataset(
DataPoints(None, None), DataPoints(None, None))
self.normalization_ops = NormalizationOps(None, None, None, None)
self._rng = np.random.RandomState(seed)
self._index_manager = None
def generate_data_with_random_uniform_extra_dimensions(
self,
func,
num_training_data_points,
num_test_data_points,
noise_stddev,
x_limit,
dim_extra=0):
"""Generates data.
Generates data with only the first dimension following `func`, other
dimensions are uniformly distributed noise.
Args:
func: function for the first dimension.
num_training_data_points: number of training samples.
num_test_data_points: number of test samples.
noise_stddev: standard deviation of Gaussian noise in training data.
x_limit: the domain of training data and the range of noisy dimensions.
dim_extra: extra noisy dimensions.
Returns:
`Dataset` namedtuple.
"""
x_training = self._rng.uniform(x_limit[0], x_limit[1],
(num_training_data_points, 1 + dim_extra))
y_training = (
func(x_training[:, [0]]) +
self._rng.randn(num_training_data_points, 1) * noise_stddev)
x_test = self._rng.uniform(x_limit[0], x_limit[1],
(num_test_data_points, 1 + dim_extra))
y_test = func(x_test[:, [0]])
return Dataset(
training=DataPoints(x=x_training, y=y_training),
test=DataPoints(x=x_test, y=y_test))
def prepare_data_for_minibatch(self,
data,
minibatch_size=None,
is_sequential=True,
with_whitening=False,
seed=0):
"""Prepare data with whitening for minibatch.
Args:
data: `Dataset` namedtuple.
minibatch_size: size of minibatch.
is_sequential: bool, indicates whether samples will be drawn randomly or
consecutively after shuffling.
with_whitening: bool, indicates whether data should be whitened.
seed: random seed.
"""
# Whitening or normalization.
if with_whitening:
x_scalar = sklearn.preprocessing.StandardScaler().fit(data.training.x)
y_scalar = sklearn.preprocessing.StandardScaler().fit(data.training.y)
self.normalization_ops = NormalizationOps(
normalize_x=lambda x: x_scalar.transform(x, copy=True),
inv_normalize_x=lambda x: x_scalar.inverse_transform(x, copy=True),
normalize_y=lambda x: y_scalar.transform(x, copy=True),
inv_normalize_y=lambda x: y_scalar.inverse_transform(x, copy=True))
else:
self.normalization_ops = NormalizationOps(
normalize_x=lambda x: x,
inv_normalize_x=lambda x: x,
normalize_y=lambda x: x,
inv_normalize_y=lambda x: x)
self.data = data
# Normalize all the data in advance.
training_data_points = DataPoints(
x=self.normalization_ops.normalize_x(self.data.training.x),
y=self.normalization_ops.normalize_y(self.data.training.y))
test_data_points = DataPoints(
x=self.normalization_ops.normalize_x(self.data.test.x),
y=self.normalization_ops.normalize_y(self.data.test.y))
self.normalized_data = Dataset(training_data_points, test_data_points)
# Setup index manager.
self._index_manager = minibatch_index_manager.MinibatchIndexManager(
self.data.training.x.shape[0], minibatch_size, is_sequential, seed)
def get_next_normalized_training_batch(self, increase_counter=True):
"""Retrieves the next batch of training data."""
idx = self._index_manager.get_next_minibatch_idx(increase_counter)
return (self.normalized_data.training.x[idx],
self.normalized_data.training.y[idx])
def build_feed_dict(self):
"""Build a feed_dict function.
The feed_dict function returns a minibatch of training dataset when
`during_training` is True, and returns the whole test dataset otherwise.
Returns:
Tuple, (tf.placeholder for x, tf.placeholder for y, feed_dict function).
"""
x_placeholder = tf.placeholder(
utils.tf_float, shape=[None, self.data.training.x.shape[1]])
y_placeholder = tf.placeholder(utils.tf_float, shape=[None, 1])
def feed_dict(during_training, idx_y=0):
if during_training:
x, y = self.get_next_normalized_training_batch()
else:
x = self.normalized_data.test.x
y = self.normalized_data.test.y
y = y[:, [idx_y]]
return {x_placeholder: x, y_placeholder: y}
return x_placeholder, y_placeholder, feed_dict
def sample_normalized_training_x(self, num_data_points_to_sample):
"""Sample x from normalized training data."""
idx = self._rng.randint(0, self.data.training.x.shape[0],
num_data_points_to_sample)
return self.normalized_data.training.x[idx]
def sample_normalized_training(self, num_data_points_to_sample):
"""Sample from normalized training data."""
idx = self._rng.randint(0, self.data.training.x.shape[0],
num_data_points_to_sample)
return (self.normalized_data.training.x[idx],
self.normalized_data.training.y[idx])
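# --- Usage sketch (added; not part of the original module) ---
# A hypothetical end-to-end walk-through of the flow described in the class
# docstring: generate a toy 1-D regression dataset with extra noisy
# dimensions, whiten it, and pull one normalized minibatch. All argument
# values below are illustrative only.
if __name__ == '__main__':
  interface = NumpyDataInterface(seed=0)
  dataset = interface.generate_data_with_random_uniform_extra_dimensions(
      func=np.sin,
      num_training_data_points=256,
      num_test_data_points=64,
      noise_stddev=0.1,
      x_limit=(-3.0, 3.0),
      dim_extra=2)
  interface.prepare_data_for_minibatch(
      dataset, minibatch_size=32, is_sequential=True, with_whitening=True)
  x_batch, y_batch = interface.get_next_normalized_training_batch()
  print('minibatch shapes:', x_batch.shape, y_batch.shape)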
|
apache-2.0
|
rflamary/POT
|
examples/plot_UOT_1D.py
|
2
|
1681
|
# -*- coding: utf-8 -*-
"""
===============================
1D Unbalanced optimal transport
===============================
This example illustrates the computation of Unbalanced Optimal transport
using a Kullback-Leibler relaxation.
"""
# Author: Hicham Janati <[email protected]>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
from ot.datasets import make_1D_gauss as gauss
##############################################################################
# Generate data
# -------------
#%% parameters
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a = gauss(n, m=20, s=5) # m= mean, s= std
b = gauss(n, m=60, s=10)
# make distributions unbalanced
b *= 5.
# loss matrix
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
M /= M.max()
##############################################################################
# Plot distributions and loss matrix
# ----------------------------------
#%% plot the distributions
pl.figure(1, figsize=(6.4, 3))
pl.plot(x, a, 'b', label='Source distribution')
pl.plot(x, b, 'r', label='Target distribution')
pl.legend()
# plot distributions and loss matrix
pl.figure(2, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, M, 'Cost matrix M')
##############################################################################
# Solve Unbalanced Sinkhorn
# -------------------------
# Sinkhorn
epsilon = 0.1 # entropy parameter
alpha = 1. # Unbalanced KL relaxation parameter
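# Formulation note (added; hedged): as we read POT's documentation, the
# entropic unbalanced problem solved by ot.unbalanced.sinkhorn_unbalanced
# below is, roughly,
#
#   min_{gamma >= 0}  <gamma, M>_F + reg * Omega(gamma)
#                     + reg_m * (KL(gamma 1, a) + KL(gamma^T 1, b))
#
# where Omega is the entropic regularizer and KL the generalized
# Kullback-Leibler divergence; `epsilon` and `alpha` above play the roles of
# `reg` and `reg_m`.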
Gs = ot.unbalanced.sinkhorn_unbalanced(a, b, M, epsilon, alpha, verbose=True)
pl.figure(4, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gs, 'UOT matrix Sinkhorn')
pl.show()
|
mit
|