repo_name | path | copies | size | content | license
---|---|---|---|---|---|
stevenzhang18/Indeed-Flask | lib/pandas/tseries/tests/test_util.py | 10 | 3569 | from pandas.compat import range
import nose
import numpy as np
from numpy.testing.decorators import slow
from pandas import Series, date_range
import pandas.util.testing as tm
from datetime import datetime, date
from pandas.tseries.tools import normalize_date
from pandas.tseries.util import pivot_annual, isleapyear
class TestPivotAnnual(tm.TestCase):
"""
New pandas of scikits.timeseries pivot_annual
"""
def test_daily(self):
rng = date_range('1/1/2000', '12/31/2004', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_annual(ts, 'D')
doy = ts.index.dayofyear
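# In non-leap years day-of-year 60 is March 1; shifting those entries up by one
# reserves column 60 for Feb 29 so every year aligns on a common 366-day axis.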
doy[(~isleapyear(ts.index.year)) & (doy >= 60)] += 1
for i in range(1, 367):
subset = ts[doy == i]
subset.index = [x.year for x in subset.index]
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
self.assertEqual(result.name, i)
# check leap days
leaps = ts[(ts.index.month == 2) & (ts.index.day == 29)]
day = leaps.index.dayofyear[0]
leaps.index = leaps.index.year
leaps.name = 60
tm.assert_series_equal(annual[day].dropna(), leaps)
def test_hourly(self):
rng_hourly = date_range(
'1/1/1994', periods=(18 * 8760 + 4 * 24), freq='H')
data_hourly = np.random.randint(100, 350, rng_hourly.size)
ts_hourly = Series(data_hourly, index=rng_hourly)
grouped = ts_hourly.groupby(ts_hourly.index.year)
hoy = grouped.apply(lambda x: x.reset_index(drop=True))
hoy = hoy.index.droplevel(0).values
hoy[~isleapyear(ts_hourly.index.year) & (hoy >= 1416)] += 24
hoy += 1
annual = pivot_annual(ts_hourly)
ts_hourly = ts_hourly.astype(float)
for i in [1, 1416, 1417, 1418, 1439, 1440, 1441, 8784]:
subset = ts_hourly[hoy == i]
subset.index = [x.year for x in subset.index]
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
self.assertEqual(result.name, i)
leaps = ts_hourly[(ts_hourly.index.month == 2) &
(ts_hourly.index.day == 29) &
(ts_hourly.index.hour == 0)]
hour = leaps.index.dayofyear[0] * 24 - 23
leaps.index = leaps.index.year
leaps.name = 1417
tm.assert_series_equal(annual[hour].dropna(), leaps)
def test_weekly(self):
pass
def test_monthly(self):
rng = date_range('1/1/2000', '12/31/2004', freq='M')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_annual(ts, 'M')
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = [x.year for x in subset.index]
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
self.assertEqual(result.name, i)
def test_period_monthly(self):
pass
def test_period_daily(self):
pass
def test_period_weekly(self):
pass
def test_normalize_date():
value = date(2012, 9, 7)
result = normalize_date(value)
assert(result == datetime(2012, 9, 7))
value = datetime(2012, 9, 7, 12)
result = normalize_date(value)
assert(result == datetime(2012, 9, 7))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
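# The tests above exercise pivot_annual only indirectly; the helper below is an
# illustrative sketch (ours, not part of the upstream file) showing the reshape
# directly. It reuses the imports at the top of this module; values are made up.
def _example_pivot_annual():
    rng = date_range('1/1/2000', '12/31/2001', freq='D')
    ts = Series(np.arange(len(rng), dtype=float), index=rng)
    annual = pivot_annual(ts, 'D')  # index: years, columns: day of year
    return annual[1].dropna()       # the Jan-1 value for each year (2000, 2001)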
| apache-2.0 |
sonnyhu/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 53 | 13398 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
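# For reference, a minimal sketch (ours, not part of the upstream tests) of the
# svmlight/libsvm text format parsed above: one sample per line, a label
# followed by index:value pairs. The bytes below are made up.
def _example_svmlight_format():
    f = BytesIO(b("1 0:2.5 3:-1.0\n-1 1:0.5\n"))
    X, y = load_svmlight_file(f, zero_based=True)
    return X.shape, y  # ((2, 4), array([ 1., -1.]))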
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
X_sparse, y_dense = load_svmlight_file(datafile)
X_dense = X_sparse.toarray()
y_sparse = sp.csr_matrix(y_dense)
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
for X in (X_sparse, X_dense, X_sliced):
for y in (y_sparse, y_dense, y_sliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
if (sp.issparse(y) and y.shape[0] == 1):
# make sure y's shape is: (n_samples, n_labels)
# when it is sparse
y = y.T
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
X2_dense = X2.toarray()
if dtype == np.float32:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 4)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 4)
else:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 15)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 15)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
y_sparse = sp.csr_matrix(y_dense)
for y in [y_dense, y_sparse]:
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
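# A minimal round-trip sketch (ours, not part of the upstream tests): dump a
# small dense array to the svmlight format and read it back; values are made up.
def _example_dump_roundtrip():
    X = [[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]]
    y = [1, -1]
    f = BytesIO()
    dump_svmlight_file(X, y, f, zero_based=True)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=True)
    return X2.toarray(), y2  # recovers X (as floats) and y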
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
# load svmfile with longint qid attribute
data = b("""
1 qid:0 0:1 1:2 2:3
0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""")
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
true_X = [[1, 2, 3],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985]]
true_y = [1, 0, 0, 3]
trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
f.seek(0)
X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f.seek(0)
X, y = load_svmlight_file(f, query_id=False, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X) | bsd-3-clause |
Sentient07/scikit-learn | sklearn/linear_model/sag.py | 18 | 11273 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
the maximum squared sum of X over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
"""
if loss in ('log', 'multinomial'):
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter : int, optional
The max number of passes over the training data if the stopping
criteria is not reached. Defaults to 1000.
tol : double, optional
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose : integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full passes over all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
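# Minimal usage sketch (ours, not part of the upstream module): call the solver
# directly on a small ridge-style problem; the data below are made up.
def _example_sag_solver():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = rng.randn(20)
    coef, n_iter_, warm_start_mem = sag_solver(X, y, loss='squared', alpha=1.)
    return coef.shape, n_iter_  # ((3,), number of passes actually performed)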
| bsd-3-clause |
emd/random_data | random_data/ensemble.py | 1 | 14616 | '''This module implements a class that conveniently defines *ensembles*
for the analysis of stationary and non-stationary random processes.
It is assumed that all spectral analysis is done with the FFT.
'''
# Standard library imports
import numpy as np
from matplotlib import mlab
class Ensemble(object):
'''A class that defines ensemble properties for a random process.
For random process `x`, statistical property `Q` is estimated as
Q = E[M{x(t)}]
where `E[...]` denotes the expectation value operator and
`M{...}` is the corresponding moment or transform corresponding
to property `Q`.
The expectation value is typically computed by averaging over
several "realizations" of the process. Each realization is
an independent measurement of the process, and the collection
of these realizations is known as the "ensemble". Typically,
random error in estimates of the process' properties decreases
as the number of realizations per ensemble increases;
however, for fixed ensemble size, the spectral resolution
typically decreases as the number of realizations increases.
For analysis of non-stationary signals, the ensemble time window
should be chosen small enough such that the statistical properties
of the process do not change appreciably over the time window
(this can be thought of as an "adiabatic" condition).
It is assumed that all spectral analysis is done with the FFT.
Attributes:
-----------
Nreal_per_ens - int
The number of realizations per ensemble.
Npts_per_real - int
The number of sample points per realization.
Npts_overlap - int
The number of overlapping points between adjacent realizations.
Npts_per_ens - int
The number of sample points per ensemble.
Fs - float
The signal sampling rate, as specified at object initialization.
[Fs] = arbitrary units
t - array_like, (`M`,)
The temporal midpoint of each ensemble.
[t] = 1 / [Fs]
f - array_like, (`L`,)
The frequencies at which spectral quantities can be estimated
with the defined ensemble.
[f] = [Fs]
dt - float
The temporal resolution between ensembles.
[dt] = 1 / [Fs]
df - float
The frequency resolution with which spectral quantities
can be estimated with the defined ensemble.
[df] = 1 / [Fs]
Methods:
--------
Type `help(Ensemble)` in the IPython console for a listing.
'''
def __init__(self, x, Fs=1.0, t0=0.,
Tens=40960., Nreal_per_ens=10, fraction_overlap=0.5,
Npts_per_real=None, Npts_overlap=None):
'''Create an instance of the `Ensemble` class.
Input Parameters:
-----------------
x - array_like, (`N`,)
The signal that is being split into ensembles.
[x] = arbitrary units
Fs - float
The sampling rate of `x`. If not specified, `Fs` is assigned
a value of unity such that all frequencies are *normalized*
to the sampling rate.
[Fs] = arbitrary units
t0 - float
The initial time corresponding to `x[0]`.
[t0] = 1 / [Fs]
Tens - float
The time window defining an ensemble. `Tens` determines the
time resolution of the spectral density calculations,
with larger `Tens` corresponding to reduced time resolution
and increased frequency resolution.
[Tens] = 1 / [Fs]
Nreal_per_ens - int
The number of realizations per ensemble. Typically,
increasing the number of realizations decreases random error
but decreases spectral resolution. A ValueError is raised
if not a positive integer.
fraction_overlap - float
The fractional overlap between adjacent realizations.
0 =< `fraction_overlap` < 1, otherwise a ValueError is raised.
Npts_per_real - int
The number of sample points per realization. If None,
`Tens` is used to compute `Npts_per_real` that is compatible
with `Nreal_per_ens` and efficient FFT computation.
If not None, `Tens` is ignored. A ValueError is raised
if not a positive integer.
Npts_overlap - int
The number of overlapping sample points between adjacent
realizations. If None, `fraction_overlap` sets the
number of overlapping sample points. If not None,
`fraction_overlap` is ignored. A ValueError is raised
if not a positive integer or if greater than or equal to
the number of points per realization.
'''
self.Fs = Fs
# Assign number of realizations per ensemble, if valid
if Nreal_per_ens > 0 and isinstance(Nreal_per_ens, int):
self.Nreal_per_ens = Nreal_per_ens
else:
raise ValueError('`Nreal_per_ens` must be a positive integer!')
# Assign number of sample points to use per realization
if Npts_per_real is None:
self.Npts_per_real = self.getNumPtsPerReal(
Fs, Tens, self.Nreal_per_ens, fraction_overlap)
elif Npts_per_real > 0 and isinstance(Npts_per_real, int):
self.Npts_per_real = Npts_per_real
else:
raise ValueError('`Npts_per_real` must be a positive integer!')
# Determine number of overlapping points between adjacent realizations
if Npts_overlap is None:
if fraction_overlap >= 0 and fraction_overlap < 1:
self.Npts_overlap = np.int(
fraction_overlap * self.Npts_per_real)
else:
raise ValueError('`fraction_overlap` must be between 0 and 1!')
else:
if Npts_overlap < 0 or not isinstance(Npts_overlap, int):
raise ValueError('`Npts_overlap` must be an integer >= 0!')
elif Npts_overlap >= self.Npts_per_real:
raise ValueError('`Npts_overlap` must be < `Npts_per_real`!')
else:
self.Npts_overlap = Npts_overlap
# Determine number of points per ensemble
self.Npts_per_ens = self.getNumPtsPerEns()
# Generate times `t` corresponding to the midpoint of each ensemble,
# and compute the frequencies `f` at which spectral estimates
# can be made with the defined ensemble
self.t = self.getTimes(x, Fs, t0)
self.f = self.getFrequencies(Fs)
# Determine resolution in time and frequency, if applicable
self.dt = self.getTens()
try:
self.df = self.f[1] - self.f[0]
except IndexError:
self.df = np.nan
def getNumPtsPerReal(self, Fs, Tens, Nreal_per_ens, fraction_overlap):
'''Get number of points per realization.
As the number of points must be a whole number, there will
generally be round-off error such that the resulting ensemble
time window is slightly different than the specified `Tens`.
Further, to ensure efficient FFT computation, the number of
points per ensemble is required to be a power of two,
potentially leading to even larger differences between
the resulting ensemble time window and the spec'd `Tens`.
This function should be called *before* `self.getNumPtsPerEns(...)`.
'''
# If a given ensemble of length `Tens` consists of `Nreal_per_ens`
# (potentially overlapping) realizations, each of length `Treal`,
# then
#
# Tens = Treal * {1 + [(Nreal_per_ens - 1) * (1 - fraction_overlap)]}
#
# where `fraction_overlap` is the fractional overlap between
# adjacent realizations. `Treal` is easily solved for.
denominator = 1 + ((Nreal_per_ens - 1) * (1 - fraction_overlap))
Treal = np.float(Tens) / denominator # avoid integer division!
# Ensure the number of points per realization is a power of 2,
# allowing efficient computation of the FFT. In the past,
# rounding `Treal * Fs` to the next largest power of 2 led
# to normalization errors, so we always round down to the
# largest power of 2 less than `Treal * Fs`.
return _largest_power_of_2_leq(Treal * Fs)
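# Worked example (ours): with the defaults Fs=1.0, Tens=40960.,
# Nreal_per_ens=10 and fraction_overlap=0.5, the denominator is
# 1 + (9 * 0.5) = 5.5, so Treal = 40960 / 5.5 ~= 7447.3 points, and
# rounding down to a power of 2 gives 4096 points per realization.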
def getNumPtsPerEns(self):
'Get number of points per ensemble.'
# In a given ensemble, there are `Nreal_per_ens` (potentially
# overlapping) realizations. Each of these realizations
# contains `Npts_per_real` sample points. Thus, the first
# realization contributes `Npts_per_real` sample points.
Npts_per_ens = self.Npts_per_real
# If there `Npts_overlap` overlapping sample points between
# adjacent realizations, the remaining (`Nreal_per_ens` - 1)
# realizations each contribute (`Npts_per_real` - `Npts_overlap`)
# distinct sample points.
distinct_points_per_real = self.Npts_per_real - self.Npts_overlap
Npts_per_ens += ((self.Nreal_per_ens - 1) * distinct_points_per_real)
return Npts_per_ens
def getFrequencies(self, Fs):
'''Get frequencies at which spectral quantities can be estimated
with the defined ensemble. Only f >= 0 is returned.
'''
return np.fft.rfftfreq(self.Npts_per_real, d=(1. / Fs))
def getTens(self):
'''Get temporal length of each ensemble. In general, the
ensemble time window will slightly differ from that specified
during object initialization; this is to ensure efficient FFT
computation. This method returns the true ensemble time length.
'''
# avoid integer division!
return self.Npts_per_ens / np.float(self.Fs)
def getTimes(self, x, Fs, t0):
'Get times corresponding to the midpoint of each ensemble.'
# The ensemble forms the basic unit/discretization of time
# for the computed spectral density estimate, so determine
# ensemble time length.
Tens = self.getTens()
# Determine the number of *whole* ensembles in the data record
# (Disregard fractional ensemble at the end of the data, if present)
Nens = np.int(len(x) / self.Npts_per_ens)
# The returned time base corresponds to the midpoint of each ensemble
return t0 + (Tens * np.arange(0.5, Nens, 1))
def getFFTs(self, x, detrend=mlab.detrend_none,
window=mlab.window_hanning):
'''Get array of FFTs corresponding to each realization of `x`.
Parameters:
-----------
x - array_like, (`N`,)
Signal to be analyzed. Signal is split into several
realizations, and the FFT of each realization is computed.
[x] = arbitrary units
detrend - string
The function applied to each realization before taking FFT.
May be [ 'default' | 'constant' | 'mean' | 'linear' | 'none']
or callable, as specified in :py:func: `csd <matplotlib.mlab.csd>`.
*Warning*: Naively detrending (even with something as simple as
`mean` or `linear` detrending) can introduce detrimental artifacts
into the computed spectrum, so *no* detrending is the default.
window - callable or ndarray
The window applied to each realization before taking FFT,
as specified in :py:func: `csd <matplotlib.mlab.csd>`.
Returns:
--------
Xk - array_like, (L, M, N) where
L = `len(self.f)` = `(self.Npts_per_real // 2) + 1`,
M = number of whole ensembles in data record `x`, and
N = `self.Nreal_per_ens`
The FFTs of each realization in each ensemble.
The FFTs are indexed by frequency, ensemble, and realization.
[Xk] = [x]
'''
# Only real-valued signals are expected/supported at the moment
if np.iscomplexobj(x):
raise ValueError('`x` must be a real-valued signal!')
# Determine the number of *whole* ensembles in the data record
# (Disregard fractional ensemble at the end of the data, if present)
Nens = np.int(len(x) / self.Npts_per_ens)
# Determine number of frequencies in 1-sided FFT, noting that
# `self.Npts_per_real` is constrained to be a power of 2
Nf = (self.Npts_per_real // 2) + 1
# Initialize.
Xk = np.zeros(
(Nf, Nens, self.Nreal_per_ens),
dtype='complex')
# Loop through each ensemble, computing the FFT of each realization
# via strides for efficient use of memory. (Note that the below
# procedure closely parallels that of Matplotlib's internal function
#
# :py:func:`_spectral_helper <matplotlib.mlab._spectral_helper>`
#
# Here, we use our own implementation so as not to rely on
# an internal function)
stride_axis = 0
for ens in np.arange(Nens):
# Split the ensemble into realizations
sl = slice(
ens * self.Npts_per_ens,
(ens + 1) * self.Npts_per_ens)
result = mlab.stride_windows(
x[sl],
self.Npts_per_real,
self.Npts_overlap,
axis=stride_axis)
# Detrend each realization
result = mlab.detrend(
result,
detrend,
axis=stride_axis)
# Window each realization (power loss compensated outside loop)
result, windowVals = mlab.apply_window(
result,
window,
axis=stride_axis,
return_window=True)
# Finally compute and return the FFT of each realization
Xk[:, ens, :] = np.fft.rfft(result, axis=stride_axis)
# Compensate for windowing power loss
norm = np.sqrt(np.mean((np.abs(windowVals)) ** 2))
Xk /= norm
return Xk
def _largest_power_of_2_leq(x):
'Get the largest power of 2 that is less than or equal to `x`.'
exponent = np.log2(x) # exact
exponent = np.int(exponent) # next lowest power of 2
return 2 ** exponent
def closest_index(v, val):
'Return integer index of entry in `v` closest in value to `val`.'
delta = np.abs(v - val)
return np.where(delta == np.min(delta))[0][0]
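# Minimal usage sketch (ours, not part of the upstream module): build an
# ensemble from a random signal and compute the per-realization FFTs; the
# parameter values below are made up.
def _example_ensemble():
    Fs = 4e6                           # arbitrary sampling rate
    x = np.random.randn(10 * 40960)    # fake stationary signal
    ens = Ensemble(x, Fs=Fs, t0=0., Tens=(40960. / Fs), Nreal_per_ens=10)
    Xk = ens.getFFTs(x)                # shape: (len(ens.f), Nens, Nreal_per_ens)
    return ens.f.shape, ens.t.shape, Xk.shape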
| gpl-2.0 |
mtat76/atm-py | atmPy/for_removal/mie/bhmie.py | 6 | 20987 | # from numpy import *
import numpy as np
import pandas as pd
class bhmie_hagen():
"""
This file is converted from mie.m, see http://atol.ucsd.edu/scatlib/index.htm
Bohren and Huffman originally published the code in their book on light scattering
Calculation based on Mie scattering theory
Parameters
----------
x: size parameter = k*radius = 2pi/lambda * radius
(lambda is the wavelength in the medium around the scatterers)
refrel - refraction index (n in complex form for example: 1.5+0.02*i;
nang - number of angles for S1 and S2 function in range from 0 to pi/2
input optional:
diameter - needed to calculate the cross sections; note that this is the
diameter, not the radius
Returns
-------
S1, S2 - functions which correspond to the (complex) phase functions
Qext - extinction efficiency
Qsca - scattering efficiency
Qback - backscatter efficiency
gsca - asymmetry parameter"""
def __init__(self, x, refrel, noOfAngles, diameter=False):
self.diameter = diameter
self.noOfAngles = noOfAngles
self.sizeParameter = x
self.indOfRefraction = refrel
self.normalizer = (
4 * np.pi) ** 2 # hagen: the physical origin is not clear to me right now, but this normalizer
# is necessary so the integral of the scattering function is equal to the
# scattering crossection and the integral over the phase function is 4 pi
s1_1=np.zeros(self.noOfAngles,dtype=np.complex128)
s1_2=np.zeros(self.noOfAngles,dtype=np.complex128)
s2_1=np.zeros(self.noOfAngles,dtype=np.complex128)
s2_2=np.zeros(self.noOfAngles,dtype=np.complex128)
pi=np.zeros(self.noOfAngles,dtype=np.complex128)
tau=np.zeros(self.noOfAngles,dtype=np.complex128)
if (self.noOfAngles > 1000):
print('error: noOfAngles > mxnang=1000 in bhmie')
return
# Require NANG>1 in order to calculate scattering intensities
if (self.noOfAngles < 2):
self.noOfAngles = 2
pii = 4.*np.arctan(1.)
self.calc_noOfTerms()
dang = .5*pii/ (self.noOfAngles-1)
amu=np.arange(0.0,self.noOfAngles,1)
amu=np.cos(amu*dang)
pi0=np.zeros(self.noOfAngles,dtype=np.complex128)
pi1=np.ones(self.noOfAngles,dtype=np.complex128)
# TODO - get rid of this nonsense! Why rename?
logDeriv = self.get_logDeriv()
# Riccati-Bessel functions with real argument X
# calculated by upward recurrence
psi0 = np.cos(self.sizeParameter)
psi1 = np.sin(self.sizeParameter)
chi0 = -np.sin(self.sizeParameter)
chi1 = np.cos(self.sizeParameter)
xi1 = psi1-chi1*1j
qsca = 0.
gsca = 0.
p = -1
for n in range(0, self.noOfTermses[0]):
en = n+1.0
fn = (2.*en+1.)/(en* (en+1.))
# for given N, PSI = psi_n CHI = chi_n
# PSI1 = psi_{n-1} CHI1 = chi_{n-1}
# PSI0 = psi_{n-2} CHI0 = chi_{n-2}
# Calculate psi_n and chi_n
psi = (2.*en-1.)*psi1/self.sizeParameter - psi0
chi = (2.*en-1.)*chi1/self.sizeParameter - chi0
xi = psi-chi*1j
# Store previous values of AN and BN for use
# in computation of g=<cos(theta)>
if (n > 0):
an1 = an
bn1 = bn
'''
These are the key parameters for the Mie calculations, an and bn,
used to comute the amplitudes of the scattering field.
'''
an = (logDeriv[n]/self.indOfRefraction+en/self.sizeParameter)*psi - psi1
an /= ((logDeriv[n]/self.indOfRefraction+en/self.sizeParameter)*xi-xi1)
bn = (self.indOfRefraction*logDeriv[n]+en/self.sizeParameter)*psi - psi1
bn /= ((self.indOfRefraction*logDeriv[n]+en/self.sizeParameter)*xi-xi1)
#*** Augment sums for Qsca and g=<cos(theta)>
qsca += (2.*en+1.)* (abs(an)**2+abs(bn)**2)
gsca += ((2.*en+1.)/ (en* (en+1.)))*( np.real(an)* np.real(bn)+np.imag(an)*np.imag(bn))
if (n > 0):
gsca += ((en-1.)* (en+1.)/en)*( np.real(an1)* np.real(an)+np.imag(an1)*np.imag(an)+np.real(bn1)* np.real(bn)+np.imag(bn1)*np.imag(bn))
#*** Now calculate scattering intensity pattern
# First do angles from 0 to 90
pi=0+pi1 # 0+pi1 because we want a hard copy of the values
tau=en*amu*pi-(en+1.)*pi0
s1_1 += fn* (an*pi+bn*tau)
s2_1 += fn* (an*tau+bn*pi)
#*** Now do angles greater than 90 using PI and TAU from
# angles less than 90.
# P=1 for N=1,3,...% P=-1 for N=2,4,...
# remember that we have to reverse the order of the elements
# of the second part of s1 and s2 after the calculation
p = -p
s1_2+= fn*p* (an*pi-bn*tau)
s2_2+= fn*p* (bn*pi-an*tau)
psi0 = psi1
psi1 = psi
chi0 = chi1
chi1 = chi
xi1 = psi1-chi1*1j
#*** Compute pi_n for next value of n
# For each angle J, compute pi_n+1
# from PI = pi_n , PI0 = pi_n-1
pi1 = ((2.*en+1.)*amu*pi- (en+1.)*pi0)/ en
pi0 = 0+pi # 0+pi because we want a hard copy of the values
#*** Have summed sufficient terms.
# Now compute QSCA,QEXT,QBACK,and GSCA
# we have to reverse the order of the elements of the second part of s1 and s2
s1=np.concatenate((s1_1,s1_2[-2::-1]))
s2=np.concatenate((s2_1,s2_2[-2::-1]))
gsca = 2.*gsca/qsca
qsca = (2./ (self.sizeParameter**2))*qsca
# qext = (4./ (self.sizeParameter**2))* real(s1[0])
# more common definition of the backscattering efficiency,
# so that the backscattering cross section really
# has dimension of length squared
# qback = 4*(abs(s1[2*self.noOfAngles-2])/self.sizeParameter)**2
#qback = ((abs(s1[2*self.noOfAngles-2])/self.sizeParameter)**2 )/pii #old form
self.s1 = s1
self.s2 = s2
# self.qext = qext
self.calc_qext()
self.qsca = qsca
# self.qback = qback
self.calc_qback()
self.gsca = gsca
if self.diameter:
self.csca = self.qsca * self.diameter**2 * np.pi * 0.5**2 #scattering crosssection
else:
self.csca = 0
def get_logDeriv(self):
""" Logarithmic derivative D(J) calculated by downward recurrence
beginning with initial value (0.,0.) at J=NMX
"""
y = self.sizeParameter * self.indOfRefraction
nn = int(self.noOfTermses[1]) - 1
d=np.zeros(nn+1,dtype=np.complex128)
for n in range(0,nn):
en = self.noOfTermses[1] - n
d[nn-n-1] = (en/y) - (1./ (d[nn-n]+en/y))
return d
def get_natural(self):
return np.abs(self.s1)**2 + np.abs(self.s2)**2
def get_perpendicular(self):
return np.abs(self.s1)**2
def get_parallel(self):
return np.abs(self.s2)**2
def calc_noOfTerms(self):
"""Original comment:
Series expansion terminated after NSTOP (noOfTerms) terms
Logarithmic derivatives calculated from NMX on down
BTD experiment 91/1/15: add one more term to series and compare results
NMX=AMAX1(XSTOP,YMOD)+16
test: compute 7001 wavelengths between .0001 and 1000 micron
for a=1.0micron SiC grain. When NMX increased by 1, only a single
computed number changed (out of 4*7001) and it only changed by 1/8387
conclusion: we are indeed retaining enough terms in series!
"""
ymod = abs(self.sizeParameter*self.indOfRefraction)
xstop = self.sizeParameter + 4.*self.sizeParameter**0.3333 + 2.0
#xstop = x + 4.*x**0.3333 + 10.0
nmx = max(xstop,ymod) + 15.0
nmx=np.fix(nmx)
self.noOfTermses = (int(xstop),nmx)
# Hagen: no idea what exactly the following is doing?!?
nmxx=150000
if (nmx > nmxx):
raise ValueError( "error: nmx > nmxx=%f for |m|x=%f" % ( nmxx, ymod) )
def calc_qsca(self):
"""scattering efficiency"""
return 0
def calc_qext(self):
"""extinction efficiency. normalized real part of s1 at 0 deg (forward)"""
self.qext = (4./ (self.sizeParameter**2))* np.real(self.s1[0])
if self.diameter:
self.cext = self.qext * self.diameter**2 * np.pi * 0.5**2
else:
self.cext = 0
def calc_qback(self):
""" Backscattering efficiency. Looks like it simpy locks for the efficiency
at 180 deg... I am surprised why they are not simpy taking the last one?
-> it is the same!! -> fixed"""
self.qback = 4*(abs(self.s1[-1])/self.sizeParameter)**2
def get_phase_func(self):
""" Returns the phase functions in the interval [0,2*pi).
Note
----
The phase function is normalized such that the integral over the entire sphere is 4*pi
"""
# out = self.get_angular_scatt_func() * 4 * np.pi/self.csca
s2r = self.s2[::-1]
s2f = np.append(self.s2, s2r[1:])
s2s = np.abs(s2f) ** 2
# ang = np.linspace(0, np.pi * 2, len(s2s))
# df = pd.DataFrame(s2s, index=ang, columns=['Phase_function_parallel'])
# df.index.name = 'Angle'
s1r = self.s1[::-1]
s1f = np.append(self.s1, s1r[1:])
s1s = np.abs(s1f) ** 2
s12s = (s1s + s2s) / 2
ang = np.linspace(0, np.pi * 2, len(s1s))
df = pd.DataFrame(np.array([s1s, s2s, s12s]).transpose(), index=ang,
columns=['perpendicular', 'parallel', 'natural'])
df.index.name = 'angle'
df *= 4 * np.pi / (np.pi * self.sizeParameter ** 2 * self.qsca)
return df
def get_angular_scatt_func(self):
"""
Returns the angular scattering function for parallel scattering geometry in the interval [0,2*pi).
Note
----
The integral of 'natural' over the entire sphere is equal to the scattering cross section.
>>> natural = natural[theta < np.pi] # to ensure integration from 0 to pi
>>> theta = theta[theta < np.pi]
>>> integrate.simps(natural * np.sin(theta) ,theta) * 2 * np.pi # this is equal to scattering crossection
"""
df = self.get_phase_func()
df *= self.csca / (4 * np.pi)
return df
# def get_phase_func_parallel(self):
# """
# Returns the angular scattering function for parallel scattering geometry in the interval [0,2*pi).
#
# Note
# ----
# This is not exactly the phase function since it is not normalized to the integrated intensity.
# However, normalizing here results in the loss of information, which might be valuable later. To get the
# phase function multiply this value by 4*pi/self.csca.
# """
# s2r = self.s2[::-1]
# s2f = np.append(self.s2, s2r[1:])
# s2s = np.abs(s2f) ** 2 / self.normalizer
# ang = np.linspace(0, np.pi * 2, len(s2s))
# df = pd.DataFrame(s2s, index=ang, columns=['Phase_function_parallel'])
# df.index.name = 'Angle'
# return df
#
# def get_phase_func_perp(self):
# """
# Returns the angular scattering function for perpendicular scattering geometry in the interval [0,2*pi)
#
# Note
# ----
# This is not exactly the phase function since it is not normalized to the integrated intensity.
# However, normalizing here results in the loss of information, which might be valuable later. To get the
# phase function multiply this value by 4*pi/self.csca.
# """
# s1r = self.s1[::-1]
# s1f = np.append(self.s1, s1r[1:])
# s1s = np.abs(s1f) ** 2 / self.normalizer
# ang = np.linspace(0, np.pi * 2, len(s1s))
# df = pd.DataFrame(s1s, index=ang, columns=['Phase_function_perp'])
# df.index.name = 'Angle'
# return df
def return_Values_as_dict(self):
# pFperp = self.get_phase_func_perp()
# pFpara = self.get_phase_func_parallel()
# pFnat = pd.DataFrame((pFperp.iloc[:, 0] + pFpara.iloc[:, 0]) / 2., columns=['Phase_function_natural'])
return { # 'phaseFct_S1': self.s1,
# 'phaseFct_S2': self.s2,
# 'angular_scattering_function_perp': pFperp,
# 'angular_scattering_function_parallel': pFpara,
# 'angular_scattering_function_natural': pFnat,
'extinction_efficiency': self.qext,
'scattering_efficiency': self.qsca,
'backscatter_efficiency': self.qback,
'asymmetry_parameter': self.gsca,
'scattering_crosssection': self.csca,
'extinction_crosssection': self.cext}
def return_Values(self):
return self.s1, self.s2, self.qext, self.qsca, self.qback, self.gsca
def bhmie(x,refrel,nang):
""" This file is converted from mie.m, see http://atol.ucsd.edu/scatlib/index.htm
Bohren and Huffman originally published the code in their book on light scattering
Calculation based on Mie scattering theory
input:
x - size parameter = k*radius = 2pi/lambda * radius
(lambda is the wavelength in the medium around the scatterers)
refrel - refraction index (n in complex form for example: 1.5+0.02*i;
nang - number of angles for S1 and S2 function in range from 0 to pi/2
output:
S1, S2 - functions which correspond to the (complex) phase functions
Qext - extinction efficiency
Qsca - scattering efficiency
Qback - backscatter efficiency
gsca - asymmetry parameter"""
nmxx=150000
s1_1=zeros(nang,dtype=complex128)
s1_2=zeros(nang,dtype=complex128)
s2_1=zeros(nang,dtype=complex128)
s2_2=zeros(nang,dtype=complex128)
pi=zeros(nang,dtype=complex128)
tau=zeros(nang,dtype=complex128)
if (nang > 1000):
print('error: nang > mxnang=1000 in bhmie')
return
# Require NANG>1 in order to calculate scattering intensities
if (nang < 2):
nang = 2
pii = 4.*arctan(1.)
dx = x
drefrl = refrel
y = x*drefrl
ymod = abs(y)
# Series expansion terminated after self.noOfTerms terms
# Logarithmic derivatives calculated from NMX on down
xstop = x + 4.*x**0.3333 + 2.0
#xstop = x + 4.*x**0.3333 + 10.0
nmx = max(xstop,ymod) + 15.0
nmx=fix(nmx)
# BTD experiment 91/1/15: add one more term to series and compare results
# NMX=AMAX1(XSTOP,YMOD)+16
# test: compute 7001 wavelengths between .0001 and 1000 micron
# for a=1.0micron SiC grain. When NMX increased by 1, only a single
# computed number changed (out of 4*7001) and it only changed by 1/8387
# conclusion: we are indeed retaining enough terms in series!
nstop = int(xstop)
if (nmx > nmxx):
print( "error: nmx > nmxx=%f for |m|x=%f" % ( nmxx, ymod) )
return
dang = .5*pii/ (nang-1)
amu=arange(0.0,nang,1)
amu=cos(amu*dang)
pi0=zeros(nang,dtype=complex128)
pi1=ones(nang,dtype=complex128)
# Logarithmic derivative D(J) calculated by downward recurrence
# beginning with initial value (0.,0.) at J=NMX
nn = int(nmx)-1
d=zeros(nn+1,dtype=complex128)
for n in range(0,nn):
en = nmx - n
d[nn-n-1] = (en/y) - (1./ (d[nn-n]+en/y))
#*** Riccati-Bessel functions with real argument X
# calculated by upward recurrence
psi0 = cos(dx)
psi1 = sin(dx)
chi0 = -sin(dx)
chi1 = cos(dx)
xi1 = psi1-chi1*1j
qsca = 0.
gsca = 0.
p = -1
for n in range(0,nstop):
en = n+1.0
fn = (2.*en+1.)/(en* (en+1.))
# for given N, PSI = psi_n CHI = chi_n
# PSI1 = psi_{n-1} CHI1 = chi_{n-1}
# PSI0 = psi_{n-2} CHI0 = chi_{n-2}
# Calculate psi_n and chi_n
psi = (2.*en-1.)*psi1/dx - psi0
chi = (2.*en-1.)*chi1/dx - chi0
xi = psi-chi*1j
#*** Store previous values of AN and BN for use
# in computation of g=<cos(theta)>
if (n > 0):
an1 = an
bn1 = bn
#*** Compute AN and BN:
an = (d[n]/drefrl+en/dx)*psi - psi1
an = an/ ((d[n]/drefrl+en/dx)*xi-xi1)
bn = (drefrl*d[n]+en/dx)*psi - psi1
bn = bn/ ((drefrl*d[n]+en/dx)*xi-xi1)
#*** Augment sums for Qsca and g=<cos(theta)>
qsca += (2.*en+1.)* (abs(an)**2+abs(bn)**2)
gsca += ((2.*en+1.)/ (en* (en+1.)))*( real(an)* real(bn)+imag(an)*imag(bn))
if (n > 0):
gsca += ((en-1.)* (en+1.)/en)*( real(an1)* real(an)+imag(an1)*imag(an)+real(bn1)* real(bn)+imag(bn1)*imag(bn))
#*** Now calculate scattering intensity pattern
# First do angles from 0 to 90
pi=0+pi1 # 0+pi1 because we want a hard copy of the values
tau=en*amu*pi-(en+1.)*pi0
s1_1 += fn* (an*pi+bn*tau)
s2_1 += fn* (an*tau+bn*pi)
#*** Now do angles greater than 90 using PI and TAU from
# angles less than 90.
# P=1 for N=1,3,...% P=-1 for N=2,4,...
# remember that we have to reverse the order of the elements
# of the second part of s1 and s2 after the calculation
p = -p
s1_2+= fn*p* (an*pi-bn*tau)
s2_2+= fn*p* (bn*pi-an*tau)
psi0 = psi1
psi1 = psi
chi0 = chi1
chi1 = chi
xi1 = psi1-chi1*1j
#*** Compute pi_n for next value of n
# For each angle J, compute pi_n+1
# from PI = pi_n , PI0 = pi_n-1
pi1 = ((2.*en+1.)*amu*pi- (en+1.)*pi0)/ en
pi0 = 0+pi # 0+pi because we want a hard copy of the values
#*** Have summed sufficient terms.
# Now compute QSCA,QEXT,QBACK,and GSCA
# we have to reverse the order of the elements of the second part of s1 and s2
s1=concatenate((s1_1,s1_2[-2::-1]))
s2=concatenate((s2_1,s2_2[-2::-1]))
gsca = 2.*gsca/qsca
qsca = (2./ (dx*dx))*qsca
qext = (4./ (dx*dx))* real(s1[0])
# more common definition of the backscattering efficiency,
# so that the backscattering cross section really
# has dimension of length squared
qback = 4*(abs(s1[2*nang-2])/dx)**2
#qback = ((abs(s1[2*nang-2])/dx)**2 )/pii #old form
return s1,s2,qext,qsca,qback,gsca
if __name__ == "__main__":
# x = 10
x_sizePara = 5
n_refraction = 1.5 + 0.01j
nang_no = 10
bhh = bhmie_hagen(x_sizePara, n_refraction, nang_no)
s1,s2,qext,qsca,qback,gsca = bhh.return_Values()
s1,s2,qext,qsca,qback,gsca = bhmie(x_sizePara,n_refraction,nang_no)
def test_extinction_coeff():
wl = .55
d = .1
ref = 1.455
sp = lambda wl,d : 2*np.pi/wl * d/2
mie = bhmie_hagen(sp(wl,d),ref, 100, diameter=d)
mo_I = mie.return_Values_as_dict()
wl = .55
d = .1
ref = 1.1
sp = lambda wl,d : 2*np.pi/wl * d/2
mie = bhmie_hagen(sp(wl,d),ref, 100, diameter=d)
mo_II = mie.return_Values_as_dict()
wl = .55
d = .1
ref = 4.
sp = lambda wl,d : 2*np.pi/wl * d/2
mie = bhmie_hagen(sp(wl,d),ref, 100, diameter=d)
mo_III = mie.return_Values_as_dict()
test_I_is = mo_II['extinction_crosssection']/mo_I['extinction_crosssection']
test_I_should = 0.0527297452683
test_II_is = mo_III['extinction_crosssection']/mo_I['extinction_crosssection']
test_II_should = 14.3981634837
print('test value 1 is/should be: %s/%s'%(test_I_is, test_I_should))
print('test value 2 is/should be: %s/%s'%(test_II_is, test_II_should))
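# Minimal usage sketch (ours, not part of the upstream module): the size
# parameter is x = 2*pi*radius/lambda = pi*d/lambda, so a d = 0.5 um sphere at
# a 0.55 um wavelength gives x = pi * 0.5 / 0.55 ~= 2.86. The refractive index
# below is made up.
def _example_bhmie_hagen():
    wl, d = 0.55, 0.5
    mie = bhmie_hagen(np.pi * d / wl, 1.5 + 0.01j, 100, diameter=d)
    return mie.return_Values_as_dict()['scattering_crosssection']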
| mit |
georgebv/coastlib | tests/plotting/test_rose.py | 1 | 4524 | from coastlib.plotting.rose import get_rose_parameters, rose_plot
import pandas as pd
import pytest
import matplotlib.pyplot as plt
import os
import numpy as np
from matplotlib.testing.compare import compare_images
plt.ioff()
data_folder = os.sep.join([*os.path.realpath(__file__).split(os.sep)[:-2], '_common_data'])
@pytest.fixture
def wind_data():
data = pd.read_pickle(os.path.join(data_folder, 'rr_wind_speed.pyc'))
return data[~np.isnan(data['s']) & ~np.isnan(data['d'])]
def test_get_rose_parameters_basic(wind_data):
theta, radii, bottoms, colors, calm_percentage, value_bin_boundaries = get_rose_parameters(
values=wind_data['s'].values, directions=wind_data['d'].values,
value_bin_boundaries=np.arange(6, 18, 3), calm_value=3, n_dir_bins=6,
center_on_north=True
)
assert np.isclose(calm_percentage, 6.656072526441932)
assert np.allclose(value_bin_boundaries, np.array([3., 6., 9., 12., 15., np.inf]))
assert theta.shape == (5, 6)
assert np.isclose(theta[0][0], 0)
assert np.isclose(theta[0][-1], 5.235987755982988)
assert radii.shape == (5, 6)
assert np.isclose(radii[0][0], 2.6157517233680334)
assert np.isclose(radii[0][-1], 3.009265719628255)
assert np.isclose(radii.mean(), 3.333333333333333)
assert np.isclose(radii.sum(), 100)
assert np.isclose(bottoms[-1].sum() + radii[-1].sum(), 100)
assert np.isclose(colors.sum(), 11.528529)
assert np.isclose(colors.mean(), 0.57642645)
def test_get_rose_parameters_custom(wind_data):
theta, radii, bottoms, colors, calm_percentage, value_bin_boundaries = get_rose_parameters(
values=wind_data['s'].values, directions=wind_data['d'].values,
value_bin_boundaries=np.arange(6, 18, 3), calm_value=None, n_dir_bins=16,
center_on_north=False
)
assert np.isclose(calm_percentage, 20.19942166414839)
assert np.allclose(value_bin_boundaries, np.array([6., 9., 12., 15., np.inf]))
assert theta.shape == (4, 16)
assert np.isclose(theta[0][0], 0.19634954084936207)
assert np.isclose(theta[0][-1], 6.086835766330224)
assert radii.shape == (4, 16)
assert np.isclose(radii[0][0], 0.9532437218944086)
assert np.isclose(radii[0][-1], 2.268034506116919)
assert np.isclose(radii.mean(), 1.5625)
assert np.isclose(radii.sum(), 100)
assert np.isclose(bottoms[-1].sum() + radii[-1].sum(), 100)
assert np.isclose(colors.sum(), 9.198019)
assert np.isclose(colors.mean(), 0.5748761875)
def run_rose_plot(data, figname, **kwargs):
fig, ax = rose_plot(values=data['s'].values, directions=data['d'].values, **kwargs)
figure_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'baseline_images')
baseline_path = os.path.join(figure_path, f'rose_plot_{figname}.png')
compare_path = os.path.join(figure_path, f'rose_plot_{figname}_compare.png')
fig.savefig(compare_path, dpi=100)
comparison = compare_images(baseline_path, compare_path, .001)
os.remove(compare_path)
plt.close(fig)
return comparison
def test_rose_plot_basic(wind_data):
run_rose_plot(
data=wind_data, figname='basic',
value_bin_boundaries=np.arange(1, 18, 3), n_dir_bins=12, cmap=plt.get_cmap('Blues'), rose_type='bar',
fig=None, ax=None, center_on_north=True, calm_size=1.5, title='Wind Rose',
value_name='Wind Speed [m/s]', rwidths=None
)
def test_rose_plot_contour(wind_data):
run_rose_plot(
data=wind_data, figname='contour',
value_bin_boundaries=np.arange(0, 18, 3), n_dir_bins=12, cmap=plt.get_cmap('jet'), rose_type='contour',
fig=None, ax=None, center_on_north=True, calm_size=None, title='Rose Plot Contourf',
value_name='Wind Speed', rwidths=None
)
def test_rose_plot_contourf(wind_data):
run_rose_plot(
data=wind_data, figname='contourf',
value_bin_boundaries=np.arange(0, 18, 3), n_dir_bins=12, cmap=plt.get_cmap('viridis'), rose_type='contourf',
fig=None, ax=None, center_on_north=True, calm_size=None, title='Rose Plot Contourf',
value_name='Wind Speed', rwidths=None
)
def test_rose_plot_bargeom(wind_data):
run_rose_plot(
data=wind_data, figname='bargeom',
value_bin_boundaries=np.arange(1, 22, 2), n_dir_bins=16, cmap=plt.get_cmap('magma'), rose_type='bar',
fig=None, ax=None, center_on_north=False, calm_size=1.5, title='Rose Plot Geomspace',
value_name='Wind Speed', rwidths=None, geomspace=True
)
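# Minimal usage sketch (ours, not part of the upstream tests): rose_plot driven
# directly from synthetic value/direction arrays, mirroring the keyword
# arguments used above; the data and bin edges are made up.
def _example_rose_plot():
    values = np.random.rayleigh(scale=6, size=1000)
    directions = np.random.uniform(0, 360, size=1000)
    fig, ax = rose_plot(
        values=values, directions=directions,
        value_bin_boundaries=np.arange(1, 18, 3), n_dir_bins=12,
        cmap=plt.get_cmap('Blues'), rose_type='bar', fig=None, ax=None,
        center_on_north=True, calm_size=1.5, title='Synthetic Rose',
        value_name='Value', rwidths=None)
    plt.close(fig)
    return fig, ax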
| gpl-3.0 |
appapantula/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
    if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
            found = np.zeros(shape, dtype=int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
        # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
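# Illustrative usage sketch (not part of scikit-learn, never called by the
# library): it only shows the expected argument shapes and the shape of the
# returned CSC matrix.
def _random_choice_csc_example():
    example_classes = [np.array([0, 1, 2]), np.array([0, 5])]
    example_probs = [np.array([0.5, 0.3, 0.2]), np.array([0.9, 0.1])]
    mat = random_choice_csc(n_samples=10, classes=example_classes,
                            class_probability=example_probs, random_state=0)
    return mat.toarray().shape  # (10, 2): one column per output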
| bsd-3-clause |
trankmichael/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
# plt.hold('on')  # not needed: overplotting is the default and plt.hold was removed in matplotlib 3.0
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
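###############################################################################
# Optional numeric check (not part of the original example): R^2 of each
# fitted model on the training data; the toy example defines no test set.
for kernel_label, model in (('RBF', svr_rbf), ('Linear', svr_lin),
                            ('Polynomial', svr_poly)):
    print("%s kernel R^2 on training data: %.3f" % (kernel_label,
                                                    model.score(X, y)))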
| bsd-3-clause |
johnnycakes79/pyops | pyops/draw.py | 1 | 35960 | """
This module makes pretty orbit graphics.
"""
from utils import getclosest, getorbelts, planetmu
from datetime import datetime
from PyAstronomy import pyasl
from PyAstronomy import constants as consts
import matplotlib.pyplot as plt
import numpy as np
import svgfig as svg
import inspect
import spice
import math
import os
from io import StringIO
import telnetlib
import socket
consts.setSystem('SI')
class Horizons(telnetlib.Telnet, object):
MERCURY = 199
VENUS = 299
EARTH = 399
MARS = 499
JUPITER = 599
SATURN = 699
URANUS = 799
NEPTUNE = 899
PLUTO = 999
def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
super(Horizons, self).__init__("localhost", 6775, timeout)
def open(self, host, port=0, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
super(Horizons, self).open(host, port, timeout)
# Disable pager on start
self._check_main()
self.sendline("page")
def sendline(self, s=""):
self.write(s.encode('ascii') + b"\n")
def elements(self, body, start_date, end_date, delta):
"""Compute osculatory elements, selected a body.
Columns available:
0JDCT Epoch Julian Date, Coordinate Time
2EC Eccentricity, e
3QR Periapsis distance, q (km)
4IN Inclination w.r.t xy-plane, i (degrees)
5OM Longitude of Ascending Node, OMEGA, (degrees)
6W Argument of Perifocus, w (degrees)
7Tp Time of periapsis (Julian day number)
8N Mean motion, n (degrees/sec)
9MA Mean anomaly, M (degrees)
10TA True anomaly, nu (degrees)
11A Semi-major axis, a (km)
12AD Apoapsis distance (km)
13PR Sidereal orbit period (sec)
# TODO: Better specify time delta
# TODO: Better specify the body, from a list / dict
# TODO: Choose reference
"""
self._check_main()
self._select_body(body)
self.expect([b"Observe.*\] :"])
self.sendline("e")
idx, _, _ = self.expect([b"Coordinate.*\] :",
b"Use previous center.*\] :"])
if idx == 1:
self.sendline("n")
self.expect([b"Coordinate.*\] :"])
self.sendline("sun")
self.expect([b"Reference.*\] :"])
self.sendline("eclip")
self.expect([b"Starting.*\] :"])
self.sendline(str(start_date))
self.expect([b"Ending.*\] :"])
self.sendline(str(end_date))
self.expect([b"Output.*\] :"])
self.sendline(delta)
self.expect([b"Accept.*\] :"])
self.sendline("n")
self.expect([b"Output reference.*\] :"])
self.sendline()
self.expect([b"Output units.*\] :"])
self.sendline("1")
self.expect([b"Spreadsheet.*\] :"])
self.sendline("yes")
self.expect([b"Label.*\] :"])
self.sendline("no")
self.expect([b"Type.*\] :"])
self.sendline()
data = self.read_until(b"$$EOE").decode('ascii')
ephem_str = data.partition("$$SOE")[-1].partition("$$EOE")[0].strip()
# n_lines = len(ephem_str.splitlines())
ephem_data = np.loadtxt(StringIO(ephem_str), delimiter=",",
usecols=(0, 2, 4, 5, 6, 10, 11), unpack=True)
jd, ecc, inc, omega, argp, nu, a = ephem_data
self.expect([b".*Select.* :"])
self.sendline("N")
self.expect([b"\n"])
# return (jd, a, ecc, radians(inc), radians(omega), radians(argp),
# radians(nu))
return (jd, a, ecc, inc, omega, argp, nu)
def vectors(self, body, start_date, end_date, delta):
"""Compute position and velocity vector."""
self._check_main()
self._select_body(body)
self.expect([b"Observe.*\] :"])
self.sendline("v")
idx, _, _ = self.expect([b"Coordinate.*\] :",
b"Use previous center.*\] :"])
if idx == 1:
self.sendline("n")
self.expect([b"Coordinate.*\] :"])
self.sendline("@sun")
self.expect([b"Reference.*\] :"])
self.sendline("eclip")
self.expect([b"Starting.*\] :"])
self.sendline(str(start_date))
self.expect([b"Ending.*\] :"])
self.sendline(str(end_date))
self.expect([b"Output.*\] :"])
self.sendline(delta)
self.expect([b"Accept.*\] :"])
self.sendline("n")
self.expect([b"Output reference.*\] :"])
self.sendline("J2000")
self.expect([b"Corrections.* :"])
self.sendline("1")
self.expect([b"Output units.*\] :"])
self.sendline("1")
self.expect([b"Spreadsheet.*\] :"])
self.sendline("yes")
self.expect([b"Label.*\] :"])
self.sendline("no")
self.expect([b"Select output table.*\] :"])
self.sendline("2")
data = self.read_until(b"$$EOE").decode('ascii')
ephem_str = data.partition("$$SOE")[-1].partition("$$EOE")[0].strip()
# n_lines = len(ephem_str.splitlines())
ephem_data = np.loadtxt(StringIO(ephem_str), delimiter=",",
usecols=(0,) + tuple(range(2, 8)), unpack=True)
jd, x, y, z, vx, vy, vz = ephem_data
r = np.column_stack((x, y, z))
v = np.column_stack((vx, vy, vz))
self.expect([b".*Select.* :"])
self.sendline("N")
self.expect([b"\n"])
return jd, r, v
def _select_body(self, body):
self.sendline(str(body))
self.expect([b"Select .*, \?, <cr>:"])
self.sendline("e")
self.expect([b"\n"])
def _check_main(self):
idx, _, _ = self.expect([b"Horizons>"])
if idx == -1:
raise RuntimeError("I am lost!")
def _gatherhorizonsdata(delta="1d", scale=15):
print("Building orbit for planets from JPL Horizons...")
start_date = datetime(2024, 5, 7)
end_date = datetime(2025, 5, 6)
jpl = Horizons()
rlist = []
rmins = []
plnts = ("MERCURY", "VENUS", "EARTH")
for planet in plnts:
print(" > {}".format(planet))
# Planet state and velocity vectors
jd, r, v = jpl.vectors(getattr(jpl, planet), start_date, end_date,
delta)
jd, a, ecc, inc, omega, argp, nu = jpl.elements(getattr(jpl, planet),
start_date,
end_date,
delta)
print(jd, a, ecc, inc, omega, argp, nu)
# set AU in kilometers
AU = 1 # consts.AU / 1000 # 149597871.
scale = 1
rv = [math.sqrt(x ** 2 + y ** 2 + z ** 2) for (x, y, z) in r]
xy = [((x / AU) * scale, (y / AU) * scale) for (x, y) in r[:, 0:2]]
i = rv.index(min(rv))
rmin = math.degrees(math.atan2(r[i, 1], r[i, 0]))
rmins.append(rmin)
rlist.append(xy)
return rlist, rmins, jd.tolist()
def _gatherorbitdata(delta="1d", scale=15, verbose=False):
# print("Building orbit for planets with SPICE...")
spice.kclear()
# Load the kernels that this program requires.
this_dir = os.path.dirname(os.path.realpath(__file__))
spice.furnsh(os.path.join(this_dir, 'pyops.mk'))
# convert starting epoch to ET
et0 = spice.str2et('2024/05/07 00:00')
rate = 24 * 2 # Every 30 mins
days = [(et0 + (day * (86400 / rate))) for day in range(366 * rate)]
# internal variables and constants
planets = ("MERCURY", "VENUS", "EARTH")
AU = consts.AU / 1000. # AU [km]
argps = []
argpxys = []
xyvecs = []
nuvecs = []
for planet in planets:
# print(" > {}".format(planet))
dates = []
rvec = [] # vector of centric radii
xyvec = [] # vector of (x,y) coordinates
nuvec = [] # vector of nu (True Anomaly) values
incvec = [] # vector of inclination values
for et in days:
if verbose:
print('ET Seconds Past J2000: {}'.format(et))
# Compute the apparent state of MERCURY as seen from
# the SUN in ECLIPJ2000
starg, ltime = spice.spkezr(planet, et, 'ECLIPJ2000',
'NONE', 'SUN')
x, y, z, vx, vy, vz = [el / AU * scale for el in starg]
r = math.sqrt(x ** 2 + y ** 2 + z ** 2)
if verbose:
print('\nApparent state of MERCURY as seen from',
' Sun in the J2000:')
print(' X = {:10.4f} km (LT+S)'.format(x))
print(' Y = {:10.4f} km (LT+S)'.format(y))
print(' Z = {:10.4f} km (LT+S)'.format(z))
print('VX = {:10.4f} km/s (LT+S)'.format(vx))
print('VY = {:10.4f} km/s (LT+S)'.format(vy))
print('VZ = {:10.4f} km/s (LT+S)'.format(vz))
# calculate orbital elements from the starg state vector
elts = spice.oscelt(starg, et, planetmu('Sun'))
# define a solver for Kepler's equation
ks = pyasl.MarkleyKESolver()
# solve for the Eccentric Anomaly (E) with the
# Mean Anomaly (M = elts[5]) and the
# Eccentricity (ecc = elts[1])
E = ks.getE(elts[5], elts[1])
# calculate the True Anomaly (nu) from E and ecc (elts[1])
nuarg1 = math.sqrt(1 - elts[1]) * math.cos(E / 2)
nuarg2 = math.sqrt(1 + elts[1]) * math.sin(E / 2)
# atan2 in python needs the arguments as (y,x)
# rather than (x,y) ...?
nu = 2 * math.atan2(nuarg2, nuarg1)
rvec.append(r) # append r for each day
xyvec.append((x, y)) # append (x,y) coords for each day
nuvec.append(nu) # append True anomaly for each day
# build date in ISO format
date = '{} {}'.format(spice.et2utc(et, 'ISOC', 0).split('T')[0],
spice.et2utc(et, 'ISOC', 0).split('T')[1])
dates.append(date) # append date for each day
incvec.append(elts[2]) # append inc. for each day (rads)
# print(date, nu * spice.dpr(), x, y, z, r, elts[0])
# for this planet find the argument of pericenter (argp):
# find the index of the min. r value for calculated range.
argpi = rvec.index(min(rvec))
# calculate argp x and y values and argp using atan2
argpxy = (xyvec[argpi][0], xyvec[argpi][1] * math.cos(incvec[argpi]))
argp = math.degrees(math.atan2(argpxy[1], argpxy[0]))
argpxys.append(argpxy) # append argp (x,y) coords.
argps.append(argp) # append argp
xyvecs.append(xyvec) # append (x,y) coords. vector
nuvecs.append(nuvec) # append true anomaly vector
spice.kclear()
return days, dates, xyvecs, argps, argpxys, nuvecs
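# Illustrative self-check (exposition only, never called by the module): for a
# circular orbit the mean, eccentric and true anomalies coincide, so the Kepler
# solve plus the atan2 conversion used in _gatherorbitdata above should return
# the mean anomaly unchanged.
def _check_anomaly_conversion(mean_anomaly=1.0):
    ks = pyasl.MarkleyKESolver()
    E = ks.getE(mean_anomaly, 0.0)  # ecc = 0  ->  E == M
    nu = 2 * math.atan2(math.sqrt(1 + 0.0) * math.sin(E / 2),
                        math.sqrt(1 - 0.0) * math.cos(E / 2))
    return abs(nu - mean_anomaly) < 1e-8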
def _frmline(a, b, c, d, line_op=1.0):
return svg.Line(a, b, c, d, stroke_width="0.15pt",
stroke_dasharray="2, 2", stroke_opacity=line_op)
def _gradient(id, colors, gradrot, rotang, x, y):
# TODO: Fix the gradient rotation...
# print("Building gradient for {}".format(id))
xp = x * math.cos(math.radians(rotang)) - \
y * math.sin(math.radians(rotang))
yp = x * math.sin(math.radians(rotang)) + \
y * math.cos(math.radians(rotang))
return svg.SVG("linearGradient",
svg.SVG("stop", stop_color=colors[0], stop_opacity=1,
offset="40%"),
svg.SVG("stop", stop_color=colors[1], stop_opacity=1,
offset="60%"),
x1="0%", y1="0%", x2="100%", y2="0%",
spreadMethod="pad",
id="{}Grad".format(id),
gradientTransform="rotate({}, {}, {})".format(45, xp, yp))
def _outerframe(date, frmSize=15, frm_op=0.5, diag_scl=0.65, mpoargp=False,
frm_font_size=3, frm_ticks=8, frm_miniticks=False):
# print("Building outer frame...")
callerfunc = inspect.stack()[1][3]
frmSize = frmSize * 1.2
frm = svg.LineAxis(frmSize, 0, frmSize, 2 * math.pi, 0, 2 * math.pi)
frm.text_start = -2.5
frm.text_angle = 180.
frm.text_attr["font-size"] = frm_font_size
frm.text_attr["opacity"] = frm_op
frm.attr["stroke_opacity"] = frm_op
frm.ticks = [x * 2 * math.pi / frm_ticks for x in range(frm_ticks)]
if callerfunc == 'planetsplot':
frm.labels = lambda x: "%g" % (x * 180 / math.pi)
else:
frm.labels = False
if frm_miniticks:
frm_miniticks = [x * 2 * math.pi / frm_ticks / 9 for x in
range(frm_ticks * 9)]
frm.miniticks = frm_miniticks
# Makes a circle out of the Line Axis.
frm_plot = svg.Fig(frm, trans="x*cos(y), x*sin(y)")
# Draw the vertical ...
xs = 0.9
frmLine1 = _frmline(0, -frmSize * xs, 0, frmSize * xs, line_op=frm_op)
# ... and horizontal frame lines through the sun.
frmLine2 = _frmline(-frmSize * xs, 0, frmSize * xs, 0, line_op=frm_op)
# Draw the diagonal frame lines.
frmLine3 = _frmline(-frmSize * diag_scl, -frmSize * diag_scl,
frmSize * diag_scl, frmSize * diag_scl,
line_op=frm_op)
frmLine4 = _frmline(-frmSize * diag_scl, frmSize * diag_scl,
frmSize * diag_scl, -frmSize * diag_scl,
line_op=frm_op)
# And there was light...
sun_ball = _sun()
# Metadata
callerfunc = inspect.stack()[1][3]
if callerfunc == 'planetsplot':
titletag = 'Planetary Constellation Mid-Season'
if callerfunc == 'mpoplot':
titletag = 'MPO Orbit Mid-Season'
refdata = svg.Fig()
textop = 6.0
# TODO: fix the messy way the placement x, y are defined.
metatitle = svg.Text(-frmSize - 3.5, frmSize + 3, titletag,
font_size=frm_font_size, opacity=textop,
text_anchor="start")
metadate = svg.Text(-frmSize - 3.5, frmSize + 1, "{}".format(date),
font_size=frm_font_size, opacity=textop,
text_anchor="start")
if callerfunc == 'mpoplot' and mpoargp:
metaargp = svg.Text(-frmSize - 3.5, frmSize - 1,
"Arg. Periherm: {:6.1f}degsym".format(mpoargp),
font_size=frm_font_size, opacity=textop,
text_anchor="start")
else:
metaargp = svg.Fig()
if callerfunc == 'planetsplot':
xy = (-frmSize + 6.8, -frmSize - 1.8)
reforb, grad = _planetdiag("MERCURY", xy, 0)
reforbtext1 = svg.Text(-frmSize + 4, -frmSize - 2, "Descending",
font_size=frm_font_size, opacity=textop,
text_anchor="end")
reforbtext2 = svg.Text(-frmSize + 9, -frmSize - 2.0, "Ascending",
font_size=frm_font_size, opacity=textop,
text_anchor="start")
refdata = svg.Fig(reforb, reforbtext1, reforbtext2)
return svg.Fig(frm_plot,
svg.Fig(frmLine1, frmLine2, frmLine3, frmLine4),
sun_ball,
metatitle,
metadate,
metaargp,
refdata)
def _orbitdot(a, b, theta, r_dot_adj=0.1, color="#C8C5E2", r_dot_size=0.6,
rot_x=0.0, rot_y=0.0, dot_op=1.0, dot_str_op=1.0):
if theta > 180:
r_dot_adj = r_dot_adj * -1.0
r_dot = _rellipse(a, b, theta) # +r_dot_adj*sin(theta)
r_trans = svg.rotate(theta, rot_x, rot_y)
# print(r_dot)
ret_dot = svg.Fig(svg.Dots([(r_dot, 0)],
svg.make_symbol("dot_{}_{}".format(theta, color),
fill=color, fill_opacity=dot_op,
stroke="black", stroke_width="0.15pt",
stroke_opacity=dot_str_op),
r_dot_size, r_dot_size), trans=r_trans)
#print(defs)
return ret_dot
def _planetdiag(name, rpos, rotang=0.0, orb_scl=1.0, frmSizecl=10.0,
diag_op=1.0):
# print("Building {} diagram...".format(name))
colors = []
if name == "MERCURY":
colors = ["#C8C5E2", "#373163"]
if name == "VENUS":
diag_op = 0.4
colors = ["#EDE051", "#393506"]
if name == "EARTH":
colors = ["#00AFEF", "#003C52"]
# colorurl="url(#{}Grad)".format(name[0:4].lower())
# Scale the position vector ...
# rpos = [x*frmSizecl*orb_scl for x in rpos]
# Simplify ...
r_x = rpos[0]
r_y = rpos[1]
gradrot = math.degrees(math.atan2(r_y, r_x))
# Build a white ball for background ...
ball_bg = _planetdot(name, rpos, r_dot_size=2.0 * orb_scl,
dot_color="white", dot_str_op=diag_op)
# ... and a color ball for foreground.
ball_fg = _planetdot(name, rpos, r_dot_size=2.0 * orb_scl,
dot_color=colors[0], dot_op=diag_op,
dot_str_op=diag_op)
# Stack coloured ball on top of white background ball...
ball = svg.Fig(ball_bg, ball_fg)
grad = _gradient(name[0:4].lower(), colors, gradrot, rotang, r_x, r_y)
if name == "MERCURY":
# print("Buidling MPO orbit schematic...")
# MPO line scaling factor
mpo_line_sf = 2.0
# MPO line start and end points
mpo_line_st = r_x - orb_scl * mpo_line_sf
mpo_line_en = r_x + orb_scl * mpo_line_sf * 0.720811474
node_size = 0.15
x1 = mpo_line_st - node_size
x2 = mpo_line_st + node_size
y1 = r_y - node_size
y2 = r_y + node_size
dec_node = svg.Fig(svg.Rect(x1=x1, y1=y1, x2=x2, y2=y2, fill="black"),
trans=svg.rotate(0, mpo_line_st, r_y))
x1 = mpo_line_en - node_size
x2 = mpo_line_en + node_size
y1 = r_y - node_size
y2 = r_y + node_size
asc_node = svg.Fig(svg.Rect(x1=x1, y1=y1, x2=x2, y2=y2, fill="black"),
trans=svg.rotate(45, mpo_line_en, r_y))
mpo_line = svg.Fig(svg.Line(mpo_line_st, r_y, mpo_line_en, r_y,
stroke_width="0.15pt",),
asc_node,
dec_node,
trans=svg.rotate(-rotang, r_x, r_y))
# r_trans = rotate(theta, 0, 0)
ball.d.append(svg.Fig(mpo_line))
return svg.Fig(ball, trans=svg.rotate(rotang, 0, 0)), grad
def _planetdot(name, rpos, dot_color="#C8C5E2", r_dot_size=0.6,
dot_op=1.0, dot_str_op=1.0):
r_x = rpos[0]
r_y = rpos[1]
cname = dot_color.replace("#", "")
ret_dot = svg.Fig(svg.Dots([(r_x, r_y)],
svg.make_symbol("dot_{}_{}".format(name, cname),
fill=dot_color, fill_opacity=dot_op,
stroke="black", stroke_width="0.15pt",
stroke_opacity=dot_str_op),
r_dot_size, r_dot_size))
return ret_dot
def _planetdotang(a, b, theta, r_dot_adj=0.23, dot_color="#C8C5E2",
r_dot_size=0.6, rot_x=0.0, rot_y=0.0, dot_op=1.0,
dot_str_op=1.0):
if theta < 180:
r_dot_adj = r_dot_adj * -1.0
r_dot = _rellipse(a, b, theta)
r_trans = svg.rotate(theta, rot_x, rot_y)
# print(r_dot)
ret_dot = svg.Fig(svg.Dots([(r_dot, 0)],
svg.make_symbol("dot_{}_{}".format(theta, dot_color),
fill=dot_color, fill_opacity=dot_op,
stroke="black", stroke_width="0.15pt",
stroke_opacity=dot_str_op),
r_dot_size, r_dot_size), trans=r_trans)
# print(theta)
# print(r_dot*cos(radians(theta)), r_dot*sin(radians(theta)))
return ret_dot
def _rellipse(a, b, theta):
rret = (b ** 2) / (a - math.sqrt(a ** 2 - b ** 2) *
math.cos(math.radians(180 - theta)))
return rret
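# Quick sanity check for _rellipse (exposition only, not called anywhere): with
# semi-axes a and b the formula returns the periapsis distance a*(1 - e) at
# theta = 0 and the apoapsis distance a*(1 + e) at theta = 180, where e is the
# eccentricity implied by a and b.
def _check_rellipse(a=2.0, b=1.0):
    ecc = math.sqrt(1 - (b / a) ** 2)
    peri_ok = abs(_rellipse(a, b, 0) - a * (1 - ecc)) < 1e-9
    apo_ok = abs(_rellipse(a, b, 180) - a * (1 + ecc)) < 1e-9
    return peri_ok and apo_ok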
def _sun(id="Sun", posx=0, posy=0, size=1.5, fill="yellow",
stroke="orange", stroke_width="0.1pt"):
return svg.Dots([(0, 0)], svg.make_symbol(id, stroke=stroke,
fill=fill, stroke_width=stroke_width), size, size)
def planetsplot(userdates=None, delta="1d", master_scale=15, demo=False,
showplots=False):
"""
... explain what this does...
"""
outdir = './sample_data/output'
# if demo:
# shutil.rmtree(outdir)
# os.makedirs(outdir)
# else:
# if not os.path.exists(outdir):
# os.makedirs(outdir)
# else:
# print('\n Uh-oh! The directory {} already exists.'.format(
# outdir))
# if yesno(' Do you want to replace it?'):
# shutil.rmtree(outdir)
# os.makedirs(outdir)
# else:
# return
orbitdata = _gatherorbitdata(delta=delta, scale=master_scale)
ets, dates, orbits, argps, argpxys, nus = orbitdata
if userdates is None:
userdates = dates
if showplots:
plt.subplot(1, 1, 1)
for xy in orbits:
plt.plot([x[0] for x in xy], [y[1] for y in xy],
'rx', label='SPICE')
for xy in argpxys:
plt.plot(xy[0], xy[1], 'go')
plt.show()
if len(orbits[0]) == len(dates) == len(ets):
# This rotation will put the Hermean perihelion on the X-axis.
rotang = -argps[0]
# Load the kernels that this program requires.
spice.kclear()
this_dir = os.path.dirname(os.path.realpath(__file__))
spice.furnsh(os.path.join(this_dir, 'pyops.mk'))
output_files = []
# A graphic will be created for each 'date' in 'userdates':
for date in userdates:
# get the position-index of the 'et' in the 'orbitdata' list
# of 'ets' that is closest to the 'date' in the 'userdates'
et = spice.str2et(date)
dx = ets.index(getclosest(ets, et))
# -- Outer frame -------------------------------------------------
# Opacity of degree frame and Venus graphic
frame_op = 0.8
# Process calendar time strings
date = '{} {}'.format(spice.et2utc(et, 'ISOC', 0).split('T')[0],
spice.et2utc(et, 'ISOC', 0).split('T')[1])
edate, etime = date.split()
eyear = "{}".format(edate.split('-')[0])
emonth = "{0:02d}".format(int(edate.split('-')[1]))
eday = "{0:02d}".format(int(edate.split('-')[2]))
epoch = "{}/{}/{}".format(eday, emonth, eyear)
ep_name = "{}{}{}_{}".format(eyear, emonth, eday,
etime.replace(':', ''))
frame = _outerframe(epoch, frmSize=master_scale, frm_op=frame_op)
# -- First Point of Aires ----------------------------------------
# merc_loan = 48.331
# merc_argp = 29.124
arend = svg.make_marker("fopa_arrowend", "arrow_end",
fill_opacity=0.4)
x1, y1 = 10, 0
x2, y2 = master_scale * 1.3, 0
fpoa = svg.Line(x1, y1, x2, y2, stroke_width=".4pt",
stroke_opacity=0.4, arrow_end=arend)
xp = (x2 * math.cos(math.radians(rotang)) -
y2 * math.sin(math.radians(rotang)))
yp = (x2 * math.sin(math.radians(rotang)) +
y2 * math.cos(math.radians(rotang)))
fpoa_text = svg.Text(xp + 6.5, yp - 1.0, "First Point of Aries",
font_size=3, opacity=0.75)
fpoa = svg.Fig(svg.Fig(fpoa, trans=svg.rotate(rotang, 0, 0)),
fpoa_text)
# -- Some containers ---------------------------------------------
orbs = []
circles = []
defs = svg.SVG("defs")
# -- Orbit circles -----------------------------------------------
# Build the SVG for each orbit.
for orbit in orbits:
if orbits.index(orbit) == 1:
orbit_op = 0.4
else:
orbit_op = 1.0
# Mercury's orbit will have perihelion on the X-axis
circles.append(svg.Fig(svg.Poly(orbit, stroke_width=".25pt",
stroke_opacity=orbit_op),
trans=svg.rotate(rotang, 0, 0)))
# -- Planet orbs -------------------------------------------------
points = [orbits[0][dx], orbits[1][dx], orbits[2][dx]]
# Build the planet orb for each planet for this chart.
for point in points:
# Planetary inputs ...
if points.index(point) == 0:
name = "MERCURY"
nu = math.degrees(math.atan2(point[1], point[0])) + rotang
if nu < 0:
nu = nu + 360
# print(nu, nu-rotang, rotang)
nu = "{0:03d}".format(int(nu))
if points.index(point) == 1:
name = "VENUS"
if points.index(point) == 2:
name = "EARTH"
# point_r = [x/AU for x in point]
orb, grad = _planetdiag(name, point, rotang)
orbs.append(orb)
defs.append(grad)
# -- Build final figure ------------------------------------------
wa = master_scale * 1.5
svgout = svg.Fig(fpoa, frame,
circles[0], circles[1], circles[2],
orbs[0], orbs[1], orbs[2]
).SVG(svg.window(-wa, wa, -wa, wa))
svgout.prepend(defs)
out_path = os.path.join(outdir,
"merc_orbit_plot_{}_{}.svg".format(
ep_name, nu))
svgout.save(out_path)
output_files.append(out_path)
spice.kclear()
return output_files
else:
# You'll jump to hear if the epochs for all 3 planets are not equal.
print("There is an epoch error between the planet time values...")
def mpoplot(userdates, master_scale=15, demo=False):
"""
... explain what this does...
"""
outdir = '../sample_data/output'
# if demo:
# shutil.rmtree(outdir)
# os.makedirs(outdir)
# else:
# if not os.path.exists(outdir):
# os.makedirs(outdir)
# else:
# print('\n Uh-oh! The directory {} already exists.'.format(
# outdir))
# if yesno(' Do you want to replace it?'):
# shutil.rmtree(outdir)
# os.makedirs(outdir)
# else:
# return
# Clear and load the kernels that this program requires.
spice.kclear()
spice.furnsh('pyops.mk')
# A graphic will be created for each 'date' in 'dates':
for date in userdates:
et = spice.str2et(date)
datestr = (spice.et2utc(et, 'ISOC', 0))
# -- Outer frame -------------------------------------------------
dist_scl = 250.0
elts = getorbelts(date)
arg_peri = elts[4]
# Opacity of degree frame and Venus graphic
frame_op = 0.5
# # Process JD time into calendar time strings
# datestr = spice.et2utc(et, 'ISOC', 0)
date = '{} {}'.format(datestr.split('T')[0],
datestr.split('T')[1])
edate, etime = date.split()
eyear = "{}".format(edate.split('-')[0])
emonth = "{0:02d}".format(int(edate.split('-')[1]))
eday = "{0:02d}".format(int(edate.split('-')[2]))
epoch = "{}/{}/{}".format(eday, emonth, eyear)
ep_name = "{}{}{}".format(eyear, emonth, eday)
frame = _outerframe(epoch, frmSize=master_scale, frm_op=frame_op,
mpoargp=arg_peri)
# -- Mercury Planet --------------------------------------------------
# tru_ano = 90
# look_from = 270
# x1 = "{}%".format((100*math.sin(math.radians((tru_ano+90)/2.))))
# x2 = "{}%".format(100-(100*sin(radians((tru_ano+90)/2.))))
angs = range(0, 360, 1)
plt.plot(angs, ["{}".format((100 * math.sin(math.radians(x / 2))))
for x in angs], 'yo-')
plt.plot(angs, ["{}".format(100 - (100 *
math.sin(math.radians(x / 2)))) for x in angs], 'ro-')
# plt.show()
stop1 = "#C8C5E2"
# stop2 = "#373163"
defs = svg.SVG("defs",
svg.SVG("linearGradient",
svg.SVG("stop", stop_color=stop1,
stop_opacity=1, offset="45%"),
svg.SVG("stop", stop_color=stop1,
stop_opacity=1, offset="55%"),
x1="0%", y1="0%", x2="100%", y2="0%",
spreadMethod="pad",
id="mercGrad")
)
# defs = svg.SVG('defs',
# svg.SVG('radialGradient',
# svg.SVG('stop',
# stop_color=stop1,
# stop_opacity=1,
# offset='38%'),
# svg.SVG('stop',
# stop_color=stop2,
# stop_opacity=1,
# offset='40%'),
# cx='50%', cy='50%',
# fx='230%', fy='50%',
# r='300%',
# spreadMethod='pad',
# id='mercGrad')
# )
merc_rad = 2439.99 # km
merc_rad_scl = merc_rad / dist_scl
merc_ball = svg.Ellipse(0, 0, 0, merc_rad_scl, merc_rad_scl,
fill="url(#mercGrad)", stroke_width="0.15pt")
# -- MPO Orbit --
mpo_orb_ecc = 0.163229
mpo_orb_sma = 3394.0 # km
mpo_orb_sma_scl = mpo_orb_sma / dist_scl
mpo_orb_smi_scl = mpo_orb_sma_scl * math.sqrt(1 - mpo_orb_ecc ** 2)
# Make things cleaner
a = mpo_orb_sma_scl
b = mpo_orb_smi_scl
mpo_orb = svg.Ellipse(-math.sqrt(a ** 2 - b ** 2), 0, 0, a, b,
fill="none", stroke_width="0.25pt")
# apof = 8
mpo_orb_apses = svg.Line(-_rellipse(a, b, 180) - 5, 0,
_rellipse(a, b, 0) + 10, 0,
stroke_width="0.15pt",
stroke_dasharray="2, 2")
dot_angs = range(0, 360, 20)
dots = [_orbitdot(a, b, x, color="black") for x in dot_angs]
mpo_orb_dots = svg.Fig()
for dot in dots:
mpo_orb_dots.d.append(dot)
mpo_orb_trans = svg.rotate(arg_peri, 0, 0)
mpo_orb_plot = svg.Fig(mpo_orb, mpo_orb_apses, mpo_orb_dots,
trans=mpo_orb_trans)
# -- Direction arrow -------------------------------------------------
dirarend = svg.make_marker("dirarrowend", "arrow_end",
fill_opacity=0.2)
dirarend.attr["markerWidth"] = 7.5
        x1, y1 = master_scale + 1, 0.4
x2, y2 = master_scale + 1, 1
dirarwstrt = svg.Line(x1, y1, x2, y2, stroke_width=".4pt",
stroke_opacity=0.2, arrow_end=dirarend)
dirarw = svg.Fig(dirarwstrt, trans="x*cos(y), x*sin(y)")
# -- Apsis view ------------------------------------------------------
apvx, apvy = master_scale + 3, -master_scale - 3
apsisviewball = svg.Ellipse(apvx, apvy,
0, merc_rad_scl * 0.25,
merc_rad_scl * 0.25,
fill="url(#mercGrad)",
stroke_width="0.15pt")
apsisviewlats = svg.Fig()
for x in range(-9, 10, 3):
hscl = math.sin(math.radians(x * 10))
wscl = math.cos(math.radians(x * 10))
x1 = apvx - (merc_rad_scl * 0.25 * wscl)
y1 = apvy + (merc_rad_scl * 0.25 * hscl)
x2 = apvx + (merc_rad_scl * 0.25 * wscl)
y2 = apvy + (merc_rad_scl * 0.25 * hscl)
apsisviewlats.d.append(svg.Line(x1, y1, x2, y2,
stroke_width=".2pt",
stroke_opacity=0.4))
apvarend = svg.make_marker("apvarrowend",
"arrow_end",
fill_opacity=0.6)
apvarend.attr["markerWidth"] = 3.0
apvarend.attr["markerHeight"] = 3.0
x1, y1 = apvx, apvy - 3
x2, y2 = apvx, apvy + 3
apsisvieworbit = svg.Line(x1, y1, x2, y2,
stroke_width=".4pt",
stroke_opacity=0.6,
arrow_end=apvarend)
xd = apvx
yd = apvy + (merc_rad_scl * 0.25 * math.sin(math.radians(arg_peri)))
apsisviewdot = svg.Fig(svg.Dots([(xd, yd)],
svg.make_symbol("apsisdot",
fill="black",
fill_opacity=0.6
),
0.6, 0.6
)
)
apsisview = svg.Fig(apsisviewball,
apsisviewlats,
apsisvieworbit,
apsisviewdot)
# -- Build final figure ----------------------------------------------
wa = master_scale * 1.5
svgout = svg.Fig(frame,
merc_ball,
mpo_orb_plot,
dirarw,
apsisview
).SVG(svg.window(-wa, wa, -wa, wa))
svgout.prepend(defs)
argp = int(arg_peri)
svgout.save(os.path.join(outdir,
"mpo_orbit_plot_{}_{}.svg".format(ep_name,
argp)
)
)
if __name__ == '__main__':
# I want plots for these dates...
dates = ("2024-May-07 00:00", "2024-May-09 14:31", "2024-May-28 09:14",
"2024-Jun-13 15:50", "2024-Jun-29 22:26", "2024-Jul-27 15:28",
"2024-Aug-24 08:30", "2024-Sep-09 15:06", "2024-Sep-25 21:42",
"2024-Oct-23 14:44", "2024-Nov-20 07:46", "2024-Dec-06 14:22",
"2024-Dec-22 20:58", "2025-Jan-19 14:00", "2025-Feb-16 07:02",
"2025-Mar-04 13:38", "2025-Mar-20 20:14", "2025-Apr-17 13:16",
"2025-May-03 02:19")
# dates = ("2024-May-07 00:00", "2024-May-09 14:31")
# ... some planetary constellation plots...
planetsplot(userdates=dates, demo=True)
# ... and some spacecraft orbit plots.
# mpoplot(dates, demo=True)
| bsd-3-clause |
qifeigit/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 162 | 7103 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
weight = X[rows[:, np.newaxis], cols].sum()
cut = (X[row_complement[:, np.newaxis], cols].sum() +
X[rows[:, np.newaxis], col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
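# Illustrative alternative (not used below): on Python >= 2.7 the standard
# library's Counter gives essentially the same ordering as most_common above.
def most_common_counter(d):
    from collections import Counter
    return Counter(d).most_common()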
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
shusenl/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
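# Optional numeric comparison (not in the original example): mean squared error
# of each estimator's completion over the five held-out faces plotted above.
for est_name in sorted(ESTIMATORS):
    mse = np.mean((y_test_predict[est_name] - y_test) ** 2)
    print("%s: completion MSE = %.5f" % (est_name, mse))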
| bsd-3-clause |
aktech/sympy | sympy/plotting/plot.py | 7 | 65097 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
import inspect
try:  # Callable lives in collections.abc on Python 3 (removed from collections in 3.10)
    from collections.abc import Callable
except ImportError:  # Python 2
    from collections import Callable
import warnings
import sys
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.core.compatibility import range
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
def _arity(f):
"""
Python 2 and 3 compatible version that do not raise a Deprecation warning.
"""
if sys.version_info < (3,):
return len(inspect.getargspec(f)[0])
else:
param = inspect.signature(f).parameters.values()
return len([p for p in param if p.kind == p.POSITIONAL_OR_KEYWORD])
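# Exposition-only illustration (not part of sympy): _arity counts positional
# parameters, which is how the aesthetic callables described below are
# dispatched over coordinates or parameters.
def _arity_examples():
    assert _arity(lambda x: x) == 1
    assert _arity(lambda x, y: x + y) == 2
    return True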
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
            self._series[index] = args[0]
def __delitem__(self, index):
del self._series[index]
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
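# Exposition-only usage sketch (not sympy API, never called by the library):
# it demonstrates a global option and a per-series aesthetic as documented in
# the Plot docstring above, and assumes the module-level ``plot`` function
# defined later in this file.
def _plot_usage_example():
    from sympy import symbols
    x = symbols('x')
    p = plot(x**2, show=False)      # returns a Plot instance
    p.title = 'aesthetics example'  # a global option
    p[0].line_color = lambda t: t   # per-series aesthetic as a callable
    return p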
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y(1D array,
# mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = _arity(c)
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
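# Illustrative note, not part of the original module: get_segments() above turns
# N sampled points into an (N - 1, 2, dim) masked array of consecutive point
# pairs, which is the input format of matplotlib's LineCollection and
# Line3DCollection. For example (doctest-style, values assumed):
#     >>> s = List2DSeries([0, 1, 2], [0, 1, 4])
#     >>> s.get_segments().shape
#     (2, 2, 2)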
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
                #Sample further if one of the end points is None (i.e. a complex
                #value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
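# Illustrative usage, not part of the original module (mirrors the doctest style
# used in this file; the expression and option values are arbitrary):
#     >>> from sympy import symbols, sin
#     >>> x = symbols('x')
#     >>> s = LineOver1DRangeSeries(sin(x**2), (x, -3, 3),
#     ...                           adaptive=False, nb_of_points=200)
#     >>> len(s.get_points()[0])
#     200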
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif ((p[0] is None and q[1] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
                            sample(param_array[i], param_array[i + 1], point_a,
                                   point_b, depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
            #value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = _arity(c)
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
    expressions and two ranges."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
#The code is mostly repetition of SurfaceOver2DRange.
#XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(False)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor=s.line_color, edgecolor='None')
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", s.line_color])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
else:
if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
self.ax.set_xlim(min(starts), max(ends))
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
if self.ax.legend():
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
def centers_of_faces(array):
np = import_module('numpy')
    return np.average(np.dstack((array[:-1, :-1],
                                 array[1:, :-1],
                                 array[:-1, 1:],
                                 array[1:, 1:],
                                 )), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
# Workaround plotting piecewise (#8577):
    # a workaround because `lambdify` in `.experimental_lambdify` fails
    # to return numerical values in some cases. A lower-level fix
    # in `lambdify` is possible.
vector_a = (x - y).astype(np.float)
vector_b = (z - y).astype(np.float)
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
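# Worked example (illustrative, not part of the original module): for the
# collinear points x = (0, 0), y = (1, 1), z = (2, 2), flat(x, y, z) builds
# vector_a = x - y = (-1, -1) and vector_b = z - y = (1, 1), giving
# cos_theta = -2 / (sqrt(2) * sqrt(2)) = -1, so abs(cos_theta + 1) = 0 < eps
# and the three points are reported as collinear.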
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
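# Illustrative example, not part of the original module: a single bounding
# rectangle with intervalx = [1, 2] and intervaly = [3, 4] produces
#     xlist = [1, 1, 2, 2, None],  ylist = [3, 4, 4, 3, None]
# i.e. the four corners of the rectangle followed by a None separator, the form
# matplotlib's ``fill`` accepts for drawing disjoint polygons.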
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the function. The adaptive algorithm uses a random point
    near the midpoint of two points that have to be further sampled. Hence,
    repeated plots of the same expression can appear slightly different.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the ``Plot`` object returned and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
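# Illustrative usage, not part of the original module (the file name is
# arbitrary): building a plot without displaying it, as described for the
# ``show`` keyword above, and saving it afterwards:
#     >>> from sympy import symbols
#     >>> x = symbols('x')
#     >>> p = plot(x**2, (x, -5, 5), show=False)
#     >>> p.save('parabola.png')   # or p.show() later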
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the function. The adaptive algorithm uses a random point
    near the midpoint of two points that have to be further sampled. Hence,
    repeated plots of the same expression can appear slightly different.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
Multiple parametric plot with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function along x.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` points.
    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
    If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` points.
    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
Examples
========
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
        # Cannot handle the case of exactly 3 expressions, because it is
        # not possible to differentiate between expressions and ranges.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set().union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
| bsd-3-clause |
ghost9023/DeepLearningPythonStudy | DeepLearning/DeepLearning/09_Deep_SongJW/cnn_code_base.py | 1 | 8454 |
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # so that files in the parent directory can be imported
import pickle
import numpy as np
from collections import OrderedDict
from book.common.layers import *
from book.common.gradient import numerical_gradient
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from book.common.trainer import Trainer
class SimpleConvNet:
"""단순한 합성곱 신경망
conv - relu - pool - affine - relu - affine - softmax
Parameters
----------
input_size : 입력 크기(MNIST의 경우엔 784)
hidden_size_list : 각 은닉층의 뉴런 수를 담은 리스트(e.g. [100, 100, 100])
output_size : 출력 크기(MNIST의 경우엔 10)
activation : 활성화 함수 - 'relu' 혹은 'sigmoid'
weight_init_std : 가중치의 표준편차 지정(e.g. 0.01)
'relu'나 'he'로 지정하면 'He 초깃값'으로 설정
'sigmoid'나 'xavier'로 지정하면 'Xavier 초깃값'으로 설정
"""
def __init__(self, input_dim=(1, 28, 28),
conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
hidden_size=100, output_size=10, weight_init_std=0.01):
filter_num = conv_param['filter_num']
filter_size = conv_param['filter_size']
filter_pad = conv_param['pad']
filter_stride = conv_param['stride']
input_size = input_dim[1]
conv_output_size = (input_size - filter_size + 2 * filter_pad) / filter_stride + 1
pool_output_size = int(filter_num * (conv_output_size / 2) * (conv_output_size / 2))
        # Initialize the weights
self.params = {}
self.params['W1'] = weight_init_std * \
np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
self.params['b1'] = np.zeros(filter_num)
self.params['W2'] = weight_init_std * \
np.random.randn(pool_output_size, hidden_size)
self.params['b2'] = np.zeros(hidden_size)
self.params['W3'] = weight_init_std * \
np.random.randn(hidden_size, output_size)
self.params['b3'] = np.zeros(output_size)
        # Create the layers
self.layers = OrderedDict()
self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],
conv_param['stride'], conv_param['pad'])
self.layers['Relu1'] = Relu()
self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
self.layers['Relu2'] = Relu()
self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
self.last_layer = SoftmaxWithLoss()
def predict(self, x):
for layer in self.layers.values():
x = layer.forward(x)
return x
def loss(self, x, t):
"""손실 함수를 구한다.
Parameters
----------
x : 입력 데이터
t : 정답 레이블
"""
y = self.predict(x)
return self.last_layer.forward(y, t)
def accuracy(self, x, t, batch_size=100):
if t.ndim != 1: t = np.argmax(t, axis=1)
acc = 0.0
for i in range(int(x.shape[0] / batch_size)):
tx = x[i * batch_size:(i + 1) * batch_size]
tt = t[i * batch_size:(i + 1) * batch_size]
y = self.predict(tx)
y = np.argmax(y, axis=1)
acc += np.sum(y == tt)
return acc / x.shape[0]
def numerical_gradient(self, x, t):
"""기울기를 구한다(수치미분).
Parameters
----------
x : 입력 데이터
t : 정답 레이블
Returns
-------
각 층의 기울기를 담은 사전(dictionary) 변수
grads['W1']、grads['W2']、... 각 층의 가중치
grads['b1']、grads['b2']、... 각 층의 편향
"""
loss_w = lambda w: self.loss(x, t)
grads = {}
for idx in (1, 2, 3):
grads['W' + str(idx)] = numerical_gradient(loss_w, self.params['W' + str(idx)])
grads['b' + str(idx)] = numerical_gradient(loss_w, self.params['b' + str(idx)])
return grads
def gradient(self, x, t):
"""기울기를 구한다(오차역전파법).
Parameters
----------
x : 입력 데이터
t : 정답 레이블
Returns
-------
각 층의 기울기를 담은 사전(dictionary) 변수
grads['W1']、grads['W2']、... 각 층의 가중치
grads['b1']、grads['b2']、... 각 층의 편향
"""
# forward
self.loss(x, t)
# backward
dout = 1
dout = self.last_layer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
dout = layer.backward(dout)
        # Store the results
grads = {}
grads['W1'], grads['b1'] = self.layers['Conv1'].dW, self.layers['Conv1'].db
grads['W2'], grads['b2'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
grads['W3'], grads['b3'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
return grads
def save_params(self, file_name="params.pkl"):
params = {}
for key, val in self.params.items():
params[key] = val
with open(file_name, 'wb') as f:
pickle.dump(params, f)
def load_params(self, file_name="params.pkl"):
with open(file_name, 'rb') as f:
params = pickle.load(f)
for key, val in params.items():
self.params[key] = val
for i, key in enumerate(['Conv1', 'Affine1', 'Affine2']):
self.layers[key].W = self.params['W' + str(i + 1)]
self.layers[key].b = self.params['b' + str(i + 1)]
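# Worked example of the size arithmetic in SimpleConvNet.__init__ for the
# default configuration (1x28x28 input, 30 filters of size 5, stride 1, pad 0):
#   conv output size : (28 - 5 + 2*0) / 1 + 1 = 24   -> 30 maps of 24x24
#   after 2x2 pooling: 24 / 2 = 12                   -> 30 maps of 12x12
#   pool_output_size : 30 * 12 * 12 = 4320           -> input size of the first Affine layer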
# Load the data
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)
# If training takes too long, reduce the amount of data.
# x_train, t_train = x_train[:5000], t_train[:5000]
# x_test, t_test = x_test[:1000], t_test[:1000]
max_epochs = 20
network = SimpleConvNet(input_dim=(1, 28, 28),
conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
hidden_size=100, output_size=10, weight_init_std=0.01)
# Save the parameters
network.save_params("params.pkl")
print("Saved Network Parameters!")
# Hyperparameters
iters_num = 10000  # set the number of iterations appropriately
train_size = x_train.shape[0]  # 60000 samples
batch_size = 100  # mini-batch size
learning_rate = 0.1
train_loss_list = []
train_acc_list = []
test_acc_list = []
# Number of iterations per epoch
iter_per_epoch = max(train_size / batch_size, 1)
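# With train_size = 60000 and batch_size = 100 this gives iter_per_epoch = 600,
# so iters_num = 10000 iterations corresponds to roughly 10000 / 600 ~ 16.7 epochs.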
print(iter_per_epoch) # 600
for i in range(iters_num):  # 10000 iterations
    # Get a mini-batch: 100 random samples per iteration, 10000 times in total
    batch_mask = np.random.choice(train_size, batch_size)  # indices of 100 random samples
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
    # Compute the gradient
#grad = network.numerical_gradient(x_batch, t_batch)
grad = network.gradient(x_batch, t_batch)
    # Update the parameters (every layer with weights has a W and a b entry)
    for key in ('W1', 'b1', 'W2', 'b2', 'W3', 'b3'):
network.params[key] -= learning_rate * grad[key]
    # Record the training progress
    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)  # to watch the cost gradually decrease
    # Compute the accuracy once per epoch (no training here, only an accuracy check)
    if i % iter_per_epoch == 0:  # every 600 iterations
        print(x_train.shape)  # (60000, 1, 28, 28) because flatten=False
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)  # about 10000/600 = 16 entries; accuracy gradually rises
        test_acc_list.append(test_acc)  # about 10000/600 = 16 entries; accuracy gradually rises
print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))
# Draw the graph
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, label='train acc')
plt.plot(x, test_acc_list, label='test acc', linestyle='--')
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show() | mit |
trankmichael/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
ehogan/iris | lib/iris/io/format_picker.py | 16 | 11517 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
A module to provide convenient file format identification through a combination of filename extension
and file based *magic* numbers.
To manage a collection of FormatSpecifications for loading::
import iris.io.format_picker as fp
import matplotlib.pyplot as plt
fagent = fp.FormatAgent()
png_spec = fp.FormatSpecification('PNG image', fp.MagicNumber(8),
0x89504E470D0A1A0A,
handler=lambda filename: plt.imread(filename),
priority=5
)
fagent.add_spec(png_spec)
To identify a specific format from a file::
with open(png_filename, 'rb') as png_fh:
handling_spec = fagent.get_spec(png_filename, png_fh)
In the example, handling_spec will now be the png_spec previously added to the agent.
Now that a specification has been found, if a handler has been given with the specification, then the file can be handled::
handler = handling_spec.handler
if handler is None:
raise ValueError('File cannot be handled.')
else:
result = handler(filename)
The calling sequence of handler is dependent on the function given in the original specification and can be customised to your project's needs.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import collections
import functools
import os
import struct
import iris.io
class FormatAgent(object):
"""
The FormatAgent class is the containing object which is responsible for identifying the format of a given file
by interrogating its children FormatSpecification instances.
Typically a FormatAgent will be created empty and then extended with the :meth:`FormatAgent.add_spec` method::
agent = FormatAgent()
agent.add_spec(NetCDF_specification)
Less commonly, this can also be written::
agent = FormatAgent(NetCDF_specification)
"""
def __init__(self, format_specs=None):
""" """
self._format_specs = list(format_specs or [])
self._format_specs.sort()
def add_spec(self, format_spec):
"""Add a FormatSpecification instance to this agent for format consideration."""
self._format_specs.append(format_spec)
self._format_specs.sort()
def __repr__(self):
return 'FormatAgent(%r)' % self._format_specs
def __str__(self):
prefix = ' * ' if len(self._format_specs) > 1 else ''
return prefix + '\n * '.join(['%s' % format_spec for format_spec in self._format_specs])
def get_spec(self, basename, buffer_obj):
"""
Pick the first FormatSpecification which can handle the given
filename and file/buffer object.
.. note::
``buffer_obj`` may be ``None`` when a seekable file handle is not
feasible (such as over the http protocol). In these cases only the
format specifications which do not require a file handle are
tested.
"""
element_cache = {}
for format_spec in self._format_specs:
# For the case where a buffer_obj is None (such as for the
# http protocol) skip any specs which require a fh - they
# don't match.
if buffer_obj is None and format_spec.file_element.requires_fh:
continue
fmt_elem = format_spec.file_element
fmt_elem_value = format_spec.file_element_value
# cache the results for each file element
if repr(fmt_elem) not in element_cache:
# N.B. File oriented as this is assuming seekable stream.
if buffer_obj is not None and buffer_obj.tell() != 0:
# reset the buffer if tell != 0
buffer_obj.seek(0)
element_cache[repr(fmt_elem)] = \
fmt_elem.get_element(basename, buffer_obj)
            # If we have a callable object, then call it and test its result, otherwise test using basic equality
if isinstance(fmt_elem_value, collections.Callable):
matches = fmt_elem_value(element_cache[repr(fmt_elem)])
elif element_cache[repr(fmt_elem)] == fmt_elem_value:
matches = True
else:
matches = False
if matches:
return format_spec
printable_values = {}
for key, value in six.iteritems(element_cache):
value = str(value)
if len(value) > 50:
value = value[:50] + '...'
printable_values[key] = value
msg = ('No format specification could be found for the given buffer.'
' File element cache:\n {}'.format(printable_values))
raise ValueError(msg)
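# Illustrative usage, not part of the original module (the agent and its specs
# are assumed to exist): when no seekable file handle is available, e.g. for a
# URI, ``None`` is passed and only handle-free elements such as UriProtocol can
# match:
#     >>> agent.get_spec('http://example.com/data.nc', None)  # doctest: +SKIP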
@functools.total_ordering
class FormatSpecification(object):
"""
Provides the base class for file type definition.
Every FormatSpecification instance has a name which can be accessed with the :attr:`FormatSpecification.name` property and
a FileElement, such as filename extension or 32-bit magic number, with an associated value for format identification.
"""
def __init__(self, format_name, file_element, file_element_value,
handler=None, priority=0, constraint_aware_handler=False):
"""
Constructs a new FormatSpecification given the format_name and particular FileElements
Args:
* format_name - string name of fileformat being described
* file_element - FileElement instance of the element which identifies this FormatSpecification
* file_element_value - The value that the file_element should take if a file matches this FormatSpecification
Kwargs:
        * handler - function which will be called when the specification has been identified and is required to handle a format.
If None, then the file can still be identified but no handling can be done.
* priority - Integer giving a priority for considering this specification where higher priority means sooner consideration.
"""
if not isinstance(file_element, FileElement):
raise ValueError('file_element must be an instance of FileElement, got %r' % file_element)
self._file_element = file_element
self._file_element_value = file_element_value
self._format_name = format_name
self._handler = handler
self.priority = priority
self.constraint_aware_handler = constraint_aware_handler
def __hash__(self):
# Hashed by specification for consistent ordering in FormatAgent (including self._handler in this hash
# for example would order randomly according to object id)
return hash(self._file_element)
@property
def file_element(self):
return self._file_element
@property
def file_element_value(self):
return self._file_element_value
@property
def name(self):
"""The name of this FileFormat. (Read only)"""
return self._format_name
@property
def handler(self):
"""The handler function of this FileFormat. (Read only)"""
return self._handler
def _sort_key(self):
return (-self.priority, self.name, self.file_element)
def __lt__(self, other):
if not isinstance(other, FormatSpecification):
return NotImplemented
return self._sort_key() < other._sort_key()
def __eq__(self, other):
if not isinstance(other, FormatSpecification):
return NotImplemented
return self._sort_key() == other._sort_key()
def __ne__(self, other):
return not (self == other)
def __repr__(self):
# N.B. loader is not always going to provide a nice repr if it is a lambda function, hence a prettier version is available in __str__
return 'FormatSpecification(%r, %r, %r, handler=%r, priority=%s)' % (self._format_name, self._file_element,
self._file_element_value, self.handler, self.priority)
def __str__(self):
return '%s%s (priority %s)' % (self.name, ' (no handler available)' if self.handler is None else '', self.priority)
class FileElement(object):
"""
Represents a specific aspect of a FileFormat which can be identified using the given element getter function.
"""
def __init__(self, requires_fh=True):
"""
Constructs a new file element, which may require a file buffer.
Kwargs:
* requires_fh - Whether this FileElement needs a file buffer.
"""
self.requires_fh = requires_fh
def get_element(self, basename, file_handle):
"""Called when identifying the element of a file that this FileElement is representing."""
raise NotImplementedError("get_element must be defined in a subclass")
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
class MagicNumber(FileElement):
"""A :class:`FileElement` that returns a byte sequence in the file."""
len_formats = {4: ">L", 8: ">Q"}
def __init__(self, num_bytes, offset=None):
FileElement.__init__(self)
self._num_bytes = num_bytes
self._offset = offset
def get_element(self, basename, file_handle):
if self._offset is not None:
file_handle.seek(self._offset)
bytes = file_handle.read(self._num_bytes)
fmt = self.len_formats.get(self._num_bytes)
if len(bytes) != self._num_bytes:
raise EOFError(file_handle.name)
if fmt is None:
result = bytes
else:
result = struct.unpack(fmt, bytes)[0]
return result
def __repr__(self):
return 'MagicNumber({}, {})'.format(self._num_bytes, self._offset)
class FileExtension(FileElement):
"""A :class:`FileElement` that returns the extension from the filename."""
def get_element(self, basename, file_handle):
return os.path.splitext(basename)[1]
class LeadingLine(FileElement):
"""A :class:`FileElement` that returns the first line from the file."""
def get_element(self, basename, file_handle):
return file_handle.readline()
class UriProtocol(FileElement):
"""
A :class:`FileElement` that returns the "scheme" and "part" from a URI,
using :func:`~iris.io.decode_uri`.
"""
def __init__(self):
FileElement.__init__(self, requires_fh=False)
def get_element(self, basename, file_handle):
return iris.io.decode_uri(basename)[0]
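# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): how a
# FormatSpecification could be declared for a hypothetical format that is
# identified by a 4-byte magic number. The handler is a dummy stand-in and
# the magic value happens to be HDF5's leading bytes, chosen purely as an
# example.
if __name__ == '__main__':
    import io
    def _dummy_handler(*args, **kwargs):
        # Placeholder only; a real handler would load and return the data.
        return None
    example_spec = FormatSpecification('Example HDF5-like format',
                                       MagicNumber(4),
                                       0x89484446,
                                       handler=_dummy_handler,
                                       priority=5)
    buffer_obj = io.BytesIO(b'\x89HDF\r\n\x1a\n')
    print(example_spec)
    print(hex(example_spec.file_element.get_element('example.h5', buffer_obj)))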
| lgpl-3.0 |
haudren/scipy | scipy/optimize/_lsq/least_squares.py | 22 | 36536 | """Generic interface for least-square minimization."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy._lib.six import string_types
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
TERMINATION_MESSAGES = {
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`ftol` termination condition is satisfied.",
3: "`xtol` termination condition is satisfied.",
4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
FROM_MINPACK_TO_COMMON = {
0: -1, # Improper input parameters from MINPACK.
1: 2,
2: 3,
3: 4,
4: 1,
5: 0
# There are 6, 7, 8 for too small tolerance parameters,
# but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
n = x0.size
if diff_step is None:
epsfcn = EPS
else:
epsfcn = diff_step**2
# Compute MINPACK's `diag`, which is inverse of our `x_scale` and
# ``x_scale='jac'`` corresponds to ``diag=None``.
if isinstance(x_scale, string_types) and x_scale == 'jac':
diag = None
else:
diag = 1 / x_scale
full_output = True
col_deriv = False
factor = 100.0
if jac is None:
if max_nfev is None:
# n squared to account for Jacobian evaluations.
max_nfev = 100 * n * (n + 1)
x, info, status = _minpack._lmdif(
fun, x0, (), full_output, ftol, xtol, gtol,
max_nfev, epsfcn, factor, diag)
else:
if max_nfev is None:
max_nfev = 100 * n
x, info, status = _minpack._lmder(
fun, jac, x0, (), full_output, col_deriv,
ftol, xtol, gtol, max_nfev, factor, diag)
f = info['fvec']
if callable(jac):
J = jac(x)
else:
J = np.atleast_2d(approx_derivative(fun, x))
cost = 0.5 * np.dot(f, f)
g = J.T.dot(f)
g_norm = norm(g, ord=np.inf)
nfev = info['nfev']
njev = info.get('njev', None)
status = FROM_MINPACK_TO_COMMON[status]
active_mask = np.zeros_like(x0, dtype=int)
return OptimizeResult(
x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev, status=status)
def prepare_bounds(bounds, n):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
return lb, ub
def check_tolerance(ftol, xtol, gtol):
message = "{} is too low, setting to machine epsilon {}."
if ftol < EPS:
warn(message.format("`ftol`", EPS))
ftol = EPS
if xtol < EPS:
warn(message.format("`xtol`", EPS))
xtol = EPS
if gtol < EPS:
warn(message.format("`gtol`", EPS))
gtol = EPS
return ftol, xtol, gtol
def check_x_scale(x_scale, x0):
if isinstance(x_scale, string_types) and x_scale == 'jac':
return x_scale
try:
x_scale = np.asarray(x_scale, dtype=float)
valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
except (ValueError, TypeError):
valid = False
if not valid:
raise ValueError("`x_scale` must be 'jac' or array_like with "
"positive numbers.")
if x_scale.ndim == 0:
x_scale = np.resize(x_scale, x0.shape)
if x_scale.shape != x0.shape:
raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
return x_scale
def check_jac_sparsity(jac_sparsity, m, n):
if jac_sparsity is None:
return None
if not issparse(jac_sparsity):
jac_sparsity = np.atleast_2d(jac_sparsity)
if jac_sparsity.shape != (m, n):
raise ValueError("`jac_sparsity` has wrong shape.")
return jac_sparsity, group_columns(jac_sparsity)
# Loss functions.
def huber(z, rho, cost_only):
mask = z <= 1
rho[0, mask] = z[mask]
rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
if cost_only:
return
rho[1, mask] = 1
rho[1, ~mask] = z[~mask]**-0.5
rho[2, mask] = 0
rho[2, ~mask] = -0.5 * z[~mask]**-1.5
def soft_l1(z, rho, cost_only):
t = 1 + z
rho[0] = 2 * (t**0.5 - 1)
if cost_only:
return
rho[1] = t**-0.5
rho[2] = -0.5 * t**-1.5
def cauchy(z, rho, cost_only):
rho[0] = np.log1p(z)
if cost_only:
return
t = 1 + z
rho[1] = 1 / t
rho[2] = -1 / t**2
def arctan(z, rho, cost_only):
rho[0] = np.arctan(z)
if cost_only:
return
t = 1 + z**2
rho[1] = 1 / t
rho[2] = -2 * z / t**2
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
if loss == 'linear':
return None
if not callable(loss):
loss = IMPLEMENTED_LOSSES[loss]
rho = np.empty((3, m))
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
loss(z, rho, cost_only=cost_only)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
else:
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
rho = loss(z)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
return loss_function
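# ---------------------------------------------------------------------------
# Illustrative note (not part of the algorithm): `construct_loss_function`
# implements rho_(z) = C**2 * rho(z / C**2) with C = `f_scale` and z = f**2.
# By the chain rule the first derivative of rho_ equals rho' evaluated at
# z / C**2, so row 1 is left untouched, while row 0 is multiplied and row 2
# is divided by C**2, which is exactly what the wrapper above does. The
# hypothetical function below shows the (3, m) shape contract a
# user-supplied callable loss must follow; it is not used anywhere in this
# module.
def _example_callable_loss(z):
    # Row 0: rho(z), row 1: rho'(z), row 2: rho''(z), evaluated at z = f**2.
    rho = np.empty((3, z.size))
    rho[0] = np.log1p(z)
    rho[1] = 1 / (1 + z)
    rho[2] = -1 / (1 + z)**2
    return rho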
def least_squares(
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
"""Solve a nonlinear least-squares problem with bounds on the variables.
Given the residuals f(x) (an m-dimensional function of n variables) and
the loss function rho(s) (a scalar function), `least_squares` finds a
local minimum of the cost function F(x)::
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
subject to lb <= x <= ub
The purpose of the loss function rho(s) is to reduce the influence of
outliers on the solution.
Parameters
----------
fun : callable
Function which computes the vector of residuals, with the signature
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
respect to its first argument. The argument ``x`` passed to this
function is an ndarray of shape (n,) (never a scalar, even for n=1).
It must return a 1-d array_like of shape (m,) or a scalar.
x0 : array_like with shape (n,) or float
Initial guess on independent variables. If float, it will be treated
as a 1-d array with one element.
jac : {'2-point', '3-point', 'cs', callable}, optional
Method of computing the Jacobian matrix (an m-by-n matrix, where
element (i, j) is the partial derivative of f[i] with respect to
x[j]). The keywords select a finite difference scheme for numerical
        estimation. The scheme '3-point' is more accurate, but requires
        twice as many operations as '2-point' (default). The
scheme 'cs' uses complex steps, and while potentially the most
accurate, it is applicable only when `fun` correctly handles
complex inputs and can be analytically continued to the complex
plane. Method 'lm' always uses the '2-point' scheme. If callable,
it is used as ``jac(x, *args, **kwargs)`` and should return a
good approximation (or the exact value) for the Jacobian as an
array_like (np.atleast_2d is applied), a sparse matrix or a
`scipy.sparse.linalg.LinearOperator`.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each array must match the size of `x0` or be a scalar, in the latter
case a bound will be the same for all variables. Use ``np.inf`` with
an appropriate sign to disable bounds on all or some variables.
method : {'trf', 'dogbox', 'lm'}, optional
Algorithm to perform minimization.
* 'trf' : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
* 'dogbox' : dogleg algorithm with rectangular trust regions,
typical use case is small problems with bounds. Not recommended
for problems with rank-deficient Jacobian.
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn't handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
Default is 'trf'. See Notes for more information.
ftol : float, optional
Tolerance for termination by the change of the cost function. Default
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
and there was an adequate agreement between a local quadratic model and
the true model in the last step.
xtol : float, optional
Tolerance for termination by the change of the independent variables.
Default is 1e-8. The exact condition depends on the `method` used:
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
a trust-region radius and ``xs`` is the value of ``x``
scaled according to `x_scale` parameter (see below).
gtol : float, optional
Tolerance for termination by the norm of the gradient. Default is 1e-8.
        The exact condition depends on the `method` used:
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
``g_scaled`` is the value of the gradient scaled to account for
the presence of the bounds [STIR]_.
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
``g_free`` is the gradient with respect to the variables which
are not in the optimal state on the boundary.
* For 'lm' : the maximum absolute value of the cosine of angles
between columns of the Jacobian and the residual vector is less
than `gtol`, or the residual vector is zero.
x_scale : array_like or 'jac', optional
Characteristic scale of each variable. Setting `x_scale` is equivalent
to reformulating the problem in scaled variables ``xs = x / x_scale``.
An alternative view is that the size of a trust region along j-th
dimension is proportional to ``x_scale[j]``. Improved convergence may
be achieved by setting `x_scale` such that a step of a given size
along any of the scaled variables has a similar effect on the cost
function. If set to 'jac', the scale is iteratively updated using the
inverse norms of the columns of the Jacobian matrix (as described in
[JJMore]_).
loss : str or callable, optional
Determines the loss function. The following keyword values are allowed:
* 'linear' (default) : ``rho(z) = z``. Gives a standard
least-squares problem.
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
approximation of l1 (absolute value) loss. Usually a good
choice for robust least squares.
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
similarly to 'soft_l1'.
* 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers
influence, but may cause difficulties in optimization process.
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
a single residual, has properties similar to 'cauchy'.
If callable, it must take a 1-d ndarray ``z=f**2`` and return an
array_like with shape (3, m) where row 0 contains function values,
row 1 contains first derivatives and row 2 contains second
derivatives. Method 'lm' supports only 'linear' loss.
f_scale : float, optional
Value of soft margin between inlier and outlier residuals, default
is 1.0. The loss function is evaluated as follows
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
and ``rho`` is determined by `loss` parameter. This parameter has
no effect with ``loss='linear'``, but for other `loss` values it is
of crucial importance.
max_nfev : None or int, optional
Maximum number of function evaluations before the termination.
If None (default), the value is chosen automatically:
* For 'trf' and 'dogbox' : 100 * n.
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
otherwise (because 'lm' counts function calls in Jacobian
estimation).
diff_step : None or array_like, optional
Determines the relative step size for the finite difference
approximation of the Jacobian. The actual step is computed as
``x * diff_step``. If None (default), then `diff_step` is taken to be
a conventional "optimal" power of machine epsilon for the finite
difference scheme used [NR]_.
tr_solver : {None, 'exact', 'lsmr'}, optional
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
* 'exact' is suitable for not very large problems with dense
Jacobian matrices. The computational complexity per iteration is
comparable to a singular value decomposition of the Jacobian
matrix.
* 'lsmr' is suitable for problems with sparse and large Jacobian
matrices. It uses the iterative procedure
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
least-squares problem and only requires matrix-vector product
evaluations.
If None (default) the solver is chosen based on the type of Jacobian
returned on the first iteration.
tr_options : dict, optional
Keyword options passed to trust-region solver.
* ``tr_solver='exact'``: `tr_options` are ignored.
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
Additionally ``method='trf'`` supports 'regularize' option
(bool, default is True) which adds a regularization term to the
normal equation, which improves convergence if the Jacobian is
rank-deficient [Byrd]_ (eq. 3.4).
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines the sparsity structure of the Jacobian matrix for finite
difference estimation, its shape must be (m, n). If the Jacobian has
only few non-zero elements in *each* row, providing the sparsity
structure will greatly speed up the computations [Curtis]_. A zero
entry means that a corresponding element in the Jacobian is identically
zero. If provided, forces the use of 'lsmr' trust-region solver.
If None (default) then dense differencing will be used. Has no effect
for 'lm' method.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations (not supported by 'lm'
method).
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
`jac`.
Returns
-------
`OptimizeResult` with the following fields defined:
x : ndarray, shape (n,)
Solution found.
cost : float
Value of the cost function at the solution.
fun : ndarray, shape (m,)
Vector of residuals at the solution.
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
Modified Jacobian matrix at the solution, in the sense that J^T J
is a Gauss-Newton approximation of the Hessian of the cost function.
The type is the same as the one used by the algorithm.
    grad : ndarray, shape (n,)
Gradient of the cost function at the solution.
optimality : float
First-order optimality measure. In unconstrained problems, it is always
the uniform norm of the gradient. In constrained problems, it is the
quantity which was compared with `gtol` during iterations.
active_mask : ndarray of int, shape (n,)
Each component shows whether a corresponding constraint is active
(that is, whether a variable is at the bound):
* 0 : a constraint is not active.
* -1 : a lower bound is active.
* 1 : an upper bound is active.
Might be somewhat arbitrary for 'trf' method as it generates a sequence
of strictly feasible iterates and `active_mask` is determined within a
tolerance threshold.
nfev : int
Number of function evaluations done. Methods 'trf' and 'dogbox' do not
count function calls for numerical Jacobian approximation, as opposed
to 'lm' method.
njev : int or None
Number of Jacobian evaluations done. If numerical Jacobian
approximation is used in 'lm' method, it is set to None.
status : int
The reason for algorithm termination:
* -1 : improper input parameters status returned from MINPACK.
* 0 : the maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `ftol` termination condition is satisfied.
* 3 : `xtol` termination condition is satisfied.
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
message : str
Verbal description of the termination reason.
success : bool
True if one of the convergence criteria is satisfied (`status` > 0).
See Also
--------
leastsq : A legacy wrapper for the MINPACK implementation of the
Levenberg-Marquadt algorithm.
curve_fit : Least-squares minimization applied to a curve fitting problem.
Notes
-----
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
algorithms implemented in MINPACK (lmder, lmdif). It runs the
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
    The implementation is based on the paper [JJMore]_; it is very robust and
efficient with a lot of smart tricks. It should be your first choice
for unconstrained problems. Note that it doesn't support bounds. Also
it doesn't work when m < n.
Method 'trf' (Trust Region Reflective) is motivated by the process of
solving a system of equations, which constitute the first-order optimality
condition for a bound-constrained minimization problem as formulated in
[STIR]_. The algorithm iteratively solves trust-region subproblems
augmented by a special diagonal quadratic term and with trust-region shape
determined by the distance from the bounds and the direction of the
    gradient. These enhancements help to avoid taking steps directly into the
    bounds and to explore the whole space of variables efficiently. To
    further improve
convergence, the algorithm considers search directions reflected from the
bounds. To obey theoretical requirements, the algorithm keeps iterates
strictly feasible. With dense Jacobians trust-region subproblems are
solved by an exact method very similar to the one described in [JJMore]_
(and implemented in MINPACK). The difference from the MINPACK
implementation is that a singular value decomposition of a Jacobian
matrix is done once per iteration, instead of a QR decomposition and series
of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace
approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
The subspace is spanned by a scaled gradient and an approximate
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
constraints are imposed the algorithm is very similar to MINPACK and has
    generally comparable performance. The algorithm is quite robust in both
    unbounded and bounded problems, and is therefore chosen as the default algorithm.
Method 'dogbox' operates in a trust-region framework, but considers
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
The intersection of a current trust region and initial bounds is again
rectangular, so on each iteration a quadratic minimization problem subject
to bound constraints is solved approximately by Powell's dogleg method
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
sparse Jacobians. The algorithm is likely to exhibit slow convergence when
the rank of Jacobian is less than the number of variables. The algorithm
often outperforms 'trf' in bounded problems with a small number of
variables.
Robust loss functions are implemented as described in [BA]_. The idea
is to modify a residual vector and a Jacobian matrix on each iteration
such that computed gradient and Gauss-Newton Hessian approximation match
the true gradient and Hessian approximation of the cost function. Then
the algorithm proceeds in a normal way, i.e. robust loss functions are
implemented as a simple wrapper over standard least-squares algorithms.
.. versionadded:: 0.17.0
References
----------
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", Sec. 5.7.
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
solution of the trust region problem by minimization over
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
1988.
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of
Mathematics and its Applications, 13, pp. 117-120, 1974.
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
Dogleg Approach for Unconstrained and Bound Constrained
Nonlinear Optimization", WSEAS International Conference on
Applied Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
2nd edition", Chapter 4.
.. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
Proceedings of the International Workshop on Vision Algorithms:
Theory and Practice, pp. 298-372, 1999.
Examples
--------
In this example we find a minimum of the Rosenbrock function without bounds
    on independent variables.
>>> def fun_rosenbrock(x):
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
Notice that we only provide the vector of the residuals. The algorithm
constructs the cost function as a sum of squares of the residuals, which
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
>>> from scipy.optimize import least_squares
>>> x0_rosenbrock = np.array([2, 2])
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
>>> res_1.x
array([ 1., 1.])
>>> res_1.cost
9.8669242910846867e-30
>>> res_1.optimality
8.8928864934219529e-14
We now constrain the variables, in such a way that the previous solution
    becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and leave
    ``x[0]`` unconstrained. To this end, we specify the `bounds` parameter
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
We also provide the analytic Jacobian:
>>> def jac_rosenbrock(x):
... return np.array([
... [-20 * x[0], 10],
... [-1, 0]])
Putting this all together, we see that the new solution lies on the bound:
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
... bounds=([-np.inf, 1.5], np.inf))
>>> res_2.x
array([ 1.22437075, 1.5 ])
>>> res_2.cost
0.025213093946805685
>>> res_2.optimality
1.5885401433157753e-07
Now we solve a system of equations (i.e., the cost function should be zero
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
variables:
>>> def fun_broyden(x):
... f = (3 - x) * x + 1
... f[1:] -= x[:-1]
... f[:-1] -= 2 * x[1:]
... return f
The corresponding Jacobian matrix is sparse. We tell the algorithm to
estimate it by finite differences and provide the sparsity structure of
Jacobian to significantly speed up this process.
>>> from scipy.sparse import lil_matrix
>>> def sparsity_broyden(n):
... sparsity = lil_matrix((n, n), dtype=int)
... i = np.arange(n)
... sparsity[i, i] = 1
... i = np.arange(1, n)
... sparsity[i, i - 1] = 1
... i = np.arange(n - 1)
... sparsity[i, i + 1] = 1
... return sparsity
...
>>> n = 100000
>>> x0_broyden = -np.ones(n)
...
>>> res_3 = least_squares(fun_broyden, x0_broyden,
... jac_sparsity=sparsity_broyden(n))
>>> res_3.cost
4.5687069299604613e-23
>>> res_3.optimality
1.1650454296851518e-11
Let's also solve a curve fitting problem using robust loss function to
take care of outliers in the data. Define the model function as
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
observation and a, b, c are parameters to estimate.
First, define the function which generates the data with noise and
outliers, define the model parameters, and generate data:
>>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
... y = a + b * np.exp(t * c)
...
... rnd = np.random.RandomState(random_state)
... error = noise * rnd.randn(t.size)
... outliers = rnd.randint(0, t.size, n_outliers)
... error[outliers] *= 10
...
... return y + error
...
>>> a = 0.5
>>> b = 2.0
>>> c = -1
>>> t_min = 0
>>> t_max = 10
>>> n_points = 15
...
>>> t_train = np.linspace(t_min, t_max, n_points)
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
Define function for computing residuals and initial estimate of
parameters.
>>> def fun(x, t, y):
... return x[0] + x[1] * np.exp(x[2] * t) - y
...
>>> x0 = np.array([1.0, 1.0, 0.0])
Compute a standard least-squares solution:
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
Now compute two solutions with two different robust loss functions. The
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
not significantly exceed 0.1 (the noise level used).
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
... args=(t_train, y_train))
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
... args=(t_train, y_train))
And finally plot all the curves. We see that by selecting an appropriate
`loss` we can get estimates close to optimal even in the presence of
strong outliers. But keep in mind that generally it is recommended to try
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
options may cause difficulties in optimization process.
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
>>> y_true = gen_data(t_test, a, b, c)
>>> y_lsq = gen_data(t_test, *res_lsq.x)
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
>>> y_log = gen_data(t_test, *res_log.x)
...
>>> import matplotlib.pyplot as plt
>>> plt.plot(t_train, y_train, 'o')
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
>>> plt.plot(t_test, y_lsq, label='linear loss')
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
>>> plt.plot(t_test, y_log, label='cauchy loss')
>>> plt.xlabel("t")
>>> plt.ylabel("y")
>>> plt.legend()
>>> plt.show()
"""
if method not in ['trf', 'dogbox', 'lm']:
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
"callable.")
if tr_solver not in [None, 'exact', 'lsmr']:
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
raise ValueError("`loss` must be one of {0} or a callable."
.format(IMPLEMENTED_LOSSES.keys()))
if method == 'lm' and loss != 'linear':
raise ValueError("method='lm' supports only 'linear' loss function.")
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
if len(bounds) != 2:
raise ValueError("`bounds` must contain 2 elements.")
if max_nfev is not None and max_nfev <= 0:
raise ValueError("`max_nfev` must be None or positive integer.")
x0 = np.atleast_1d(x0).astype(float)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = prepare_bounds(bounds, x0.shape[0])
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
raise ValueError("Method 'lm' doesn't support bounds.")
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if np.any(lb >= ub):
raise ValueError("Each lower bound must be strictly less than each "
"upper bound.")
if not in_bounds(x0, lb, ub):
raise ValueError("`x0` is infeasible.")
x_scale = check_x_scale(x_scale, x0)
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)
def fun_wrapped(x):
return np.atleast_1d(fun(x, *args, **kwargs))
if method == 'trf':
x0 = make_strictly_feasible(x0, lb, ub)
f0 = fun_wrapped(x0)
if f0.ndim != 1:
raise ValueError("`fun` must return at most 1-d array_like.")
if not np.all(np.isfinite(f0)):
raise ValueError("Residuals are not finite in the initial point.")
n = x0.size
m = f0.size
if method == 'lm' and m < n:
raise ValueError("Method 'lm' doesn't work when the number of "
"residuals is less than the number of variables.")
loss_function = construct_loss_function(m, loss, f_scale)
if callable(loss):
rho = loss_function(f0)
if rho.shape != (3, m):
raise ValueError("The return value of `loss` callable has wrong "
"shape.")
initial_cost = 0.5 * np.sum(rho[0])
elif loss_function is not None:
initial_cost = loss_function(f0, cost_only=True)
else:
initial_cost = 0.5 * np.dot(f0, f0)
if callable(jac):
J0 = jac(x0, *args, **kwargs)
if issparse(J0):
J0 = csr_matrix(J0)
def jac_wrapped(x, _=None):
return csr_matrix(jac(x, *args, **kwargs))
elif isinstance(J0, LinearOperator):
def jac_wrapped(x, _=None):
return jac(x, *args, **kwargs)
else:
J0 = np.atleast_2d(J0)
def jac_wrapped(x, _=None):
return np.atleast_2d(jac(x, *args, **kwargs))
else: # Estimate Jacobian by finite differences.
if method == 'lm':
if jac_sparsity is not None:
raise ValueError("method='lm' does not support "
"`jac_sparsity`.")
if jac != '2-point':
warn("jac='{0}' works equivalently to '2-point' "
"for method='lm'.".format(jac))
J0 = jac_wrapped = None
else:
if jac_sparsity is not None and tr_solver == 'exact':
raise ValueError("tr_solver='exact' is incompatible "
"with `jac_sparsity`.")
jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
def jac_wrapped(x, f):
J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
f0=f, bounds=bounds, args=args,
kwargs=kwargs, sparsity=jac_sparsity)
if J.ndim != 2: # J is guaranteed not sparse.
J = np.atleast_2d(J)
return J
J0 = jac_wrapped(x0, f0)
if J0 is not None:
if J0.shape != (m, n):
raise ValueError(
"The return value of `jac` has wrong shape: expected {0}, "
"actual {1}.".format((m, n), J0.shape))
if not isinstance(J0, np.ndarray):
if method == 'lm':
raise ValueError("method='lm' works only with dense "
"Jacobian matrices.")
if tr_solver == 'exact':
raise ValueError(
"tr_solver='exact' works only with dense "
"Jacobian matrices.")
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if isinstance(J0, LinearOperator) and jac_scale:
raise ValueError("x_scale='jac' can't be used when `jac` "
"returns LinearOperator.")
if tr_solver is None:
if isinstance(J0, np.ndarray):
tr_solver = 'exact'
else:
tr_solver = 'lsmr'
if method == 'lm':
result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
max_nfev, x_scale, diff_step)
elif method == 'trf':
result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
gtol, max_nfev, x_scale, loss_function, tr_solver,
tr_options.copy(), verbose)
elif method == 'dogbox':
if tr_solver == 'lsmr' and 'regularize' in tr_options:
warn("The keyword 'regularize' in `tr_options` is not relevant "
"for 'dogbox' method.")
tr_options = tr_options.copy()
del tr_options['regularize']
result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
xtol, gtol, max_nfev, x_scale, loss_function,
tr_solver, tr_options, verbose)
result.message = TERMINATION_MESSAGES[result.status]
result.success = result.status > 0
if verbose >= 1:
print(result.message)
print("Function evaluations {0}, initial cost {1:.4e}, final cost "
"{2:.4e}, first-order optimality {3:.2e}."
.format(result.nfev, initial_cost, result.cost,
result.optimality))
return result
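# ---------------------------------------------------------------------------
# Usage sketch appended for illustration only (it is not part of the public
# module): fit a toy exponential-decay model using the hypothetical callable
# loss defined above. Running this file directly executes the demo;
# importing the module is unaffected.
if __name__ == '__main__':
    def _residuals(p, t, y):
        return p[0] * np.exp(-p[1] * t) - y
    rng = np.random.RandomState(0)
    t = np.linspace(0, 5, 50)
    y = 2.0 * np.exp(-1.3 * t) + 0.05 * rng.randn(t.size)
    res = least_squares(_residuals, np.array([1.0, 1.0]),
                        loss=_example_callable_loss, f_scale=0.1,
                        args=(t, y), verbose=1)
    print(res.x)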
| bsd-3-clause |
zengbin93/Debug_A | debug_a/analyst/pazq.py | 1 | 1929 | # coding: utf-8
"""
Ping An Securities - account statements - analysis
===============================================================================
"""
import os
import pandas as pd
# path = r"C:\Users\Mike\Desktop\pazq_records"
def read_data(path):
files = [os.path.join(path, file) for file in os.listdir(path) if file.endswith(".xls")]
res = pd.DataFrame()
for file in files:
data = pd.read_csv(file, encoding='gbk', sep='\t')
res = res.append(data, ignore_index=True)
res.columns = [x.strip('"=') for x in res.columns]
for col in res.columns:
res[col] = res[col].astype(str)
res[col] = res[col].apply(lambda x: x.strip('"='))
res.sort_values("发生日期", ascending=False, inplace=True)
res.reset_index(drop=True, inplace=True)
res.drop(['备注', 'Unnamed: 21'], axis=1, inplace=True)
float_col = ['发生金额', '成交均价', '成交数量', '成交金额', '股份余额',
'手续费', '印花税', '资金余额', '委托价格', '委托数量', '过户费']
for col in float_col:
res[col] = res[col].astype(float)
return res
def cal_gain(data):
"""根据交易数据,计算总盈亏"""
res = dict(data.groupby('业务名称').sum()['发生金额'])
total_gain = -res['银证转出'] - res['银证转入']
return round(total_gain, 4)
def cal_share_gain(data):
"""计算个股操作盈亏"""
data = data[data['证券代码'] != "nan"]
res = data.groupby(['证券名称', '业务名称']).sum()['成交金额']
shares = res.index.levels[0]
share_gains = []
for share in shares:
try:
print(share, " - 总盈亏:")
stg = res[share]['证券卖出清算'] - res[share]['证券买入清算']
print(stg, '\n')
share_gains.append((share, stg))
except:
print("\nerro: ", res[share])
return share_gains
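if __name__ == "__main__":
    # Illustrative usage only: the directory below is a placeholder and must
    # point at a folder of exported statement files (*.xls) for this to run.
    records_path = r"C:\Users\Mike\Desktop\pazq_records"
    trades = read_data(records_path)
    print("total profit/loss:", cal_gain(trades))
    cal_share_gain(trades)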
| apache-2.0 |
yunque/sms-tools | lectures/06-Harmonic-model/plots-code/piano-spectrum.py | 24 | 1038 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/piano.wav')
M = 1100
w = np.blackman(M)
N = 2048
pin = int(.3 * fs)
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
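# Note: hM1 and hM2 are the two half-lengths of the analysis window, so the
# slice below extracts the M-sample segment of the signal centred on the
# analysis point pin (0.3 s into the file).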
x1 = x[pin-hM1:pin+hM2]
mX, pX = DFT.dftAnal(x1, w, N)
plt.figure(1, figsize=(9, 7))
plt.subplot(311)
plt.plot(np.arange(-hM1, hM2)/float(fs), x1, lw=1.5)
plt.axis([-hM1/float(fs), hM2/float(fs), min(x1), max(x1)])
plt.title('x (piano.wav)')
plt.subplot(3,1,2)
plt.plot(fs*np.arange(mX.size)/float(N), mX, 'r', lw=1.5)
plt.axis([0,fs/4,-90,max(mX)])
plt.title ('mX')
plt.subplot(3,1,3)
plt.plot(fs*np.arange(pX.size)/float(N), pX, 'c', lw=1.5)
plt.axis([0,fs/4,-5,max(pX)])
plt.title ('pX')
plt.tight_layout()
plt.savefig('piano-spectrum.png')
plt.show()
| agpl-3.0 |
magsol/PySpark-Affinities | compute_affinities.py | 1 | 5652 | import argparse
import numpy as np
import numpy.linalg as la
from pyspark import SparkConf, SparkContext
import scipy
import scipy.ndimage
import scipy.sparse as sparse
import sklearn.metrics.pairwise as pairwise
from image_affinities import connectivity, parse_coordinates
def distance_threshold(z):
"""
Firstly, this method is only invoked if the distance threshold epsilon is set.
Secondly, this method returns True if the pair of points being compared
fall within that distance threshold.
"""
# Parse out the floating point coordinates.
x = parse_coordinates(z[0][1])
y = parse_coordinates(z[1][1])
# All done!
return la.norm(x - y) < EPSILON.value
def pairwise_pixels(pixels):
"""
Computes the affinity for a pair of pixels.
"""
i, j = pixels
rbf = pairwise.rbf_kernel(IMAGE.value[i], IMAGE.value[j], gamma = SIGMA.value)[0, 0]
return [(i, [j, rbf]), (j, [i, rbf])]
def pixel_row_vector(affinities):
"""
Assembles the affinities into a correct row vector.
"""
rowid, values = affinities
return [rowid, {v[0]: v[1] for v in values}]
def pairwise_points(z):
"""
Computes the RBF affinity for a pair of Cartesian points.
"""
# Parse out floating point coordinates.
x = parse_coordinates(z[0][1])
y = parse_coordinates(z[1][1])
# Find the RBF kernel between them.
return [int(z[0][0]), [int(z[1][0]), pairwise.rbf_kernel(x, y, gamma = SIGMA.value)[0, 0]]]
# We don't need to return a pair of tuples, because all pairings
# are enumerated; there will be another case where the values of
# i1 and i2 are switched.
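# ---------------------------------------------------------------------------
# Illustrative helper (not used by the Spark pipeline above): the affinity
# computed in this script is the RBF kernel exp(-gamma * ||x - y||**2), so a
# quick local sanity check against scikit-learn looks like this hypothetical
# function.
def _rbf_affinity_check(x, y, gamma):
    expected = np.exp(-gamma * la.norm(x - y) ** 2)
    actual = pairwise.rbf_kernel(x.reshape(1, -1), y.reshape(1, -1),
                                 gamma = gamma)[0, 0]
    return np.isclose(expected, actual)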
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'PySpark Affinities',
epilog = 'lol sp3c+r4l', add_help = 'How to use',
prog = 'python compute_affinities.py [image | text] <arguments>')
parser.add_argument("-i", "--input", required = True,
help = "Path to an image file, or text file with n-dimensional comma-separated Cartesian data.")
parser.add_argument("-o", "--output", required = True,
help = "Output path.")
parser.add_argument("-s", "--sigma", type = float, default = 0.0,
help = "If > 0.0, the value of sigma used in RBF kernel. Otherwise, it is estimated. [DEFAULT: 0.0]")
parser.add_argument("-q", "--multiplier", type = float, default = 1.0,
help = "Constant multiplier for sigma in the RBF kernel. [DEFAULT: 1.0]")
# Create subparsers for images and text.
subparsers = parser.add_subparsers(dest = "sub_name")
text_p = subparsers.add_parser("text")
text_p.add_argument("-e", "--epsilon", type = float, default = 2.0,
help = "If specified, pairwise affinities are only computed for points whose Euclidean distance is less than this. [DEFAULT: 2.0]")
image_p = subparsers.add_parser("image")
image_p.add_argument("-n", "--neighborhood", type = int, choices = [4, 8], default = 8,
help = "Number of connected pixels in the neighborhood, 4 or 8. [DEFAULT: 8]")
args = vars(parser.parse_args())
infile = args['input']
outdir = args['output']
sigma = args['sigma']
q = args['multiplier']
# Set up the Spark context. Because awesome.
sc = SparkContext(conf = SparkConf())
if args['sub_name'] != 'image' and args['sub_name'] != 'text':
print 'Command "%s" not recognized.' % args['sub_name']
quit()
TYPE = sc.broadcast(args['sub_name'])
A = None
if args["sub_name"] == "image":
# Read in the image. Broadcast it and determine the indices of connected pixels.
image = scipy.ndimage.imread(args['input'], flatten = True)
IMAGE = sc.broadcast(image.ravel())
A = sc.parallelize(connectivity(image.shape[0], image.shape[1]), sc.defaultParallelism * 4)
else:
# Read the input file, index each data point, and parallelize to an RDD.
rawdata = np.loadtxt(args['input'], dtype = np.str, delimiter = "\n")
indexed = np.vstack([np.arange(rawdata.shape[0]), rawdata]).T
EPSILON = sc.broadcast(args['epsilon'])
D = sc.parallelize(indexed)
A = D.cartesian(D)
if EPSILON.value > 0.0:
A = A.filter(distance_threshold)
# If sigma was not specified, we'll compute it ourselves. We do this by first
# finding the *median difference* between all points (connected pixels or Cartesian
# data that passes the distance threshold), and use that to compute sigma.
if sigma <= 0.0:
d = np.median(np.array(
A.map(
lambda x:
np.abs(IMAGE.value[x[0]] - IMAGE.value[x[1]])
if TYPE.value == 'image' else
la.norm(parse_coordinates(x[0][1]) - parse_coordinates(x[1][1]))
)
.collect()))
sigma = 1.0 / (2 * ((d * q) ** 2))
# Now that we have sigma, let's compute an affinity matrix.
SIGMA = sc.broadcast(sigma)
if args['sub_name'] == 'image':
affinities = A.flatMap(pairwise_pixels).groupByKey().map(pixel_row_vector).sortByKey().collect()
num = image.shape[0] * image.shape[1]
A1 = sparse.dok_matrix((num, num), dtype = np.float)
for rowid, values in affinities:
for k, v in values.iteritems():
A1[rowid, k] = v
diag = np.arange(num, dtype = np.int)
A1[diag, diag] = 1.0
else:
affinities = A.map(pairwise_points).sortByKey().collect()
print 'Of %sx%s possible pairs, we have %s.' % (rawdata.shape[0], rawdata.shape[0], len(affinities))
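    # Example invocations (illustrative; file names are placeholders), run
    # via spark-submit or plain python with pyspark on the path:
    #   compute_affinities.py -i photo.png -o out_dir image -n 8
    #   compute_affinities.py -i points.txt -o out_dir -s 0.5 text -e 2.0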
| apache-2.0 |
johnglover/simpl | simpl/plot/__init__.py | 1 | 1682 | import matplotlib.pyplot as plt
import colours
def plot_peaks(frames):
"Plot peaks found by a peak detection algorithm"
# Get the maximum peak amplitude, used to select an appropriate
# colour for each peak.
max_amp = None
for frame in frames:
if frame.peaks:
max_amp = max(max_amp, max([p.amplitude for p in frame.peaks]))
if not max_amp:
print "No peaks with an amplitude of > 0 to plot"
return
for frame_number, frame in enumerate(frames):
for peak in frame.peaks:
plt.plot(frame_number, int(peak.frequency), linestyle="None",
marker="o", markersize=2, markeredgewidth=None,
markerfacecolor=colours.pbj(peak.amplitude / max_amp))
def plot_partials(frames, show_peaks=False):
"Plot partials created by a partial tracking algorithm"
# Get the maximum peak amplitude, used to select an appropriate
# colour for each peak.
max_amp = None
for frame in frames:
if frame.partials:
max_amp = max(max_amp, max([p.amplitude for p in frame.partials]))
if not max_amp:
print "No partial peaks with an amplitude of > 0 to plot"
return
for n in range(len(frames) - 1):
for p in range(len(frames[n].partials)):
x = [n, n + 1]
y = [frames[n].partials[p].frequency,
frames[n + 1].partials[p].frequency]
amp = frames[n].partials[p].amplitude
freq = frames[n + 1].partials[p].frequency
if amp and freq:
plt.plot(x, y, color=colours.pbj(amp / max_amp))
if show_peaks:
plot_peaks(frames)
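if __name__ == '__main__':
    # Illustrative smoke test with mock objects standing in for the simpl
    # analysis frames; the real library provides frame/peak classes exposing
    # `peaks`, `partials`, `amplitude` and `frequency` attributes.
    class _MockPeak(object):
        def __init__(self, frequency, amplitude):
            self.frequency = frequency
            self.amplitude = amplitude
    class _MockFrame(object):
        def __init__(self, peaks):
            self.peaks = peaks
            self.partials = peaks
    frames = [_MockFrame([_MockPeak(440.0 + 10 * n, 0.5)]) for n in range(20)]
    plot_partials(frames, show_peaks=True)
    plt.show()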
| gpl-3.0 |
Egdus/SimpleShedSimulator | simpleshedsimulator/core/act.py | 1 | 16843 | #SimpleShedSimulator for quick schedule risk analysis
#Copyright (C) 2014 Anders Jensen
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#standard modules
"""
The act module contains the activity class, which is used to handle
activities. Every simpleshedsimulator project must have instances of
this class. Its methods are mainly "setting" and "getting" methods
through which various attributes can be assigned, changed or retrieved.
After initiating activity objects in your project, these objects are
assigned to a network object, which then allows you to control your
network. For further information see the net module.
"""
import datetime
import time
import sqlite3 as lite
import os
#Simpleshedsimulator modules
import stats
from triangular import triang
from table import MakeTable
from tools import StrToInt, IntToStr, IO
#plotting with matplotlib:
import numpy as np
import matplotlib as mpl
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
from matplotlib import dates
class activity:
'''Creates an activity instance which later can be used by a
network object
Args:
Returns:
Object
Raises:
ex::
a = activity()
a.AssignID(1)
a.AssignDuration(5)
a.AssignSuccsesors(2)
'''
def __init__(self):
self.today = datetime.date.today()
self.free_critical = False
self.total_critical = False
self.free_critical_slack = None
self.total_critical_slack = None
self.early_start = None
self.late_start = None
self.early_finish = None
self.late_finish = None
def __eq__(self, other):
return self.ID == other.ID
def AssignID(self, ID):
'''Assigns an ID to the activity. The smallest allowed ID is 1.
Args:
            ID (int): Preferred ID of the activity
Returns:
Sets self.ID = ID.
Raises:
'''
if ID < 1:
            errormessage = 'Cannot have an ID less than 1.'
raise AttributeError(errormessage)
else:
try:
self.ID = int(ID)
except:
print "Assigned ID must be integer"
def AssignName(self, name):
        '''Assigns a name to the activity instance.
Args:
name (str): The activity name
Returns:
Sets self.name = name
Raises:'''
self.name = name
def AssignStart(self, Y, M, D):
        '''Assigns a startdate to the activity instance.
Args:
Y,M,D (int): The activity's startdate
Returns:
Sets self.start = datetime.date(Y,M,D) and calculates
            enddate if possible.
Raises:'''
self.start = datetime.date(Y, M, D)
try:
difference = datetime.timedelta(self.duration)
newdate = self.start + difference
self.AssignEnd(newdate.year, newdate.month, newdate.day)
except AttributeError:
try:
delta = self.end - self.start
self.duration = delta.days
except AttributeError:
pass
def AssignEnd(self, Y, M, D):
        '''Assigns an enddate to the activity instance.
Args:
Y,M,D (int): The activity's enddate given as YYYY, mm,
dd
Returns:
Sets self.end = datetime.date(Y,M,D) and sets the
duration if possible.
Raises:'''
self.end = datetime.date(Y, M, D)
try:
delta = self.end - self.start
self.duration = delta.days
except AttributeError:
pass
def AssignDuration(self, duration):
        '''Assigns a duration to the activity instance.
Args:
            duration (int): The activity's duration
Returns:
Sets self.duration = duration and sets the enddate
if possible.
Raises:
'''
self.duration = duration
try:
difference = datetime.timedelta(days=duration)
newdate = self.start + difference
self.AssignEnd(newdate.year, newdate.month, newdate.day)
except AttributeError:
return "No startdate assigend. Use the method AssignStart"
def AssignSuccsesors(self, *args):
        '''Assigns successors to the activity instance.
Args:
*args (int): The activity's successors
Returns:
Sets self.S = [] (list).
Raises:'''
#Add FS condition to empty predecessors
arguments = []
for q in args:
try:
q = int(q)
if isinstance(q, int):
arguments.append(str(q) + 'FS')
else:
arguments.append(q)
except ValueError:
arguments.append(q)
args = arguments
try:
for q in args:
if self.ID < StrToInt(q):
self.S.append(q)
elif self.ID > StrToInt(q):
                    errormessage = 'Successor ID smaller than Activity ID'
raise AttributeError(errormessage)
#remove duplicates and convert to str()
self.S = [str(i) for i in self.S]
self.S = list(set(self.S))
#Convert to uppercase
suc = []
for s in self.S:
try:
suc.append(s.upper())
except SyntaxError:
suc.append(s)
continue
self.S = suc
except:
self.S = []
for q in args:
if self.ID < StrToInt(q):
self.S.append(q)
elif self.ID > StrToInt(q):
                    errormessage = 'Successor ID smaller than Activity ID'
raise AttributeError(errormessage)
#remove duplicates and convert to str()
self.S = [str(i) for i in self.S]
self.S = list(set(self.S))
#Convert to uppercase
suc = []
for s in self.S:
try:
suc.append(s.upper())
except SyntaxError:
suc.append(s)
continue
self.S = suc
def AssignPredecesors(self, *args):
        '''Assigns predecessors to the activity instance.
Args:
*args (int): The activity's predecesors
Returns:
Sets self.P = [] (list).
Raises:'''
#Add FS condition to empty predecessors
arguments = []
for q in args:
try:
q = int(q)
if isinstance(q, int):
arguments.append(str(q) + 'FS')
else:
arguments.append(q)
except ValueError:
arguments.append(q)
args = arguments
try:
for q in args:
if self.ID > StrToInt(q):
self.P.append(q)
elif self.ID < StrToInt(q):
                    raise AttributeError('Predecessor ID bigger than Activity ID')
#remove duplicates and convert to str()
self.P = [str(i) for i in self.P]
self.P = list(set(self.P))
#Convert to uppercase
pre = []
for p in self.P:
try:
pre.append(p.upper())
except SyntaxError:
pre.append(p)
continue
self.P = pre
except:
self.P = []
for q in args:
if self.ID > StrToInt(q):
self.P.append(q)
elif self.ID < StrToInt(q):
                    raise AttributeError('Predecessor ID bigger than Activity ID')
#remove duplicates and convert to str()
self.P = [str(i) for i in self.P]
self.P = list(set(self.P))
#Convert to uppercase
pre = []
for p in self.P:
try:
pre.append(p.upper())
except SyntaxError:
pre.append(p)
continue
self.P = pre
def GetStart(self, asobject=False):
        '''Returns the startdate either as a list of integers [Y, M, D]
        or as a datetime.date object.
        Args:
            asobject (boolean): Determines whether to return a
            datetime.date object or a list of integers
        Returns:
            a datetime.date object or a list of integers
Raises:'''
try:
if asobject == False:
Start = []
for q in str(self.start).split('-'):
Start.append(int(q))
return Start
elif asobject == True:
return self.start
except AttributeError:
pass
def GetEnd(self, asobject=False):
        '''Returns the enddate either as a list of integers [Y, M, D]
        or as a datetime.date object.
        Args:
            asobject (boolean): Determines whether to return a
            datetime.date object or a list of integers
        Returns:
            a datetime.date object or a list of integers
Raises:'''
try:
if asobject == False:
End = []
for q in str(self.end).split('-'):
End.append(int(q))
return End
elif asobject == True:
return self.end
except AttributeError:
pass
def GetDuration(self):
'''Returns the duration of the activity
Args:
Returns:
self.duration (int)
Raises:'''
try:
return self.duration
except AttributeError:
pass
def GetName(self):
'''Returns the name of the activity
Args:
Returns:
self.name (str)
Raises:'''
try:
return self.name
except AttributeError:
pass
def GetID(self):
'''Returns the ID of the activity
Args:
Returns:
self.ID (int)
Raises:'''
try:
return self.ID
except:
print "No ID assigned. Use AssignID()."
def GetSuccsesors(self):
'''Returns the succsessors of the activity
Args:
Returns:
self.S (list)
Raises:'''
try:
return self.S
except:
return None
def GetPredecesors(self):
'''Returns the predecessors of the activity
Args:
Returns:
self.P (list)
Raises:'''
try:
return self.P
except:
return None
def GetSummary(self):
self.summary = []
self.summary.append(self.GetID())
self.summary.append(self.GetName())
self.summary.append(self.GetDuration())
self.summary.append(self.GetStart())
self.summary.append(self.GetEnd())
self.summary.append(self.GetSuccsesors())
self.summary.append(self.GetPredecesors())
return self.summary
def EstablilshPredecesor(self, activity):
startdate = activity.GetEnd()
self.AssignStart(startdate[0], startdate[1], startdate[2])
def SetCritical(self, critical, free=True):
'''Sets the activity to critical
Args: free (True/False)
Returns:
Raises:'''
if free == True:
if critical in ["yes", True, "y", "Y", "YES", "Yes"]:
self.free_critical = True
else:
if critical in ["yes", True, "y", "Y", "YES", "Yes"]:
self.total_critical = True
def GetCritical(self, free=False):
'''Returns True/false depending on criticality of activity
(.SetCritical())
Args: free (True/False)
Returns:
Raises:'''
try:
if free == True:
                return self.free_critical
else:
                return self.total_critical
except:
return "None"
def SetSlack(self, slack, free=False):
'''Sets the slack of the activity
Args: slack (int), free (True/False)
Returns:
Raises:'''
if free == True:
self.free_critical_slack = slack.days
if self.free_critical_slack <= 0.0:
self.free_critical_slack = 0
self.SetCritical(True, free = True)
elif self.free_critical_slack > 0.0:
self.SetCritical(False, free = True)
else:
self.total_critical_slack = slack
if self.total_critical_slack <= 0.0:
self.SetCritical(True, free = False)
elif self.total_critical_slack > 0.0:
self.SetCritical(False, free = False)
def GetSlack(self, free=False):
'''Returns the slack of the activity
Args: free (True/False)
Returns: int
Raises:'''
if free == True:
return self.free_critical_slack
else:
return self.total_critical_slack
def AssignDurationRange(self, **kwargs):
'''Sets the duration range of the activity such that the
activity's duration is in the intervall [min, max] and mode
equal to ml.
Args:
Returns:
Raises:
ex::
activity.AssignDurationRang(min=1, ml=2, max=3)
'''
for args in kwargs.items():
if args[0] in ["min", "MIN", "Min"]:
self.minduration = args[1]
elif args[0] in ["max", "MAX", "Max"]:
self.maxduration = args[1]
elif args[0] in ["ml", "ML", "Ml"]:
self.mlduration = args[1]
def SetDurationRangeMin(self, MIN):
'''Sets the minimum duration of the activity
Args: MIN (int)
Returns:
Raises:
'''
self.minduration = MIN
def SetDurationRangeML(self,ML):
'''Sets the most likely duration of the activity
Args: ML (int)
Returns:
Raises:
'''
self.mlduration = ML
def SetDurationRangeMax(self,MAX):
'''Sets the maximum duration of the activity
Args: MAX (int)
Returns:
Raises:
'''
self.maxduration = MAX
def GetDurationRangeMin(self):
'''Returns the minimum duration
Args:
Returns: self.minduration (int)
Raises:
'''
try:
return self.minduration
except:
return None
def GetDurationRangeML(self):
'''Returns the most likely duration
Args:
Returns: self.mlduration (int)
Raises:
'''
try:
return self.mlduration
except:
return None
def GetDurationRangeMax(self):
'''Returns the maximum duration
Args:
Returns: self.maxduration (int)
Raises:
'''
try:
return self.maxduration
except:
return None
def IncrementID(self, increment, SUC=True, PRE=False, ID = True):
if ID == True:
#increment id
current_ID = self.GetID()
newID = current_ID + increment
self.AssignID(newID)
#Increment Succsesors
if SUC == True:
newS = []
try:
for suc in self.S:
condition = IntToStr(suc)
newS.append(str(StrToInt(suc) + increment) + condition)
self.S = newS
except AttributeError:
pass
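if __name__ == "__main__":
    # Illustrative usage only (mirrors the class docstring): build two small
    # linked activities and read back the computed dates.
    a = activity()
    a.AssignID(1)
    a.AssignName("Excavation")
    a.AssignStart(2016, 1, 4)
    a.AssignDuration(5)
    a.AssignSuccsesors(2)
    b = activity()
    b.AssignID(2)
    b.AssignName("Foundations")
    b.AssignPredecesors(1)
    b.EstablilshPredecesor(a)
    b.AssignDuration(10)
    print a.GetSummary()
    print b.GetEnd()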
| gpl-3.0 |
ioshchepkov/SHTOOLS | examples/python/Other/TestOther.py | 2 | 1487 | #!/usr/bin/env python
"""
This script tests the gravity and magnetics routines.
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
from pyshtools import utils
sys.path.append(os.path.join(os.path.dirname(__file__), "../Common"))
from FigStyle import style_shtools
# set shtools plot style:
mpl.rcParams.update(style_shtools)
# ==== MAIN FUNCTION ====
def main():
TestCircle()
TestWigner()
# ==== TEST FUNCTIONS ====
def TestCircle():
coord = utils.MakeCircleCoord(30, 10, 30)
lat = coord[:, 0]
lon = coord[:, 1]
fig = plt.figure()
plt.axis([-180, 180, -90, 90])
plt.plot(lon, lat, 'r-', 10, 30, 'r.')
coord = utils.MakeCircleCoord(-75, -45, 10)
plt.plot(coord[:, 1], coord[:, 0], 'b-', -45, -75, 'b.')
coord = utils.MakeEllipseCoord(0, 45, 20, 30, 10)
plt.plot(coord[:, 1], coord[:, 0], 'g-', 45, 0, 'g.')
fig.savefig('Circles.png')
def TestWigner():
w3j, jmin, jmax = utils.Wigner3j(4, 2, 0, 0, 0)
print("< J, 4, 2 / 0, 0, 0 >")
print("jmin = ", jmin)
print("jmax = ", jmax)
print(w3j)
w3j, jmin, jmax = utils.Wigner3j(10, 14, -1, -4, 5)
print("< J, 10, 14 / -1, -4, 5 >")
print("jmin = ", jmin)
print("jmax = ", jmax)
print(w3j)
# ==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
| bsd-3-clause |
walterreade/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
aap5869/RIT | computing_and_control/ipcv/histogram_opencv.py | 1 | 5919 | import cv2
import numpy
def histogram(image, bitDepth=8):
"""
title::
histogram_opencv
description::
        This method will generate the histogram, probability density function
        and the cumulative density function of an image. It will use the
cv2.calcHist method from the opencv library to help generate each
output. Each output will be returned as a list.
attributes::
        image
(numpy ndarray) An image file that is read in by the cv2.imread
function. The iamge can be either black and white or full color and
can have any bit depth. For color images, the color channel order
is BGR (blue, green, red).
bitDepth
(int [optional]) The bit depth of each color channel of the image.
Defaults to 8 bits per color channel.
returns::
h
(list) The histogram for the image. For a color image, the
histogram is a list of three lists with each list representing the
histogram for each color channel in BGR order. For a grayscale
image, the histogram will be returned as a 2^N element list, N
being the bit depth of the image.
pdf
(list) The PDF (probability density function) for the image. For a
color image, the PDF is a list of three lists with each list
representing the PDF for each color channle in BGR order. For a
grayscale image, the PDf will be returned as a 2^N element list, N
being the bit depth of the image.
cdf
(list) The CDF (cumulative density function) for the image. For a
color image, the CDF is a list of three lists with each list
representing the CDF for each color channel in BGR order. FOr a
grayscale iamge, the CDF will be returned as a 2^N element list,
N being the bit depth of the image.
author::
Alex Perkins
copyright::
Copyright (C) 2016, Rochester Institute of Technology
version::
1.0.0
"""
# Determine the number of pixel values in the image
maxCount = 2**bitDepth
# Check if the image is a color image
if len(image.shape) == 3:
# Get the number of rows, columns and planes in the image
rows, cols, planes = image.shape
# Determine the number of pixels in the image
numPixels = rows*cols
# Create the histogram with BGR color channels
h = numpy.array([[0]*maxCount, [0]*maxCount, [0]*maxCount])
# Iterate through each color channel and get the histogram for each
for plane in range(planes):
h[plane] = cv2.calcHist([image], [plane], None, [maxCount],\
[0, maxCount]).reshape(-1)
# Generate the PDF and CDF for the image
pdf = h/numPixels
cdf = numpy.cumsum(pdf, axis=1)
# Image is grayscale if previous check is not met
else:
# Get the number of rows and columns in the image
rows, cols = image.shape
# Determine the number of pixels in the image
numPixels = rows*cols
# Get the histogram for the image and generate the PDF and CDF for
# the image
h = cv2.calcHist([image], [0], None, [maxCount], [0, maxCount])
pdf = h/numPixels
cdf = numpy.cumsum(pdf)
# Convert each output to a list
h = h.tolist()
pdf = pdf.tolist()
cdf = cdf.tolist()
return h, pdf, cdf
if __name__ == '__main__':
import cv2
import ipcv
import time
# A greyscale test image
filename = 'crowd.jpg'
# A 3-channel color test image
filename = 'lenna.tif'
im = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
print('Data type = {0}'.format(type(im)))
print('Image shape = {0}'.format(im.shape))
print('Image size = {0}'.format(im.size))
dataType = str(im.dtype)
imType = {'uint8':8, 'uint16':16, 'uint32':32}
startTime = time.time()
h, pdf, cdf = ipcv.histogram(im, bitDepth=imType[dataType])
print('Elasped time = {0} [s]'.format(time.time() - startTime))
# The follow will produce a figure containing color-coded plots of the
# computed histogram, probability function (PDF), and cumulative density
# function (CDF)
import matplotlib.pyplot
import matplotlib.backends.backend_agg
maxCount = 2**imType[dataType]
bins = list(range(maxCount))
figure = matplotlib.pyplot.figure('Histogram')
canvas = matplotlib.backends.backend_agg.FigureCanvas(figure)
histAxes = figure.add_subplot(3, 1, 1)
pdfAxes = figure.add_subplot(3, 1, 2)
cdfAxes = figure.add_subplot(3, 1, 3)
if len(im.shape) == 3:
histAxes.set_ylabel('Number of Pixels')
histAxes.set_xlim([0, maxCount - 1])
histAxes.plot(bins, h[0], 'b', \
bins, h[1], 'g', \
bins, h[2], 'r')
pdfAxes.set_ylabel('PDF')
pdfAxes.set_xlim([0, maxCount - 1])
pdfAxes.plot(bins, pdf[0], 'b', \
bins, pdf[1], 'g', \
bins, pdf[2], 'r')
cdfAxes.set_xlabel('Digital Count')
cdfAxes.set_ylabel('CDF')
cdfAxes.set_xlim([0, maxCount - 1])
cdfAxes.plot(bins, cdf[0], 'b', \
bins, cdf[1], 'g', \
bins, cdf[2], 'r')
else:
histAxes.set_ylabel('Number of Pixels')
histAxes.set_xlim([0, maxCount - 1])
histAxes.plot(bins, h, 'k')
pdfAxes.set_ylabel('PDF')
pdfAxes.set_xlim([0, maxCount - 1])
pdfAxes.plot(bins, pdf, 'k')
cdfAxes.set_xlabel('Digital Count')
cdfAxes.set_ylabel('CDF')
cdfAxes.set_xlim([0, maxCount - 1])
cdfAxes.plot(bins, cdf, 'k')
matplotlib.pyplot.show()
| mit |
aosagie/spark | python/pyspark/ml/clustering.py | 8 | 49816 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
"""
.. note:: Experimental
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
@property
@since("2.4.0")
def numIter(self):
"""
Number of iterations.
"""
return self._call_java("numIter")
class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable, HasTrainingSummary):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
HasProbabilityCol, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
specifying each's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. note:: For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001,
... maxIter=10, seed=10)
>>> model = gm.fit(df)
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> summary.logLikelihood
8.14636...
>>> weights = model.weights
>>> len(weights)
3
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[4].prediction == rows[5].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
self._setDefault(k=2, tol=0.01, maxIter=100)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureSummary(ClusteringSummary):
"""
.. note:: Experimental
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Summary of KMeans.
.. versionadded:: 2.1.0
"""
@property
@since("2.4.0")
def trainingCost(self):
"""
K-means cost (sum of squared distances to the nearest centroid for all points in the
training dataset). This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
class KMeansModel(JavaModel, GeneralJavaMLWritable, JavaMLReadable, HasTrainingSummary):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(super(KMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class KMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol, HasMaxIter,
HasTol, HasSeed, JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> kmeans = KMeans(k=2, seed=1)
>>> model = kmeans.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
2.0
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 1.5.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
distanceMeasure="euclidean")
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure`
"""
return self.getOrDefault(self.distanceMeasure)
class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable, HasTrainingSummary):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
..note:: Deprecated in 3.0.0. It will be removed in future versions. Use
ClusteringEvaluator instead. You can also get the cost on the training dataset in the
summary.
"""
warnings.warn("Deprecated in 3.0.0. It will be removed in future versions. Use "
"ClusteringEvaluator instead. You can also get the cost on the training "
"dataset in the summary.", DeprecationWarning)
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class BisectingKMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol,
HasMaxIter, HasSeed, JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> model = bkm.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.0
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
2.000...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> bkm2.getDistanceMeasure()
'euclidean'
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure` or its default value.
"""
return self.getOrDefault(self.distanceMeasure)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("3.0.0")
def trainingCost(self):
"""
Sum of squared distances to the nearest centroid for all points in the training dataset.
This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
@inherit_doc
class LDAModel(JavaModel):
"""
Latent Dirichlet Allocation (LDA) model.
    This abstraction permits different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
the Expectation-Maximization ("em") `optimizer`, then this method could involve
collecting a large amount of data to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes:
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
@since("2.0.0")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. note:: Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
        :return: List of checkpoint files from training
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> model = lda.fit(df)
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an"
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
"Concentration parameter (commonly named \"beta\" or \"eta\") for "
"the prior placed on topic' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
@keyword_only
def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
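        # The "em" optimizer keeps the training data and per-document topic
        # distributions on the cluster, so it yields a DistributedLDAModel;
        # the "online" optimizer returns a LocalLDAModel with topics only.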
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
Currently only support 'em' and 'online'.
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class PowerIterationClustering(HasMaxIter, HasWeightCol, JavaParams, JavaMLReadable,
JavaMLWritable):
"""
.. note:: Experimental
Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
`Lin and Cohen <http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf>`_. From the
abstract: PIC finds a very low-dimensional embedding of a dataset using truncated power
iteration on a normalized pair-wise similarity matrix of the data.
This class is not yet an Estimator/Transformer, use :py:func:`assignClusters` method
to run the PowerIterationClustering algorithm.
.. seealso:: `Wikipedia on Spectral clustering
<http://en.wikipedia.org/wiki/Spectral_clustering>`_
>>> data = [(1, 0, 0.5),
... (2, 0, 0.5), (2, 1, 0.7),
... (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9),
... (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1),
... (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
>>> df = spark.createDataFrame(data).toDF("src", "dst", "weight").repartition(1)
>>> pic = PowerIterationClustering(k=2, maxIter=40, weightCol="weight")
>>> assignments = pic.assignClusters(df)
>>> assignments.sort(assignments.id).show(truncate=False)
+---+-------+
|id |cluster|
+---+-------+
|0 |0 |
|1 |0 |
|2 |0 |
|3 |0 |
|4 |0 |
|5 |1 |
+---+-------+
...
>>> pic_path = temp_path + "/pic"
>>> pic.save(pic_path)
>>> pic2 = PowerIterationClustering.load(pic_path)
>>> pic2.getK()
2
>>> pic2.getMaxIter()
40
.. versionadded:: 2.4.0
"""
k = Param(Params._dummy(), "k",
"The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either " +
"'random' to use a random vector as vertex properties, or 'degree' to use " +
"a normalized sum of similarities with other vertices. Supported options: " +
"'random' and 'degree'.",
typeConverter=TypeConverters.toString)
srcCol = Param(Params._dummy(), "srcCol",
"Name of the input column for source vertex IDs.",
typeConverter=TypeConverters.toString)
dstCol = Param(Params._dummy(), "dstCol",
"Name of the input column for destination vertex IDs.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
__init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
"""
super(PowerIterationClustering, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
Sets params for PowerIterationClustering.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.4.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.4.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("2.4.0")
def getInitMode(self):
"""
Gets the value of :py:attr:`initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("2.4.0")
def setSrcCol(self, value):
"""
Sets the value of :py:attr:`srcCol`.
"""
return self._set(srcCol=value)
@since("2.4.0")
def getSrcCol(self):
"""
Gets the value of :py:attr:`srcCol` or its default value.
"""
return self.getOrDefault(self.srcCol)
@since("2.4.0")
def setDstCol(self, value):
"""
Sets the value of :py:attr:`dstCol`.
"""
return self._set(dstCol=value)
@since("2.4.0")
def getDstCol(self):
"""
Gets the value of :py:attr:`dstCol` or its default value.
"""
return self.getOrDefault(self.dstCol)
@since("2.4.0")
def assignClusters(self, dataset):
"""
Run the PIC algorithm and returns a cluster assignment for each input vertex.
:param dataset:
A dataset with columns src, dst, weight representing the affinity matrix,
which is the matrix A in the PIC paper. Suppose the src column value is i,
the dst column value is j, the weight column value is similarity s,,ij,,
which must be nonnegative. This is a symmetric matrix and hence
s,,ij,, = s,,ji,,. For any (i, j) with nonzero similarity, there should be
either (i, j, s,,ij,,) or (j, i, s,,ji,,) in the input. Rows with i = j are
ignored, because we assume s,,ij,, = 0.0.
:return:
A dataset that contains columns of vertex id and the corresponding cluster for
the id. The schema of it will be:
- id: Long
- cluster: Int
.. versionadded:: 2.4.0
"""
self._transfer_params_to_java()
jdf = self._java_obj.assignClusters(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.clustering
from pyspark.sql import SparkSession
try:
        # Numpy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| apache-2.0 |
jayArnel/nursery-application | svm/management/commands/svm.py | 1 | 7703 | import os.path
import math
import numpy as np
import cPickle as pickle
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from _getch import getch
from sklearn import svm
from sklearn import preprocessing
from sklearn import utils
class Command(BaseCommand):
help = 'SVM commands. Loads and displays SVM parameters by default.'
def add_arguments(self, parser):
# Named (optional) arguments
parser.add_argument('--clear',
action='store_true',
dest='clear',
default=False,
help='Clear SVM')
parser.add_argument('--train',
action='store_true',
dest='train',
default=False,
help='Train the SVM')
parser.add_argument('--predict',
action='store_true',
dest='predict',
default=False,
help='Use SVM to predict')
def handle(self, *args, **options):
print options
if options['clear']:
self.clear()
elif options['predict']:
self.predict()
elif options['train']:
self.train_handle()
elif options:
self.load()
def load(self):
if (os.path.isfile(settings.SVM_PATH)):
svm = pickle.load(open(settings.SVM_PATH, 'r'))
self.stdout.write(str(svm.get_params()))
else:
self.stdout.write(
'No SVM is trained. Train SVM first? (Y/N): ', ending='')
choice = getch()
self.stdout.write(choice)
if choice == 'Y':
call_command('svm','--train')
elif choice == 'N':
self.stdout.write('Aborting process.')
else:
raise CommandError('SVM loading cancelled')
def clear(self):
if (os.path.isfile(settings.SVM_PATH)):
self.stdout.write(
'Are you sure you want to clear the SVM? (Y/N): ',
ending='')
choice = getch()
self.stdout.write(choice)
if choice == 'Y':
os.remove(settings.SVM_PATH)
self.stdout.write('SVM cleared.')
elif choice == 'N':
self.stdout.write('SVM not cleared.')
else:
self.stdout.write('No SVM found. Nothing to clear.')
def predict(self):
self.stdout.write('Enter values:')
X = []
i = 0
encoders = pickle.load(open(settings.ENCODERS_PATH, 'r'))
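        # `encoders` is assumed to hold one fitted label encoder per input
        # feature, in the prompt order used below, with the class-label
        # encoder stored last (produced by the process_dataset command).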
self.stdout.write('Parent\'s occupation:', ending='')
le = encoders[i]
for j in range(0, len(le.classes_)):
self.stdout.write(
'{0}: {1}'.format(j, le.classes_[j]), ending='\t')
X.append(raw_input())
i += 1
self.stdout.write('Child\'s nursery:', ending='')
le = encoders[i]
for j in range(0, len(le.classes_)):
self.stdout.write(
'{0}: {1}'.format(j, le.classes_[j]), ending='\t')
X.append(raw_input())
i += 1
self.stdout.write('Form of the family:', ending='')
le = encoders[i]
for j in range(0, len(le.classes_)):
self.stdout.write(
'{0}: {1}'.format(j, le.classes_[j]), ending='\t')
X.append(raw_input())
i += 1
self.stdout.write('Number of children:', ending='')
le = encoders[i]
for j in range(0, len(le.classes_)):
self.stdout.write(
'{0}: {1}'.format(j, le.classes_[j]), ending='\t')
X.append(raw_input())
i += 1
self.stdout.write('Housing conditions:', ending='')
le = encoders[i]
for j in range(0, len(le.classes_)):
self.stdout.write(
'{0}: {1}'.format(j, le.classes_[j]), ending='\t')
X.append(raw_input())
i += 1
self.stdout.write('Financial standing of the family:', ending='')
le = encoders[i]
for j in range(0, len(le.classes_)):
self.stdout.write(
'{0}: {1}'.format(j, le.classes_[j]), ending='\t')
X.append(raw_input())
i += 1
self.stdout.write('Social conditions:', ending='')
le = encoders[i]
for j in range(0, len(le.classes_)):
self.stdout.write(
'{0}: {1}'.format(j, le.classes_[j]), ending='\t')
X.append(raw_input())
i += 1
self.stdout.write('Health conditions:', ending='')
le = encoders[i]
for j in range(0, len(le.classes_)):
self.stdout.write(
'{0}: {1}'.format(j, le.classes_[j]), ending='\t')
X.append(raw_input())
i += 1
svm = pickle.load(open(settings.SVM_PATH, 'r'))
print X
class_encoder = encoders[i]
prediction = svm.predict(X)
label = class_encoder.inverse_transform(prediction)
self.stdout.write(label[0])
def train_handle(self):
if (os.path.isfile(settings.SVM_PATH)):
self.stdout.write(
'The SVM is already trained. Do you want to retrain and '
'overwrite current SVM parameters? (Y/N): ', ending='')
choice = getch()
self.stdout.write(choice)
if choice == 'Y':
pass
elif choice == 'N':
self.stdout.write('Aborting training.')
else:
raise CommandError('SVM training cancelled')
if (os.path.isfile(settings.DATASET_PATH)):
pass
else:
self.stdout.write(
'No dataset has been processed. Processing dataset.')
call_command('process_dataset')
self.train()
def train(self):
self.stdout.write('Training SVM...')
self.stdout.write('Loading dataset...')
dataset = pickle.load(open(settings.DATASET_PATH, 'r'))
self.stdout.write('dataset: \n%s' % dataset)
        self.stdout.write('Partitioning dataset...')
instances = len(dataset)
num_train = instances * .80
        num_train = int(math.modf(num_train)[1])
num_test = instances - num_train
X = dataset[:, :-1]
Y = dataset[:, -1]
X_train = X[:num_train]
Y_train = Y[:num_train]
X_test = X[-num_test:]
Y_test = Y[-num_test:]
gammaSteps = [0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1]
CSteps = [1, 3, 10, 30, 100, 300, 1000, 3000]
maxScore = 0
optC = 0
optGamma = 0
optClf = svm.SVC()
self.stdout.write('Tuning parameters...')
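        # Plain grid search: fit an RBF-kernel SVC for every (C, gamma) pair
        # and keep the pair with the best accuracy on the held-out test split.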
for tempC in CSteps:
for tempGamma in gammaSteps:
clf = svm.SVC(C=tempC, gamma=tempGamma)
clf.fit(X_train, Y_train)
score = clf.score(X_test, Y_test)
self.stdout.write(
"C: {0}, gamma: {1}, score: {2}".format(
tempC, tempGamma, score))
if score > maxScore:
optC = tempC
optClf = clf
optGamma = tempGamma
maxScore = score
self.stdout.write(
"optimum:\n C: {0}, gamma: {1}, score: {2}".format(
optC, optGamma, maxScore))
self.stdout.write('Saving SVM...')
pickle.dump(optClf, open(settings.SVM_PATH, 'w'))
self.stdout.write('Done.')
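# Usage sketch (hypothetical invocations; the command name comes from this
# module's filename, following Django's manage.py convention):
#   python manage.py svm            # load and display the trained SVM
#   python manage.py svm --train    # (re)train and persist the SVM
#   python manage.py svm --predict  # interactively classify one instance
#   python manage.py svm --clear    # delete the persisted SVM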
| bsd-2-clause |
slipguru/l1l2py | l1l2py/scaler.py | 1 | 5911 | """Auxiliary class for scaling tau and mu.
TODO: Add info.
"""
# This code is written by
# Salvatore Masecchia <[email protected]>
# Samuele Fiorini <[email protected]>
# Federico Tomasi <[email protected]>
# Copyright (C) 2017 SlipGURU -
# Statistical Learning and Image Processing Genoa University Research Group
# Via Dodecaneso, 35 - 16146 Genova, ITALY.
#
# This file is part of L1L2Py.
#
# L1L2Py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# L1L2Py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with L1L2Py. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from collections import Sequence
class RangesScaler(object):
"""Given data and labels helps to scale L1L2 parameters ranges properly.
This class works on tau and mu ranges passed to the l1l2 selection
framework (see also :func:`l1l2py.model_selection` and related
function for details).
Scaling ranges permits to use relative (and not absolute) ranges of
parameters.
Attributes
----------
norm_data : :class:`numpy.ndarray`
Normalized data matrix.
norm_labels : :class:`numpy.ndarray`
Normalized labels vector.
Example
-------
from sklearn.preprocessing import robust_scale
from functools import partial
data_normalizer = partial(robust_scale, with_centering=1, with_scaling=0)
# or
# data_normalizer = l1l2py.tools.center
rs = RangesScaler(X, y, data_normalizer=data_normalizer)
tau_range_scale = rs.tau_range(np.linspace(0.1, 3, 10))
mu_range_scale = rs.mu_range(np.linspace(0.1, 3, 10))
"""
def __init__(self, data, labels, data_normalizer=None,
labels_normalizer=None):
"""Init for RangesScaler."""
self.norm_data = data
self.norm_labels = labels
self._tsf = self._msf = None
if data_normalizer:
self.norm_data = data_normalizer(self.norm_data)
if labels_normalizer:
self.norm_labels = labels_normalizer(self.norm_labels)
def tau_range(self, trange):
"""Return a scaled tau range.
        Tau scaling factor is the maximum tau value to avoid an empty solution
(where all variables are discarded).
The value is estimated on the maximum correlation between data and
labels.
Parameters
----------
trange : :class:`numpy.ndarray`
            Tau range containing relative values (expected maximum is less
than 1.0 and minimum greater than 0.0).
Returns
-------
tau_range : :class:`numpy.ndarray`
Scaled tau range.
"""
if np.max(trange) >= 1.0 or np.min(trange) < 0.0:
raise ValueError('Relative tau should be in [0,1)')
if isinstance(trange, Sequence):
trange = np.sort(trange)
return trange * self.tau_scaling_factor
def mu_range(self, mrange):
"""Return a scaled mu range.
Mu scaling factor is estimated on the maximum eigenvalue of the
correlation matrix and is used to simplify the parameters choice.
Parameters
----------
mrange : :class:`numpy.ndarray`
            Mu range containing relative values (expected maximum is less
than 1.0 and minimum greater than 0.0).
Returns
-------
mu_range : :class:`numpy.ndarray`
Scaled mu range.
"""
if np.min(mrange) < 0.0:
raise ValueError('Relative mu should be greater than / equal to 0')
if isinstance(mrange, Sequence):
mrange = np.sort(mrange)
return mrange * self.mu_scaling_factor
@property
def tau_scaling_factor(self):
"""Tau scaling factor calculated on given data and labels."""
if self._tsf is None:
self._tsf = self._tau_scaling_factor()
return self._tsf
@property
def mu_scaling_factor(self):
"""Mu scaling factor calculated on given data matrix."""
if self._msf is None:
self._msf = self._mu_scaling_factor()
return self._msf
def _tau_scaling_factor(self):
# return l1l2py.algorithms.l1_bound(self.norm_data, self.norm_labels)
r"""Estimation of an useful maximum bound for the `l1` penalty term.
For each value of ``tau`` smaller than the maximum bound the solution
vector contains at least one non zero element.
        .. warning::
            The bound is valid only if the `l1l2` regularization algorithm
            is run on the same data matrices.
Parameters
----------
data : (N, P) ndarray
Data matrix.
labels : (N,) or (N, 1) ndarray
Labels vector.
Returns
-------
tau_max : float
Maximum ``tau``.
"""
data = self.norm_data
labels = self.norm_labels
corr = np.abs(np.dot(data.T, labels))
tau_max = (corr.max() * (2.0 / data.shape[0]))
return tau_max
def _mu_scaling_factor(self):
n, d = self.norm_data.shape
if d > n:
tmp = np.dot(self.norm_data, self.norm_data.T)
num = np.linalg.eigvalsh(tmp).max()
else:
tmp = np.dot(self.norm_data.T, self.norm_data)
evals = np.linalg.eigvalsh(tmp)
num = evals.max() + evals.min()
return (num / (2. * n))
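# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  It relies only
# on numpy and the RangesScaler class defined above; the random data are
# purely for demonstration.
if __name__ == '__main__':
    _rng = np.random.RandomState(0)
    _X = _rng.randn(20, 50)   # 20 samples, 50 variables
    _y = _rng.randn(20)
    _rs = RangesScaler(_X, _y)
    print('tau scaling factor:', _rs.tau_scaling_factor)
    print('mu scaling factor: ', _rs.mu_scaling_factor)
    print('scaled tau range:  ', _rs.tau_range(np.linspace(0.1, 0.9, 5)))
    print('scaled mu range:   ', _rs.mu_range(np.linspace(0.1, 0.9, 5)))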
| gpl-3.0 |
GenericMappingTools/gmt-python | pygmt/src/velo.py | 1 | 11492 | """
velo - Plot velocity vectors, crosses, anisotropy bars, and wedges.
"""
import numpy as np
import pandas as pd
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias
@fmt_docstring
@use_alias(
A="vector",
B="frame",
C="cmap",
D="rescale",
E="uncertaintycolor",
G="color",
H="scale",
I="shading",
J="projection",
L="line",
N="no_clip",
R="region",
S="spec",
U="timestamp",
V="verbose",
W="pen",
X="xshift",
Y="yshift",
Z="zvalue",
c="panel",
p="perspective",
t="transparency",
)
@kwargs_to_strings(R="sequence", c="sequence_comma", i="sequence_comma", p="sequence")
def velo(self, data=None, **kwargs):
r"""
Plot velocity vectors, crosses, anisotropy bars, and wedges.
Reads data values from files, :class:`numpy.ndarray` or
:class:`pandas.DataFrame` and plots the selected geodesy symbol on a map.
You may choose from velocity vectors and their uncertainties, rotational
wedges and their uncertainties, anisotropy bars, or strain crosses. Symbol
fills or their outlines may be colored based on constant parameters or via
color lookup tables.
Must provide ``data`` and ``spec``.
Full option list at :gmt-docs:`supplements/geodesy/velo.html`
{aliases}
Parameters
----------
data : str or {table-like}
Pass in either a file name to an ASCII data table, a 2D
{table-classes}.
Note that text columns are only supported with file or
:class:`pandas.DataFrame` inputs.
spec: str
Selects the meaning of the columns in the data file and the figure to
be plotted. In all cases, the scales are in data units per length unit
and sizes are in length units (default length unit is controlled by
:gmt-term:`PROJ_LENGTH_UNIT` unless **c**, **i**, or **p** is
appended).
- **e**\ [*velscale*/]\ *confidence*\ [**+f**\ *font*]
Velocity ellipses in (N,E) convention. The *velscale* sets the
scaling of the velocity arrows. If *velscale* is not given then we
read it from the data file as an extra column. The *confidence* sets
the 2-dimensional confidence limit for the ellipse, e.g. 0.95 for 95%
confidence ellipse. Use **+f** to set the font and size of the text
[Default is 9p,Helvetica,black]; give **+f**\ 0 to deactivate
labeling. The arrow will be drawn with the pen attributes specified
by the ``pen`` option and the arrow-head can be colored via
``color``. The ellipse will be filled with the color or shade
specified by the ``uncertaintycolor`` option [Default is
transparent], and its outline will be drawn if ``line`` is selected
using the pen selected (by ``pen`` if not given by ``line``).
Parameters are expected to be in the following columns:
- **1**,\ **2**: longitude, latitude of station
- **3**,\ **4**: eastward, northward velocity
- **5**,\ **6**: uncertainty of eastward, northward velocities
(1-sigma)
- **7**: correlation between eastward and northward components
- **Trailing text**: name of station (optional)
- **n**\ [*barscale*]
Anisotropy bars. *barscale* sets the scaling of the bars. If
*barscale* is not given then we read it from the data file as an
extra column. Parameters are expected to be in the following columns:
- **1**,\ **2**: longitude, latitude of station
- **3**,\ **4**: eastward, northward components of anisotropy
vector
- **r**\ [*velscale*/]\ *confidence*\ [**+f**\ *font*]
Velocity ellipses in rotated convention. The *velscale* sets the
scaling of the velocity arrows. If *velscale* is not given then we
read it from the data file as an extra column. The *confidence* sets
the 2-dimensional confidence limit for the ellipse, e.g. 0.95 for 95%
confidence ellipse. Use **+f** to set the font and size of the text
[Default is 9p,Helvetica,black]; give **+f**\ 0 to deactivate
labeling. The arrow will be drawn with the pen attributes specified
by the ``pen`` option and the arrow-head can be colored via
``color``. The ellipse will be filled with the color or shade
specified by the ``uncertaintycolor`` option [Default is
transparent], and its outline will be drawn if ``line`` is selected
using the pen selected (by ``pen`` if not given by ``line``).
Parameters are expected to be in the following columns:
- **1**,\ **2**: longitude, latitude of station
- **3**,\ **4**: eastward, northward velocity
- **5**,\ **6**: semi-major, semi-minor axes
- **7**: counter-clockwise angle, in degrees, from horizontal axis
to major axis of ellipse.
- **Trailing text**: name of station (optional)
- **w**\ [*wedgescale*/]\ *wedgemag*
Rotational wedges. The *wedgescale* sets the size of the wedges. If
*wedgescale* is not given then we read it from the data file as an
extra column. Rotation values are multiplied by *wedgemag* before
plotting. For example, setting *wedgemag* to 1.e7 works well for
rotations of the order of 100 nanoradians/yr. Use ``color`` to set
the fill color or shade for the wedge, and ``uncertaintycolor`` to
set the color or shade for the uncertainty. Parameters are expected
to be in the following columns:
- **1**,\ **2**: longitude, latitude of station
- **3**: rotation in radians
- **4**: rotation uncertainty in radians
- **x**\ [*cross_scale*]
Strain crosses. The *cross_scale* sets the size of the cross. If
*cross_scale* is not given then we read it from the data file as an
extra column. Parameters are expected to be in the following columns:
- **1**,\ **2**: longitude, latitude of station
- **3**: eps1, the most extensional eigenvalue of strain tensor,
with extension taken positive.
- **4**: eps2, the most compressional eigenvalue of strain tensor,
with extension taken positive.
- **5**: azimuth of eps2 in degrees CW from North.
{J}
{R}
vector : bool or str
Modify vector parameters. For vector heads, append vector head *size*
[Default is 9p]. See
:gmt-docs:`supplements/geodesy/velo.html#vector-attributes` for
specifying additional attributes.
{B}
{CPT}
rescale : str
can be used to rescale the uncertainties of velocities (``spec='e'``
and ``spec='r'``) and rotations (``spec='w'``). Can be combined with
the ``confidence`` variable.
uncertaintycolor : str
Sets the color or shade used for filling uncertainty wedges
(``spec='w'``) or velocity error ellipses (``spec='e'`` or
``spec='r'``). If ``uncertaintycolor`` is not specified, the
uncertainty regions will be transparent. **Note**: Using ``cmap`` and
``zvalue='+e'`` will update the uncertainty fill color based on the
selected measure in ``zvalue`` [magnitude error]. More details at
:gmt-docs:`cookbook/features.html#gfill-attrib`.
color : str
Select color or pattern for filling of symbols [Default is no fill].
**Note**: Using ``cmap`` (and optionally ``zvalue``) will update the
symbol fill color based on the selected measure in ``zvalue``
[magnitude]. More details at
:gmt-docs:`cookbook/features.html#gfill-attrib`.
scale : float or bool
[*scale*].
Scale symbol sizes and pen widths on a per-record basis using the
*scale* read from the data set, given as the first column after the
(optional) *z* and *size* columns [Default is no scaling]. The symbol
size is either provided by ``spec`` or via the input *size* column.
Alternatively, append a constant *scale* that should be used instead of
reading a scale column.
shading : float or bool
*intens*.
Use the supplied *intens* value (nominally in the -1 to +1 range) to
modulate the symbol fill color by simulating illumination [Default is
none]. If *intens* is not provided we will instead read the intensity
from an extra data column after the required input columns determined
by ``spec``.
line: str
[*pen*\ [**+c**\ [**f**\|\ **l**]]].
Draw lines. Ellipses and rotational wedges will have their outlines
drawn using the current pen (see ``pen``). Alternatively, append a
separate pen to use for the error outlines. If the modifier **+cl** is
appended then the color of the pen is updated from the CPT (see
``cmap``). If instead modifier **+cf** is appended then the color from
the cpt file is applied to error fill only [Default]. Use just **+c**
to set both pen and fill color.
no_clip: bool or str
Do NOT skip symbols that fall outside the frame boundary specified
by ``region``. [Default plots symbols inside frame only].
{U}
{V}
pen : str
[*pen*][**+c**\ [**f**\|\ **l**]].
Set pen attributes for velocity arrows, ellipse circumference and fault
plane edges. [Defaults: width = default, color = black, style = solid].
If the modifier **+cl** is appended then the color of the pen is
updated from the CPT (see ``cmap``). If instead modifier **+cf** is
appended then the color from the cpt file is applied to symbol fill
only [Default]. Use just **+c** to set both pen and fill color.
{XY}
zvalue : str
[**m**\|\ **e**\|\ **n**\|\ **u**\ ][**+e**].
Select the quantity that will be used with the CPT given via ``cmap``
to set the fill color. Choose from **m**\ agnitude (vector magnitude
or rotation magnitude), **e**\ ast-west velocity, **n**\ orth-south
velocity, or **u**\ ser-supplied data column (supplied after the
required columns). To instead use the corresponding error estimates
(i.e., vector or rotation uncertainty) to lookup the color and paint
the error ellipse or wedge instead, append **+e**.
{c}
{p}
{t}
"""
kwargs = self._preprocess(**kwargs) # pylint: disable=protected-access
if "S" not in kwargs or ("S" in kwargs and not isinstance(kwargs["S"], str)):
raise GMTInvalidInput("Spec is a required argument and has to be a string.")
if isinstance(data, np.ndarray) and not pd.api.types.is_numeric_dtype(data):
raise GMTInvalidInput(
"Text columns are not supported with numpy.ndarray type inputs. "
"They are only supported with file or pandas.DataFrame inputs."
)
with Session() as lib:
# Choose how data will be passed in to the module
file_context = lib.virtualfile_from_data(check_kind="vector", data=data)
with file_context as fname:
arg_str = " ".join([fname, build_arg_string(kwargs)])
lib.call_module("velo", arg_str)
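# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module), patterned after
# the PyGMT velocity-ellipse gallery example.  The numeric values are
# arbitrary; the column order follows the ``spec='e'`` convention documented
# above (lon, lat, east velocity, north velocity, east sigma, north sigma,
# correlation, optional station name):
#
#   import pandas as pd
#   import pygmt
#   df = pd.DataFrame(
#       data={
#           "x": [0, -8, 0, -5, 5, 0],
#           "y": [-8, 5, 0, -5, 0, -5],
#           "east_velocity": [0, 3, 4, 6, -6, 6],
#           "north_velocity": [0, 3, 6, 4, 4, -4],
#           "east_sigma": [4, 0, 4, 6, 6, 6],
#           "north_sigma": [6, 0, 6, 4, 4, 4],
#           "correlation_EN": [0.5, 0.5, 0.5, 0.5, -0.5, -0.5],
#           "SITE": ["A", "B", "C", "D", "E", "F"],
#       }
#   )
#   fig = pygmt.Figure()
#   fig.velo(
#       data=df,
#       region=[-10, 8, -10, 6],
#       projection="x0.8c",
#       frame=["WSne", "2g2f"],
#       spec="e0.2/0.39/18",
#       vector="0.3c+p1p+e+gred",
#       pen="0.6p,red",
#       uncertaintycolor="lightblue1",
#       line=True,
#   )
#   fig.show()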
| bsd-3-clause |
munhyunsu/Hobby | 2018F_SCSCAlgorithm/week6/draw_sorting_comparison.py | 1 | 15909 | import copy
import time
import random
import matplotlib.pyplot as plt
import my_sort
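# NOTE (not part of the original script): my_sort is a local course module
# assumed to provide bubble_sort, selection_sort, insertion_sort, merge_sort,
# quick_sort and heap_sort.  Each is called below with a list argument and
# only its running time is recorded; return values are ignored.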
def main():
list01 = ['z', 'y', 'x', 'w', 'v',
'u', 't', 's', 'r', 'q',
'p', 'o', 'n', 'm', 'l',
'k', 'j', 'i', 'h', 'g',
'f', 'e', 'd', 'c', 'b',
'a']
list02 = ['harm', 'winter', 'flow', 'flock', 'pump',
'stop', 'dear', 'cluttered', 'ignorant', 'delicious',
'tan', 'downtown', 'grieving', 'mass', 'smile',
'lively', 'messy', 'peace', 'soup', 'person',
'impulse', 'null', 'box', 'secretive', 'pickle',
'creepy', 'horse', 'resonant', 'thread', 'bed',
'skirt', 'suit', 'camp', 'living', 'natural',
'profit', 'education', 'drain', 'boast', 'grouchy',
'plants', 'tank', 'smoke', 'condition', 'glossy',
'puzzled', 'station', 'start', 'perpetual', 'brake',
'cracker', 'insect', 'ski', 'camp', 'check',
'theory', 'open', 'historical', 'reflect', 'name',
'obsolete', 'billowy', 'baseball', 'precious', 'recess',
'play', 'understood', 'drain', 'sleet', 'fancy',
'accessible', 'good', 'minister', 'watch', 'picture',
'woman', 'raise', 'maid', 'clip', 'aspiring',
'giant', 'thankful', 'beginner', 'hose', 'tap',
'vivacious', 'direction', 'view', 'various', 'puny',
'massive', 'rod', 'whisper', 'books', 'memorise',
'little', 'giddy', 'soak', 'pass', 'sack',
'close', 'current', 'wacky', 'abashed', 'disturbed',
'elfin', 'long', 'squeak', 'thirsty', 'limping',
'brother', 'feeling', 'year', 'offer', 'upbeat',
'black-and-white', 'quicksand', 'film', 'flippant', 'ripe',
'assorted', 'time', 'parsimonious', 'invention', 'shade',
'bomb', 'seal', 'teaching', 'ambitious', 'sand',
'airplane', 'digestion', 'abaft', 'daily', 'honorable',
'clear', 'dad', 'tidy', 'calendar', 'command',
'three', 'milky', 'economic', 'representative', 'sad',
'lunch', 'callous', 'cable', 'magenta', 'prefer',
'trite', 'perform', 'zebra', 'saw', 'lace',
'lush', 'store', 'likeable', 'recondite', 'cobweb',
'carpenter', 'spade', 'box', 'locket', 'jealous',
'unequal', 'bear', 'shiny', 'burn', 'nosy',
'jaded', 'suck', 'untidy', 'sordid', 'one',
'deafening', 'nasty', 'merciful', 'impress', 'ugly',
'space', 'lock', 'thaw', 'canvas', 'loose',
'whip', 'diligent', 'old-fashioned', 'kindly', 'old',
'redundant', 'zealous', 'naughty', 'dam', 'dare',
'loaf', 'crook', 'earth', 'fly', 'discover',
'hall', 'extra-large', 'ban', 'tedious', 'funny',
'beneficial', 'aback', 'house', 'rotten', 'pull',
'pollution', 'wing', 'measure', 'nebulous', 'boat',
'tomatoes', 'bow', 'clean', 'even', 'guard',
'lovely', 'true', 'chickens', 'signal', 'neat',
'judge', 'slim', 'blushing', 'fresh', 'respect',
'matter', 'rule', 'exchange', 'early', 'labored',
'flagrant', 'celery', 'righteous', 'talk', 'chin',
'touch', 'club', 'pricey', 'moldy', 'overwrought',
'weak', 'self', 'breakable', 'skate', 'poor',
'small', 'approval', 'terrible', 'abusive', 'zephyr',
'finger', 'equal', 'left', 'grip', 'battle',
'value', 'poised', 'prepare', 'erect', 'detail',
'flesh', 'ludicrous', 'thought', 'ceaseless', 'bird',
'watery', 'well-to-do', 'spot', 'cake', 'blue-eyed',
'eight', 'amuse', 'spotty', 'exclusive', 'shiver',
'load', 'puzzling', 'amuck', 'holiday', 'muddled',
'drown', 'dangerous', 'next', 'taste', 'huge',
'move', 'tired', 'feigned', 'faint', 'unsuitable',
'knowing', 'abrupt', 'guarded', 'waves', 'moaning',
'shelter', 'wretched', 'suit', 'charge', 'toad',
'clever', 'flash', 'sparkle', 'crack', 'ritzy',
'building', 'school', 'thank', 'potato', 'force',
'enchanting', 'sweater', 'extend', 'idea', 'spell',
'friendly', 'satisfy', 'rub', 'depressed', 'spot',
'morning', 'hang', 'remarkable', 'twig', 'crown',
'explode', 'gratis', 'deserted', 'helpful', 'place',
'pizzas', 'deep', 'point', 'hard', 'hurt',
'adjoining', 'glistening', 'alarm', 'zesty', 'knowledge',
'trees', 'impartial', 'incredible', 'reject', 'bustling',
'resolute', 'slope', 'train', 'pear', 'wave',
'fireman', 'trousers', 'mine', 'wash', 'face',
'ashamed', 'arch', 'yielding', 'second-hand', 'leg',
'lip', 'use', 'sail', 'base', 'rainy',
'spark', 'increase', 'lean', 'open', 'fish',
'uttermost', 'laborer', 'disagreeable', 'plot', 'creature',
'sudden', 'possessive', 'elated', 'cows', 'vein',
'trip', 'worm', 'quiet', 'decorate', 'lyrical',
'selfish', 'ruddy', 'afternoon', 'crayon', 'pinch',
'proud', 'statement', 'church', 'notebook', 'equable',
'coil', 'gate', 'dolls', 'risk', 'transport',
'cough', 'vengeful', 'grade', 'scientific', 'choke',
'boundary', 'attend', 'doubtful', 'volatile', 'unwieldy',
'zippy', 'loss', 'tightfisted', 'comfortable', 'godly',
'top', 'rejoice', 'change', 'glass', 'secretary',
'correct', 'parallel', 'chilly', 'jump', 'acceptable',
'blow', 'peck', 'gentle', 'thick', 'drawer',
'striped', 'determined', 'playground', 'itch', 'van',
'letter', 'jail', 'turn', 'legal', 'cream',
'vacation', 'phobic', 'sugar', 'rare', 'expand',
'partner', 'evanescent', 'account', 'thrill', 'lacking',
'murder', 'quixotic', 'plant', 'request', 'heartbreaking',
'tall', 'rinse', 'art', 'flap', 'muddle',
'chivalrous', 'extra-small', 'ticket', 'shirt', 'ear',
'silent', 'hole', 'overjoyed', 'expensive', 'homely',
'stitch', 'hanging', 'claim', 'plough', 'produce',
'yoke', 'meeting', 'fearful', 'suggestion', 'tumble',
'daffy', 'bang', 'wry', 'invent', 'learn',
'omniscient', 'imperfect', 'butter', 'tearful', 'boorish',
'tame', 'yam', 'call', 'summer', 'descriptive',
'clean', 'mellow', 'children', 'cheap',
'encouraging', 'admire', 'happen', 'science', 'company',
'roasted', 'dysfunctional', 'seemly', 'belief', 'borrow',
'dime', 'angle', 'scattered', 'lonely', 'silent',
'jeans', 'relation', 'stomach', 'abrasive', 'nippy',
'mother', 'long', 'army', 'curl', 'development',
'title', 'guide', 'pedal', 'road', 'dust',
'painful', 'cannon', 'mourn', 'line', 'womanly',
'detailed', 'zip', 'receipt', 'mom', 'oven',
'upset', 'foregoing', 'hug', 'car', 'gusty',
'bubble', 'form', 'nail', 'cause', 'caring',
'afford', 'whirl', 'switch', 'eye', 'man',
'rude', 'smell', 'frightening', 'half', 'flaky',
'regret', 'question', 'answer', 'cross', 'periodic',
'talk', 'governor', 'shrug', 'offend', 'doubt',
'abundant', 'system', 'silky', 'lumber', 'outgoing',
'vagabond', 'noiseless', 'flashy', 'type', 'cloistered',
'verdant', 'thirsty', 'mundane', 'observation', 'angry',
'soggy', 'smoke', 'girls', 'comparison', 'heat',
'clap', 'jail', 'rock', 'design', 'tow',
'handle', 'finger', 'scintillating', 'courageous', 'nervous',
'owe', 'acoustics', 'clover', 'grape', 'turkey',
'wealthy', 'sign', 'window', 'stir', 'consist',
'unhealthy', 'tire', 'crooked', 'x-ray', 'full',
'arrest', 'stage', 'scratch', 'immense', 'rail',
'husky', 'suffer', 'disgusted', 'painstaking', 'glove',
'effect', 'aloof', 'story', 'possess', 'death',
'secret', 'cheerful', 'utopian', 'leather', 'smiling',
'adjustment', 'spoon', 'balance', 'tense', 'glamorous',
'graceful', 'rescue', 'street', 'twist', 'efficient',
'acrid', 'rampant', 'complete', 'false', 'fork',
'welcome', 'sweet', 'growth', 'exist', 'frequent',
'truthful', 'holistic', 'average', 'dark', 'replace',
'hover', 'steel', 'damaged', 'ready', 'throne',
'group', 'dull', 'sloppy', 'damage', 'crabby',
'like', 'vigorous', 'trail', 'coil', 'chubby',
'petite', 'habitual', 'illegal', 'nonchalant', 'expansion',
'aboard', 'strange', 'pin', 'injure', 'knock',
'zoom', 'crowded', 'skin', 'price', 'elastic',
'coherent', 'beef', 'eyes', 'sock', 'agree',
'willing', 'prick', 'suggest', 'complete', 'agreement',
'pot', 'fact', 'boot', 'enter', 'soft',
'stone', 'record', 'lick', 'heat', 'ignore',
'dress', 'defective', 'adventurous', 'illustrious', 'file',
'practise', 'gifted', 'permit', 'decide', 'snatch',
'mind', 'ruin', 'clear', 'obtain', 'plant',
'wink', 'honey', 'tiresome', 'pink', 'mailbox',
'sleepy', 'knot', 'save', 'surprise', 'tangy',
'pastoral', 'juvenile', 'broken', 'bikes', 'hunt',
'orange', 'join', 'card', 'ad hoc', 'lighten',
'cure', 'bath', 'surprise', 'pies', 'animal',
'action', 'ubiquitous', 'synonymous', 'mark', 'fang',
'stain', 'tour', 'parcel', 'mixed', 'quirky',
'spiders', 'stew', 'necessary', 'jam', 'field',
'channel', 'front', 'sprout', 'psychedelic', 'shoe',
'embarrass', 'work', 'describe', 'wish', 'test',
'return', 'ill', 'lavish', 'past', 'trace',
'bore', 'romantic', 'nauseating', 'excited', 'pause',
'vase', 'fold', 'somber', 'spring', 'giraffe',
'mute', 'blue', 'macabre', 'hospital', 'rapid',
'meal', 'try', 'basin', 'unit', 'wind',
'concentrate', 'hand', 'follow', 'intelligent', 'fuzzy',
'annoy', 'range', 'queen', 'thinkable', 'sheet',
'brush', 'highfalutin', 'squalid', 'trust', 'skinny',
'contain', 'dead', 'jewel', 'found', 'weary',
'rough', 'teeny-tiny', 'wrench', 'lewd', 'metal',
'dislike', 'shame', 'motionless', 'grease', 'dirt',
'rebel', 'jar', 'frame', 'pump', 'grandmother',
'distance', 'greasy', 'wander', 'dry', 'nest',
'grab', 'root', 'scare', 'scale', 'share',
'snore', 'produce', 'flight', 'condemned', 'ethereal',
'abnormal', 'unable', 'spectacular', 'truck', 'baby',
'invincible', 'yard', 'cool', 'retire', 'brown',
'overflow', 'seashore', 'float', 'tasteless', 'hapless',
'slave', 'lucky', 'guide', 'earthy', 'income',
'square', 'miscreant', 'regular', 'attack', 'writing',
'thing', 'heady', 'aftermath', 'aware', 'amusing',
'friction', 'peel', 'longing', 'tease', 'sore',
'camera', 'frantic', 'twist', 'thundering', 'numerous',
'advise', 'erratic', 'place', 'ocean', 'kneel',
'rock', 'tick', 'guitar', 'grate', 'acidic',
'tax', 'rhetorical', 'therapeutic', 'grateful', 'apparatus',
'jumpy', 'outstanding', 'mice', 'reaction', 'stale',
'excuse', 'wide-eyed', 'basket', 'deserve', 'grandfather',
'furtive', 'bucket', 'discreet', 'expect', 'disgusting',
'third', 'country', 'abandoned', 'fetch', 'bolt',
'tip', 'step', 'grass', 'agreeable', 'spurious',
'color', 'separate', 'picayune', 'pack', 'pat',
'credit', 'internal', 'compare', 'vessel', 'whine',
'instinctive', 'squash', 'draconian', 'abounding', 'substantial',
'hum', 'black', 'alcoholic', 'magic', 'shape',
'stem', 'quarter', 'absent', 'lively', 'quilt',
'land', 'point', 'harsh', 'songs', 'side',
'real', 'sisters', 'edge', 'receptive', 'overrated',
'knot', 'cast', 'thoughtful', 'live', 'anxious',
'support', 'show', 'soothe', 'meek', 'cushion',
'plantation', 'responsible', 'pig', 'puffy', 'drag',
'ants', 'ancient', 'pleasant', 'ajar', 'actor',
'political', 'groovy', 'unaccountable', 'empty', 'hook',
'want', 'tame', 'record', 'plug', 'stimulating',
'violent']
time_bubble = list()
time_selection = list()
time_insertion = list()
time_merge = list()
time_quick = list()
time_heap = list()
time_lib = list()
x_axis = [100, 200, 300, 400, 500,
600, 700, 800, 900, 1000]
for index in x_axis:
index = index + 1
random.shuffle(list02)
target = copy.deepcopy(list02[:index])
start_time = time.process_time()
my_sort.bubble_sort(target)
end_time = time.process_time()
time_bubble.append(end_time - start_time)
target = copy.deepcopy(list02[:index])
start_time = time.process_time()
my_sort.selection_sort(target)
end_time = time.process_time()
time_selection.append(end_time - start_time)
target = copy.deepcopy(list02[:index])
start_time = time.process_time()
my_sort.insertion_sort(target)
end_time = time.process_time()
time_insertion.append(end_time - start_time)
target = copy.deepcopy(list02[:index])
start_time = time.process_time()
my_sort.merge_sort(target)
end_time = time.process_time()
time_merge.append(end_time - start_time)
target = copy.deepcopy(list02[:index])
start_time = time.process_time()
my_sort.quick_sort(target)
end_time = time.process_time()
time_quick.append(end_time - start_time)
target = copy.deepcopy(list02[:index])
start_time = time.process_time()
my_sort.heap_sort(target)
end_time = time.process_time()
time_heap.append(end_time - start_time)
target = copy.deepcopy(list02[:index])
start_time = time.process_time()
target.sort()
end_time = time.process_time()
time_lib.append(end_time - start_time)
print(time_bubble)
print(time_selection)
print(time_insertion)
print(time_merge)
print(time_quick)
print(time_heap)
print(time_lib)
plt.plot(x_axis, time_bubble, 'r.-', label='Bubble')
plt.plot(x_axis, time_selection, 'g.-', label='Selection')
plt.plot(x_axis, time_insertion, 'b.-', label='Insertion')
plt.plot(x_axis, time_merge, 'm.-', label='Merge')
plt.plot(x_axis, time_quick, 'y.-', label='Quick')
    plt.plot(x_axis, time_heap, 'c.-', label='Heap')
plt.plot(x_axis, time_lib, 'k.-', label='Library')
plt.legend(bbox_to_anchor=(0.05, 0.95), loc=2, borderaxespad=0.0)
    plt.title('Comparison of execution time between sorting algorithms')
plt.ylabel('Execution time')
plt.xlabel('The number of words')
plt.savefig('output.png', bbox_inches='tight')
plt.show()
plt.close()
if __name__ == '__main__':
main()
| gpl-3.0 |
liangfok/controlit_demos | dreamer_controlit_demos/nodes/Demo8_EERC_Groundbreaking_Ceremony.py | 1 | 3375 | #!/usr/bin/env python
'''
This demo is for the UT Austin EERC Groundbreaking ceremony to be
held on Feb 26, 2015 in front of RLM.
To start the hand wave demo:
$ rostopic pub --once /demo8/cmd std_msgs/Int32 'data: 0'
'''
import sys, getopt # for getting and parsing command line arguments
# import time
# import math
# import threading
import rospy
from std_msgs.msg import Int32
import Demo4_HandWave
import Demo5_HandShake
import Demo7_HookemHorns
# import numpy as np
# from scipy.interpolate import interp1d
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
import DreamerInterface
# import Trajectory
# import TrajectoryGeneratorCubicSpline
ENABLE_USER_PROMPTS = False
DEMO_NONE = -1
DEMO_WAVE = 0
DEMO_SHAKE = 1
DEMO_HOOKEM_HORNS = 2
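# The integer published on /demo8/cmd selects which demo to run:
#   0 -> hand wave, 1 -> hand shake, 2 -> Hook'em Horns
# e.g. $ rostopic pub --once /demo8/cmd std_msgs/Int32 'data: 1'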
# Shoulder abductors about 10 degrees away from body and elbows bent 90 degrees
# DEFAULT_POSTURE = [0.0, 0.0, # torso
# 0.0, 0.174532925, 0.0, 1.57, 0.0, 0.0, 0.0, # left arm
# 0.0, 0.174532925, 0.0, 1.57, 0.0, 0.0, 0.0] # right arm
# Shoulder abductors and elbows at about 10 degrees
DEFAULT_POSTURE = [0.0, 0.0, # torso
0.0, 0.174532925, 0.0, 0.174532925, 0.0, 0.0, 0.0, # left arm
0.0, 0.174532925, 0.0, 0.174532925, 0.0, 0.0, 0.0] # right arm
class Demo8_EERC_Groundbreaking_Ceremony:
def __init__(self):
self.dreamerInterface = DreamerInterface.DreamerInterface(ENABLE_USER_PROMPTS)
self.handWaveDemo = Demo4_HandWave.Demo4_HandWave(self.dreamerInterface)
self.handShakeDemo = Demo5_HandShake.Demo5_HandShake(self.dreamerInterface)
self.hookemHornsDemo = Demo7_HookemHorns.Demo7_HookemHorns(self.dreamerInterface)
self.runDemo = False
self.demoNumber = DEMO_NONE
self.demoCmdSubscriber = rospy.Subscriber("/demo8/cmd", Int32, self.demoCmdCallback)
self.demoDonePublisher = rospy.Publisher("/demo8/done", Int32, queue_size=1)
self.doneMessage = Int32()
self.doneMessage.data = 1
def demoCmdCallback(self, msg):
self.demoNumber = msg.data
self.runDemo = True
def run(self):
while not rospy.is_shutdown():
if self.runDemo:
if self.demoNumber == DEMO_WAVE:
print "Starting the Hand Wave Demo!"
self.handWaveDemo.run(enablePrompts = False)
elif self.demoNumber == DEMO_SHAKE:
print "Starting the Hand Shake Demo!"
self.handShakeDemo.run(enablePrompts = False)
elif self.demoNumber == DEMO_HOOKEM_HORNS:
print "Starting the Hook'em Horns Demo!"
self.hookemHornsDemo.run(enablePrompts = False)
print "Done executing demo. Publishing done message."
self.demoDonePublisher.publish(self.doneMessage)
self.runDemo = False
self.demoNumber = DEMO_NONE
rospy.sleep(0.1)
# Main method
if __name__ == "__main__":
rospy.init_node('Demo8_EERC_Groundbreaking_Ceremony', anonymous=True)
demo = Demo8_EERC_Groundbreaking_Ceremony()
demo.run()
print "Demo 8 done, waiting until ctrl+c is hit..."
rospy.spin() # just to prevent this node from exiting
| lgpl-2.1 |
avistous/QSTK | Examples/FeatureSearch/code.py | 3 | 6349 | ''' Python imports '''
import datetime as dt
''' 3rd party imports '''
import numpy as np
import pandas as pand
import matplotlib.pyplot as plt
''' QSTK imports '''
from qstkutil import DataAccess as da
from qstkutil import qsdateutil as du
from qstkfeat.features import *
from qstkfeat.classes import class_fut_ret
import qstkfeat.featutil as ftu
import sys
import time
from functions import *
if __name__ == '__main__':
''' Use Dow 30 '''
#lsSym = ['AA', 'AXP', 'BA', 'BAC', 'CAT', 'CSCO', 'CVX', 'DD', 'DIS', 'GE', 'HD', 'HPQ', 'IBM', 'INTC', 'JNJ', \
# 'JPM', 'KFT', 'KO', 'MCD', 'MMM', 'MRK', 'MSFT', 'PFE', 'PG', 'T', 'TRV', 'UTX', 'WMT', 'XOM' ]
#lsSymTrain = lsSym[0:4] + ['$SPX']
#lsSymTest = lsSym[4:8] + ['$SPX']
f = open('2008Dow30.txt')
lsSymTrain = f.read().splitlines() + ['$SPX']
f.close()
f = open('2010Dow30.txt')
lsSymTest = f.read().splitlines() + ['$SPX']
f.close()
lsSym = list(set(lsSymTrain).union(set(lsSymTest)))
dtStart = dt.datetime(2008,01,01)
dtEnd = dt.datetime(2010,12,31)
norObj = da.DataAccess('Norgate')
ldtTimestamps = du.getNYSEdays( dtStart, dtEnd, dt.timedelta(hours=16) )
lsKeys = ['open', 'high', 'low', 'close', 'volume']
ldfData = norObj.get_data( ldtTimestamps, lsSym, lsKeys ) #this line is important even though the ret value is not used
for temp in ldfData:
temp.fillna(method="ffill").fillna(method="bfill")
ldfDataTrain = norObj.get_data( ldtTimestamps, lsSymTrain, lsKeys )
ldfDataTest = norObj.get_data( ldtTimestamps, lsSymTest, lsKeys)
for temp in ldfDataTrain:
temp.fillna(method="ffill").fillna(method="bfill")
for temp in ldfDataTest:
temp.fillna(method="ffill").fillna(method="bfill")
dDataTrain = dict(zip(lsKeys, ldfDataTrain))
dDataTest = dict(zip(lsKeys, ldfDataTest))
''' Imported functions from qstkfeat.features, NOTE: last function is classification '''
lfcFeatures = [ featMA, featMA, featMA, featMA, featMA, featMA, \
featRSI, featRSI, featRSI, featRSI, featRSI, featRSI, \
featDrawDown, featDrawDown, featDrawDown, featDrawDown, featDrawDown, featDrawDown, \
featRunUp, featRunUp, featRunUp, featRunUp, featRunUp, featRunUp, \
featVolumeDelta, featVolumeDelta, featVolumeDelta, featVolumeDelta, featVolumeDelta, featVolumeDelta, \
featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, \
#featStochastic, featStochastic, featStochastic, featStochastic, featStochastic, featStochastic,featStochastic, featStochastic, featStochastic, featStochastic, featStochastic, featStochastic, \
featBeta, featBeta, featBeta, featBeta, featBeta, featBeta,\
featBollinger, featBollinger, featBollinger, featBollinger, featBollinger, featBollinger,\
featCorrelation, featCorrelation, featCorrelation, featCorrelation, featCorrelation, featCorrelation,\
featPrice, \
featVolume, \
class_fut_ret]
ldArgs = [ {'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5,'bDown':True},{'lLookback':10,'bDown':True},{'lLookback':20,'bDown':True},{'lLookback':5,'bDown':False},{'lLookback':10,'bDown':False},{'lLookback':20,'bDown':False},{'lLookback':5,'bDown':True,'MR':True},{'lLookback':10,'bDown':True,'MR':True},{'lLookback':20,'bDown':True,'MR':True},{'lLookback':5,'bDown':False,'MR':True},{'lLookback':10,'bDown':False,'MR':True},{'lLookback':20,'bDown':False,'MR':True},\
#{'lLookback':5,'bFast':True},{'lLookback':10,'bFast':True},{'lLookback':20,'bFast':True},{'lLookback':5,'bFast':False},{'lLookback':10,'bFast':False},{'lLookback':20,'bFast':False},{'lLookback':5,'bFast':True,'MR':True},{'lLookback':10,'bFast':True,'MR':True},{'lLookback':20,'bFast':True,'MR':True},{'lLookback':5,'bFast':False,'MR':True},{'lLookback':10,'bFast':False,'MR':True},{'lLookback':20,'bFast':False,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{},\
{},\
{'i_lookforward':5}
]
''' Generate a list of DataFrames, one for each feature, with the same index/column structure as price data '''
ldfFeaturesTrain = ftu.applyFeatures( dDataTrain, lfcFeatures, ldArgs, '$SPX')
ldfFeaturesTest = ftu.applyFeatures( dDataTest, lfcFeatures, ldArgs, '$SPX')
''' Pick Test and Training Points '''
dtStartTrain = dt.datetime(2008,01,01)
dtEndTrain = dt.datetime(2009,12,31)
dtStartTest = dt.datetime(2010,01,01)
dtEndTest = dt.datetime(2010,12,31)
''' Stack all information into one Numpy array '''
naFeatTrain = ftu.stackSyms( ldfFeaturesTrain, dtStartTrain, dtEndTrain )
naFeatTest = ftu.stackSyms( ldfFeaturesTest, dtStartTest, dtEndTest )
''' Normalize features, use same normalization factors for testing data as training data '''
ltWeights = ftu.normFeatures( naFeatTrain, -1.0, 1.0, False )
''' Normalize query points with same weights that come from test data '''
ftu.normQuery( naFeatTest[:,:-1], ltWeights )
lFeatures = range(0,len(lfcFeatures)-1)
classLabelIndex = len(lfcFeatures) - 1
funccall = sys.argv[1] + '(naFeatTrain,naFeatTest,lFeatures,classLabelIndex)'
timestart = time.time()
clockstart = time.clock()
eval(funccall)
clockend = time.clock()
timeend = time.time()
sys.stdout.write('\n\nclock diff: '+str(clockend-clockstart)+'sec\n')
sys.stdout.write('time diff: '+str(timeend-timestart)+'sec\n')
| bsd-3-clause |
m-takeuchi/ilislife | main.py | 1 | 20577 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from __future__ import division  # so that division yields floats (for decimal display)
### For logging to current dir/log_dir
import os
from sys import platform as _platform
from kivy.config import Config
Config.set('kivy', 'log_name', _platform+'_kivy_%y-%m-%d_%_.txt')
Config.set('kivy', 'log_level', 'debug')
Config.set('kivy', 'log_dir', os.path.dirname(os.path.abspath(__file__))+'/logs/')
from functools import partial
# from kivy.lang import Builder
from kivy.uix.widget import Widget
# from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.slider import Slider
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.textinput import TextInput
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.properties import NumericProperty, BooleanProperty, ListProperty, ObjectProperty
from kivy.garden.graph import Graph, MeshLinePlot
from kivy.clock import Clock
from kivy.app import App
# Importing my modules
import e3640a_prologix as BPHV
import hioki
import gid7 ### for ion guage
from kivy.properties import StringProperty
import datetime as dtm
import time
import random
import numpy as np
import plot_tdepend as tdep
import email_pdf as epdf
# Device settings
VeAddr = 5
IcAddr = 1
IgAddr = 2
if _platform == "linux" or _platform == "linux2":
# linux
tty = '/dev/ttyUSB0'
ttyRS232 = '/dev/ttyUSB1'
elif _platform == "darwin":
# OS X
tty = '/dev/tty.usbserial-PXWV0AMC'
ttyRS232 = '/dev/tty.usbserial-FTAJM1O6'
elif _platform == "win32":
# Windows...
# tty =
pass
# Load sequence configuration
from config import *
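# NOTE (not part of the original script): config.py is a local module expected
# to define at least SEQ (a list of [target_voltage, hold_time] pairs), dV
# (voltage step), dt_meas (measurement interval) and dt_op (voltage ramp/hold
# interval), all of which are referenced below.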
# Prepare file
directory = 'data/'
filename = directory+"{0:%y%m%d-%H%M%S}.dat".format(dtm.datetime.now())
with open(filename, mode = 'w', encoding = 'utf-8') as fh:
# fh.write('#date\ttime(s)\tVe(kV)\tIg(V)\tIc(V)\n')
fh.write('#date\ttime(s)\tVe(kV)\tIg(V)\tIc(V)\tP(Pa)\n')
# Time to make summary graph wih matplotlib
time_mkgraph = 6*3600# sec
class MyRoot(TabbedPanel):
pass
class MainView(BoxLayout):
is_countup = BooleanProperty(False)
is_sequence = BooleanProperty(False)
is_connected = BooleanProperty(False)
is_changevolt = BooleanProperty(False)
is_holdvolt = BooleanProperty(False)
time_now = NumericProperty(0)
volt_now = NumericProperty(0.0)
volt_target = NumericProperty(0.0)
seq = ListProperty(SEQ)
seq_now = NumericProperty(0)
left_time = NumericProperty()
Ve_status = StringProperty('Ve')
Ic_status = StringProperty('Ic')
Ig_status = StringProperty('Ig')
P_status = StringProperty('P')
Ve_value = NumericProperty()
Ig_value = NumericProperty()
Ic_value = NumericProperty()
P_value = NumericProperty()
def __init__(self, **kwargs):
super().__init__(**kwargs)
def on_command(self, command):
# global Ve_obj, Ic_obj, Ig_obj
global Ve_obj, Ic_obj, Ig_obj, P_obj
if command == 'connect/disconnect':
if self.is_connected:
self.disconnect_device()#Ve_obj, Ic_obj, Ig_obj)
else:
self.time_now = 0
# Ve_obj, Ic_obj, Ig_obj = self.connect_device()
Ve_obj, Ic_obj, Ig_obj, P_obj = self.connect_device()
elif command == 'start/stop':
if self.is_countup:
self.stop_timer()
# self.Stop_IncVolt()
self.abort_sequence()
else:
# if self.is_connected:
if Ve_obj:
msg = Ve_obj.Clear()
### for simple test ###
# self.Start_IncVolt(1000, dt)
self.start_timer()
self.start_sequence(self.seq)
# MyGraph.do_toggle()
# MyGraph.do_toggle()
#######################
# else:
# print('Connect first')
elif command == 'reset':
self.abort_sequence()
def on_countup(self, dt):
"""Callback function for fetching measured values
"""
try:
self.Ig_value = Ig_obj.Measure()
self.Ic_value = Ic_obj.Measure()
start = time.time()
self.P_value = P_obj.RP()
#elapsed_time = time.time() - start
#print('elapsed_time: '+str(elapsed_time))
self.Ic_status = str(self.Ic_value)
self.Ig_status = str(self.Ig_value)
self.P_status = "{0:1.2e}".format(self.P_value)
except ValueError:
self.Ig_value = 0
self.Ic_value = 0
self.Ic_status = Ic_obj.ClearBuffer()
self.Ig_status = Ig_obj.ClearBuffer()
self.Ve_value = self.volt_now
        ### Append the data to the file
StoreValue.append_to_file(filename, [self.time_now, self.Ve_value, self.Ig_value, self.Ic_value, "{0:1.2e}".format(self.P_value)])
        ### When the elapsed time is an integer multiple of time_mkgraph, build the summary graph pdf & send email
if self.time_now != 0 and self.time_now%time_mkgraph == 0:
tot_dose = tdep.generate_plot(filename)
pdffile = filename.rsplit('.dat')[0]+'.pdf'
print(tot_dose)
sbj = "[ILISLIFE] Summary Report for the last {0} hours".format(self.time_now/3600)
msg = "Total dose is {0} (C), {1} (C) and {2} (C) for Ig, Ic and Ig+Ic, repectively.".format(tot_dose[0], tot_dose[1], tot_dose[2])
epdf.push_email('email.json', sbj, msg, pdffile)
        ### Send the data to MyGraph
MyGraph.to_val = [self.time_now, self.Ve_value, self.Ig_value, self.Ic_value, self.P_value]
self.time_now += 1
def start_timer(self):
self.is_countup = True
Clock.schedule_interval(self.on_countup, dt_meas)
pass
def stop_timer(self):
self.is_countup = False
Clock.unschedule(self.on_countup)
pass
def connect_device(self):#, tty, VeAddr, IcAddr, IgAddr):
"""各GPIB機器およびRS232機器を設定する
"""
Ve_obj = BPHV.E3640A(tty, VeAddr)
Ic_obj = hioki.dmm3239gpib(tty, IcAddr)
Ig_obj = hioki.dmm3239gpib(tty, IgAddr)
P_obj = gid7.RS232(ttyRS232)
P_obj.RE() # Set GI-D7 into Remote control mode
self.Ve_status = Ve_obj.Query('*IDN?')
self.Ic_status = Ic_obj.Query('*IDN?')
self.Ig_status = Ig_obj.Query('*IDN?')
self.P_status = P_obj.GS() # Ask device
msg = Ve_obj.Clear()
Ic_obj.Mode()
Ic_obj.SampleRate(rate='medium')
Ig_obj.Mode()
Ig_obj.SampleRate(rate='medium')
P_obj.F1() # Turn filament on
self.is_connected = True
return Ve_obj, Ic_obj, Ig_obj, P_obj
def disconnect_device(self):#, Ve_obj, Ic_obj, Ig_obj):
"""設定したGPIB機器を初期状態に戻し, ポートを開放する
"""
Ve_obj.VoltZero()
Ve_obj.ShutDown()
Ve_obj.Clear()
Ic_obj.Rst()
Ig_obj.Rst()
P_obj.F0()
P_obj.LO()
self.Ve_status = 'Disconnected'
self.Ic_status = 'Disconnected'
self.Ig_status = 'Disconnected'
self.P_status = 'Disconnected'
# Ve_obj.ClosePort()
self.is_connected = False
# def increment_Volt(self, dt):
def increment_Volt(self, volt_target, *largs):
"""Callback for increasing voltage
"""
# print('I am in increment_Volt')
self.volt_now = Ve_obj.AskVolt()*1000
volt_raw_now = self.volt_now/1000
deltaV_raw = dV/1000
next_raw = '{0:.2f}'.format(volt_raw_now + deltaV_raw)
Ve_obj.Instruct('volt ' + str(next_raw))
Ve_obj.OutOn()
# self.volt_now = '{0:.2f}'.format(Ve_obj.AskVolt())*1000
self.volt_now = Ve_obj.AskVolt()*1000
self.Ve_status = str(self.volt_now)
if self.volt_now >= volt_target:
self.is_changevolt = False
return False
# def decrement_Volt(self, dt):
def decrement_Volt(self, volt_target, *largs):
"""Callback for decreasing voltage
"""
# step_raw = Ve_obj.VoltStep(dV)
self.volt_now = Ve_obj.AskVolt()*1000
# print(type(self.volt_now))
volt_raw_now = self.volt_now/1000
deltaV_raw = dV/1000
next_raw = '{0:.2f}'.format(volt_raw_now - deltaV_raw)
Ve_obj.Instruct('volt ' + str(next_raw))
Ve_obj.OutOn()
# self.volt_now = '{0:.2f}'.format(Ve_obj.AskVolt())*1000
self.volt_now = Ve_obj.AskVolt()*1000
self.Ve_status = str(self.volt_now)
if self.volt_now <= volt_target:
self.is_changevolt = False
return False
def change_Volt(self, volt_target, *largs):
"""Callback for change voltage
"""
self.volt_now = Ve_obj.AskVolt()*1000
if self.volt_now == volt_target:
self.is_changevolt = False
return False
elif self.volt_now < volt_target:
self.increment_Volt(volt_target, *largs)
elif self.volt_now > volt_target:
self.decrement_Volt(volt_target, *largs)
else:
print('End change_Volt')
return False
def hold_Volt(self, left_time, *largs):
"""Hold voltage output by left_time == 0
"""
self.volt_now = Ve_obj.AskVolt()*1000
# print(type(self.volt_now))
volt_raw_now = self.volt_now/1000
# Ve_obj.Instruct('volt ' + str(volt_raw_now))
# Ve_obj.OutOn()
self.Ve_status = str(self.volt_now)
if self.left_time <= 0:
            self.seq_now += 1  # advance the sequence by one step
self.is_holdvolt = False
return False
self.left_time -= 1
def start_sequence(self, seqlist):
"""Start voltage sequence
"""
if not self.is_sequence:
self.is_sequence = True
# trigger = Clock.create_trigger(self.on_countdown, dt_meas)
# trigger()
Clock.schedule_interval(self.on_countdown, dt_meas/5)
print('created on_countdown trigger')
def on_countdown(self, dt):
"""Callback for voltage sequence
"""
if self.seq_now <= len(self.seq) -1:
# print('I am in on_countdonw'+str(self.seq_now))
self.volt_target = self.seq[self.seq_now][0]
if self.volt_now != self.volt_target:
                ### If a voltage change is not already in progress
if not self.is_changevolt:
                    # Put the callback into the event loop
self.is_changevolt = True
Clock.schedule_interval(partial(self.change_Volt, self.volt_target), dt_op)
print('Now on change voltage')
            ### If the current voltage equals the target of the current sequence step, and neither a voltage change nor hold_Volt is in progress
else:
if not self.is_changevolt and not self.is_holdvolt:
self.is_holdvolt = True
                    self.left_time = self.seq[self.seq_now][1]  # take the hold time from the sequence list
# イベントループに投入
Clock.schedule_interval(partial(self.hold_Volt, self.left_time), dt_op)
print('Now on hold voltage')
# except IndexError:
elif self.seq_now > len(self.seq) -1:
print('All sequences are finished. Measurement is now stopped.')
self.abort_sequence()
def get_seq(self):
"""Read sequence status
"""
try:
out = str(self.seq[self.seq_now])
except IndexError:
out = 'Sequence is over!'
return out
def format_seq(self, cur_seq):
if cur_seq <= len(self.seq) -1:
output = str(cur_seq)+' [sub]th[/sub] '+str(self.seq[cur_seq][0])+' [sub]V[/sub] '+str(self.seq[cur_seq][1])+' [sub]s[/sub]'
else:
output = str(cur_seq-1)+' [sub]th[/sub] '+str(self.seq[cur_seq-1][0])+' [sub]V[/sub] '+str(self.seq[cur_seq-1][1])+' [sub]s[/sub]'
return output
def lapse_time(self, t):
"""Retrun lapse time (hh:mm:ss format)
"""
rh = (t-t%3600)/3600
rm = (t-rh*3600-((t-rh*3600)%60))/60
rs = t%60
return "{0:2.0f} [sub][i]H[/i][/sub] {1:2.0f} [sub][i]M[/i][/sub] {2:2.0f} [sub][i]S[/i][/sub]".format(rh,rm,rs)
def total_time(self):
l = len(self.seq)
total=0
for i in range(l):
if i == 0:
total += (self.seq[i][0]/dV)*dt_op
else:
total += ((self.seq[i][0]-self.seq[i-1][0])/dV)*dt_op
total += self.seq[i][1]
return total
def remaining_time(self,t):
total = self.total_time()
rt = total - t
return self.lapse_time(rt)
def abort_sequence(self):
"""Force to abort measurement immediately
"""
self.is_countup = False
self.is_changevolt = False
self.is_sequence = False
self.is_holdvolt = False
# events = Clock.get_events()
# for ev in events:
# # Clock.unschedule(ev)
# ev.cancel()
try:
Clock.unschedule(self.change_Volt)
except:
print('abort_sequence error 3')
pass
try:
Clock.unschedule(self.on_countdown)
except:
print('abort_sequence error 2')
pass
try:
Clock.unschedule(self.on_countup)
except:
print('abort_sequence error 1')
pass
try:
Clock.unschedule(self.hold_Volt)
except:
print('abort_sequence error 4')
pass
if self.is_connected:
msg = Ve_obj.Clear()
Ve_obj.VoltZero()
Ve_obj.OutOff()
self.seq_now = 0
self.time_now = 0
self.volt_now = Ve_obj.AskVolt()*1000
# Ve_obj.ShutDown()
# Ig_obj.Cls()
# Ic_obj.Cls()
P_obj.F0()
P_obj.LO()
pass
def Start_IncVolt(self, volt_target, dt):
Clock.schedule_interval(partial(self.increment_Volt, volt_target), dt)
self.is_changevolt = True
pass
def Stop_IncVolt(self):
Clock.unschedule(self.increment_Volt)
self.is_countup = False
pass
class MyGraph(BoxLayout):
graph_plot = ObjectProperty(None)
sensorEnabled = BooleanProperty(False)
graph_y_upl = NumericProperty(2)
graph_y_lwl = NumericProperty(-1)
graph_x_range = NumericProperty(600)
graph_x_hist = NumericProperty(0)
graph_x_step = NumericProperty(600)
data_buffer = ListProperty([[],[],[]])
BUFFSIZE = 43200 # 12 hours = 12*3600 sec
    to_val = ListProperty([0, 0, 0, 0, 0])#, force_dispatch=True)
Ve_value = NumericProperty()
Ig_value = NumericProperty()
Ic_value = NumericProperty()
P_value = NumericProperty()
# val = np.zeros((BUFFSIZE, 4))
val = np.zeros((BUFFSIZE, 5))
t_lapse = np.arange(0,BUFFSIZE)
def __init__(self, **kwargs):
super().__init__(**kwargs)
Clock.schedule_once(self.prepare_graph,0)
def prepare_graph(self, dt):
self.graph = self.graph_plot
# self.graph = self.ids.graph_plot
print('**************************')
print(self.graph)
print('**************************')
self.plot = []
self.plot.append(MeshLinePlot(color=[1, 0, 0, 1])) # X - Red
self.plot.append(MeshLinePlot(color=[0, 1, 0, 1])) # Y - Green
self.plot.append(MeshLinePlot(color=[0, 0.5, 1, 1])) # Z - Blue
self.plot.append(MeshLinePlot(color=[0.5, 0.5, 1, 1])) # pressure log
self.reset_plots()
for plot in self.plot:
self.graph.add_plot(plot) # Add MeshLinePlot object of garden.graph into Graph()
# graph.add_plot(plot) # Add MeshLinePlot object of garden.graph into Graph()
def ymin_up(self):
if (self.graph_y_upl -1 > self.graph_y_lwl):
self.graph_y_lwl += 1
def ymax_down(self):
if (self.graph_y_upl -1 > self.graph_y_lwl):
self.graph_y_upl -= 1
def reset_plots(self):
for plot in self.plot:
# plot.points = [(0, 0),(1,0.5)]
plot.points = [(0, 0)]
# self.counter = 1
def do_toggle(self):
try:
if not self.sensorEnabled:
print('excuted do_toggle()',dt_meas)
# Clock.schedule_interval(StoreValue.make_random_data, dt_meas)
Clock.schedule_interval(self.get_mydata,dt_meas)
self.sensorEnabled = True
else:
# Clock.unschedule(StoreValue.make_random_data)
Clock.unschedule(self.get_mydata)
self.sensorEnabled = False
except NotImplementedError:
popup = ErrorPopup()
popup.open()
# def read_file(self, filename):
# last = os.popen('tail -1 '+filename).read().rsplit('\n')[0].split('\t')[2:] ### Implement
# # print(last)
# # with open(filename, mode = 'r', encoding = 'utf-8') as fh:
# # last = fh.readlines()[-1].rsplit('\n')[0].split('\t')[2:]
# ve = float(last[0])/1000.
# ig = float(last[1])*1000
# ic = float(last[2])*1000
# return [ve,ig,ic]
def get_mydata(self, dt):
self.val[0] = self.to_val
### Modify values digits
self.val[0, 1:] = self.val[0,1:] * (1e-3, 1e+3, 1e+3, 1)
if self.val[0,4] != 0:
self.val[0, 4] = np.log10(self.val[0, 4])
else:
self.val[0, 4] = -10#np.log10(self.val[0, 4])
# Reset time
self.val[:,0] = self.t_lapse
output1 = self.val[:,(0,1)].tolist() # for (t, Ve)
output2 = self.val[:,(0,2)].tolist() # for (t, Ig)
output3 = self.val[:,(0,3)].tolist() # for (t, Ic)
output4 = self.val[:,(0,4)].tolist() # for (t, P)
self.plot[0].points = output1
self.plot[1].points = output2
self.plot[2].points = output3
self.plot[3].points = output4
self.val = np.roll(self.val, 1, axis=0)
def format_val(self, val):
return '{0:.3f}'.format(val)
def _make_random_data(self):
        ## Assign values to val; random numbers are used in this example.
self.val = [random.random()+0.2, random.random(), random.random()-0.2]
return self.val
class StoreValue(BoxLayout):
"""Store measured values to file
"""
sv = ObjectProperty()
Ve_value = NumericProperty()
Ig_value = NumericProperty()
Ic_value = NumericProperty()
P_value = NumericProperty()
is_random = BooleanProperty(False)
def __init__(self, **kwargs):
super().__init__(**kwargs)
def start_random(self):
self.is_random = True
print('start_random is pressed')
Clock.schedule_interval(self.make_random_data, 1)
def stop_random(self):
self.is_random = False
Clock.unschedule(self.make_random_data)
# @classmethod
def make_random_data(self, dt):
        ## Assign values to val; random numbers are used in this example.
self.Ve_value, self.Ig_value, self.Ic_value = random.random()+0.2, random.random(), random.random()-0.2
        #### Pass the values to MyGraph!
MyGraph.to_val = [self.Ve_value, self.Ig_value, self.Ic_value]
print(self.Ve_value, self.Ig_value, self.Ic_value)
# return
@classmethod
def append_to_file(cls, filename, data1d):
        ## Write the data to the file
datastr = ''
with open(filename, mode = 'a', encoding = 'utf-8') as fh:
for data in data1d:
datastr += '\t'+str(data)
fh.write(str(cls.get_ctime()) + datastr + '\n')
@classmethod
def get_ctime(self):
t = dtm.datetime.now()
point = (t.microsecond - t.microsecond%10000)/10000
app_time = "{0:%y%m%d-%H:%M:%S}.{1:.0f}".format(t, point)
return app_time
class IlislifeApp(App):
pass
# def build(self):
# # self.screens["wordcomp"].bind(count_r=self.screens["score"].setter('score'))
# # self.MyGraph(app=self).bind(Ve_value=self.MainView(app=self).setter('Ve_value'))
# return MyRoot()
if __name__ == '__main__':
IlislifeApp().run()
| mit |
mfjb/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/series/methods/test_asfreq.py | 4 | 3616 | from datetime import datetime
import numpy as np
import pytest
from pandas import DataFrame, DatetimeIndex, Series, date_range, period_range
import pandas._testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd
class TestAsFreq:
# TODO: de-duplicate/parametrize or move DataFrame test
def test_asfreq_ts(self):
index = period_range(freq="A", start="1/1/2001", end="12/31/2010")
ts = Series(np.random.randn(len(index)), index=index)
df = DataFrame(np.random.randn(len(index), 3), index=index)
result = ts.asfreq("D", how="end")
df_result = df.asfreq("D", how="end")
exp_index = index.asfreq("D", how="end")
assert len(result) == len(ts)
tm.assert_index_equal(result.index, exp_index)
tm.assert_index_equal(df_result.index, exp_index)
result = ts.asfreq("D", how="start")
assert len(result) == len(ts)
tm.assert_index_equal(result.index, index.asfreq("D", how="start"))
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_tz_aware_asfreq(self, tz):
dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz)
ser = Series(np.random.randn(len(dr)), index=dr)
# it works!
ser.asfreq("T")
def test_asfreq(self):
ts = Series(
[0.0, 1.0, 2.0],
index=DatetimeIndex(
[
datetime(2009, 10, 30),
datetime(2009, 11, 30),
datetime(2009, 12, 31),
],
freq="BM",
),
)
daily_ts = ts.asfreq("B")
monthly_ts = daily_ts.asfreq("BM")
tm.assert_series_equal(monthly_ts, ts)
daily_ts = ts.asfreq("B", method="pad")
monthly_ts = daily_ts.asfreq("BM")
tm.assert_series_equal(monthly_ts, ts)
daily_ts = ts.asfreq(BDay())
monthly_ts = daily_ts.asfreq(BMonthEnd())
tm.assert_series_equal(monthly_ts, ts)
result = ts[:0].asfreq("M")
assert len(result) == 0
assert result is not ts
daily_ts = ts.asfreq("D", fill_value=-1)
result = daily_ts.value_counts().sort_index()
expected = Series([60, 1, 1, 1], index=[-1.0, 2.0, 1.0, 0.0]).sort_index()
tm.assert_series_equal(result, expected)
def test_asfreq_datetimeindex_empty_series(self):
# GH#14320
index = DatetimeIndex(["2016-09-29 11:00"])
expected = Series(index=index, dtype=object).asfreq("H")
result = Series([3], index=index.copy()).asfreq("H")
tm.assert_index_equal(expected.index, result.index)
def test_asfreq_keep_index_name(self):
# GH#9854
index_name = "bar"
index = date_range("20130101", periods=20, name=index_name)
df = DataFrame(list(range(20)), columns=["foo"], index=index)
assert index_name == df.index.name
assert index_name == df.asfreq("10D").index.name
def test_asfreq_normalize(self):
rng = date_range("1/1/2000 09:30", periods=20)
norm = date_range("1/1/2000", periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq("D", normalize=True)
norm = date_range("1/1/2000", periods=20)
expected = Series(vals, index=norm)
tm.assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq("D", normalize=True)
expected = DataFrame(vals, index=norm)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
CETodd/4501project | mrf_th.py | 1 | 26607 | from scipy.misc import imread, imresize, imsave, fromimage, toimage
from scipy.optimize import fmin_l_bfgs_b
import scipy.interpolate
import scipy.ndimage
import numpy as np
import time
import argparse
import warnings
from sklearn.feature_extraction.image import reconstruct_from_patches_2d, extract_patches_2d
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Conv2D, AveragePooling2D, MaxPooling2D
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils.layer_utils import convert_all_kernels_in_model
THEANO_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
TH_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_th_dim_ordering_th_kernels_notop.h5'
TF_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
help='Path to the image to transform.')
parser.add_argument('style_image_paths', metavar='ref', nargs='+', type=str,
help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
help='Prefix for the saved results.')
parser.add_argument("--image_size", dest="img_size", default=400, type=int,
help='Minimum image size')
parser.add_argument("--content_weight", dest="content_weight", default=0.025, type=float,
help="Weight of content")
parser.add_argument("--style_weight", dest="style_weight", nargs='+', default=[1], type=float,
help="Weight of style, can be multiple for multiple styles")
parser.add_argument("--total_variation_weight", dest="tv_weight", default=8.5e-5, type=float,
help="Total Variation weight")
parser.add_argument("--style_scale", dest="style_scale", default=1.0, type=float,
help="Scale the weighing of the style")
parser.add_argument("--num_iter", dest="num_iter", default=10, type=int,
help="Number of iterations")
parser.add_argument("--content_loss_type", default=0, type=int,
help='Can be one of 0, 1 or 2. Readme contains the required information of each mode.')
parser.add_argument("--content_layer", dest="content_layer", default="conv5_2", type=str,
help="Content layer used for content loss.")
parser.add_argument("--init_image", dest="init_image", default="content", type=str,
help="Initial image used to generate the final image. Options are 'content', 'noise', or 'gray'")
def _calc_patch_grid_dims(shape, patch_size, patch_stride):
x_w, x_h, x_c = shape
num_rows = 1 + (x_h - patch_size) // patch_stride
num_cols = 1 + (x_w - patch_size) // patch_stride
return num_rows, num_cols
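# Hedged sketch (never called anywhere in this script) of the grid arithmetic
# above: for a hypothetical 32x24x3 feature map, 3x3 patches and stride 1 give
# 1 + (dim - patch_size) // stride cells per axis, i.e. a 22 x 30 grid.
def _patch_grid_dims_example():
    return _calc_patch_grid_dims((32, 24, 3), patch_size=3, patch_stride=1)  # -> (22, 30)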
def make_patch_grid(x, patch_size, patch_stride=1):
'''x shape: (num_channels, rows, cols)'''
x = x.transpose(2, 1, 0)
patches = extract_patches_2d(x, (patch_size, patch_size))
x_w, x_h, x_c = x.shape
num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
patches = patches.transpose((0, 1, 4, 2, 3))
#patches = np.rollaxis(patches, -1, 2)
return patches
def combine_patches_grid(in_patches, out_shape):
'''Reconstruct an image from these `patches`
input shape: (rows, cols, channels, patch_row, patch_col)
'''
num_rows, num_cols = in_patches.shape[:2]
num_channels = in_patches.shape[-3]
patch_size = in_patches.shape[-1]
num_patches = num_rows * num_cols
in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size)) # (patches, channels, pr, pc)
in_patches = np.transpose(in_patches, (0, 2, 3, 1)) # (patches, p, p, channels)
recon = reconstruct_from_patches_2d(in_patches, out_shape)
return recon.transpose(2, 1, 0)
class PatchMatcher(object):
'''A matcher of image patches inspired by the PatchMatch algorithm.
image shape: (width, height, channels)
'''
def __init__(self, input_shape, target_img, patch_size=1, patch_stride=1, jump_size=0.5,
num_propagation_steps=5, num_random_steps=5, random_max_radius=1.0, random_scale=0.5):
self.input_shape = input_shape
self.patch_size = patch_size
self.patch_stride = patch_stride
self.jump_size = jump_size
self.num_propagation_steps = num_propagation_steps
self.num_random_steps = num_random_steps
self.random_max_radius = random_max_radius
self.random_scale = random_scale
self.num_input_rows, self.num_input_cols = _calc_patch_grid_dims(input_shape, patch_size, patch_stride)
self.target_patches = make_patch_grid(target_img, patch_size)
self.target_patches_normed = self.normalize_patches(self.target_patches)
self.coords = np.random.uniform(0.0, 1.0, # TODO: switch to pixels
(2, self.num_input_rows, self.num_input_cols))# * [[[self.num_input_rows]],[[self.num_input_cols]]]
self.similarity = np.zeros(input_shape[:2:-1], dtype ='float32')
self.min_propagation_row = 1.0 / self.num_input_rows
self.min_propagation_col = 1.0 / self.num_input_cols
self.delta_row = np.array([[[self.min_propagation_row]], [[0.0]]])
self.delta_col = np.array([[[0.0]], [[self.min_propagation_col]]])
def update(self, input_img, reverse_propagation=False):
input_patches = self.get_patches_for(input_img)
self.update_with_patches(self.normalize_patches(input_patches), reverse_propagation=reverse_propagation)
def update_with_patches(self, input_patches, reverse_propagation=False):
self._propagate(input_patches, reverse_propagation=reverse_propagation)
self._random_update(input_patches)
def get_patches_for(self, img):
return make_patch_grid(img, self.patch_size)
def normalize_patches(self, patches):
norm = np.sqrt(np.sum(np.square(patches), axis=(2, 3, 4), keepdims=True))
return patches / norm
def _propagate(self, input_patches, reverse_propagation=False):
if reverse_propagation:
roll_direction = 1
else:
roll_direction = -1
sign = float(roll_direction)
for step_i in range(self.num_propagation_steps):
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 1) + self.delta_row * sign)
coords_row, similarity_row = self.eval_state(new_coords, input_patches)
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 2) + self.delta_col * sign)
coords_col, similarity_col = self.eval_state(new_coords, input_patches)
self.coords, self.similarity = self.take_best(coords_row, similarity_row, coords_col, similarity_col)
def _random_update(self, input_patches):
for alpha in range(1, self.num_random_steps + 1): # NOTE this should actually stop when the move is < 1
new_coords = self.clip_coords(self.coords + np.random.uniform(-self.random_max_radius, self.random_max_radius, self.coords.shape) * self.random_scale ** alpha)
self.coords, self.similarity = self.eval_state(new_coords, input_patches)
def eval_state(self, new_coords, input_patches):
new_similarity = self.patch_similarity(input_patches, new_coords)
delta_similarity = new_similarity - self.similarity
coords = np.where(delta_similarity > 0, new_coords, self.coords)
best_similarity = np.where(delta_similarity > 0, new_similarity, self.similarity)
return coords, best_similarity
def take_best(self, coords_a, similarity_a, coords_b, similarity_b):
delta_similarity = similarity_a - similarity_b
best_coords = np.where(delta_similarity > 0, coords_a, coords_b)
best_similarity = np.where(delta_similarity > 0, similarity_a, similarity_b)
return best_coords, best_similarity
def patch_similarity(self, source, coords):
'''Check the similarity of the patches specified in coords.'''
target_vals = self.lookup_coords(self.target_patches_normed, coords)
err = source * target_vals
return np.sum(err, axis=(2, 3, 4))
def clip_coords(self, coords):
# TODO: should this all be in pixel space?
coords = np.clip(coords, 0.0, 1.0)
return coords
def lookup_coords(self, x, coords):
x_shape = np.expand_dims(np.expand_dims(x.shape, -1), -1)
i_coords = np.round(coords * (x_shape[:2] - 1)).astype('int32')
return x[i_coords[0], i_coords[1]]
def get_reconstruction(self, patches=None, combined=None):
if combined is not None:
patches = make_patch_grid(combined, self.patch_size)
if patches is None:
patches = self.target_patches
patches = self.lookup_coords(patches, self.coords)
recon = combine_patches_grid(patches, self.input_shape)
return recon
def scale(self, new_shape, new_target_img):
'''Create a new matcher of the given shape and replace its
state with a scaled up version of the current matcher's state.
'''
new_matcher = PatchMatcher(new_shape, new_target_img, patch_size=self.patch_size,
patch_stride=self.patch_stride, jump_size=self.jump_size,
num_propagation_steps=self.num_propagation_steps,
num_random_steps=self.num_random_steps,
random_max_radius=self.random_max_radius,
random_scale=self.random_scale)
new_matcher.coords = congrid(self.coords, new_matcher.coords.shape, method='neighbour')
new_matcher.similarity = congrid(self.similarity, new_matcher.coords.shape, method='neighbour')
return new_matcher
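# Hedged usage sketch of PatchMatcher (not used by the rest of this script):
# the matcher is driven by repeated update() calls and then read back with
# get_reconstruction(). The random 3x64x64 arrays are arbitrary channels-first
# stand-ins for feature maps (matching make_patch_grid's docstring), while the
# (64, 64, 3) input_shape follows the class docstring's
# (width, height, channels) convention used for the reconstruction size.
def _patch_matcher_usage_sketch():
    target = np.random.rand(3, 64, 64).astype('float32')
    current = np.random.rand(3, 64, 64).astype('float32')
    matcher = PatchMatcher((64, 64, 3), target, patch_size=3)
    for step in range(3):
        matcher.update(current, reverse_propagation=bool(step % 2))
    return matcher.get_reconstruction(combined=current)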
def congrid(a, newdims, method='linear', centre=False, minusone=False):
'''Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
as IDL's congrid routine, which apparently originally came from a VAX/VMS
routine of the same name.
method:
neighbour - closest value from original data
nearest and linear - uses n x 1-D interpolations using
scipy.interpolate.interp1d
(see Numerical Recipes for validity of use of n 1-D interpolations)
spline - uses ndimage.map_coordinates
centre:
True - interpolation points are at the centres of the bins
False - points are at the front edge of the bin
minusone:
For example- inarray.shape = (i,j) & new dimensions = (x,y)
False - inarray is resampled by factors of (i/x) * (j/y)
True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)
This prevents extrapolation one element beyond bounds of input array.
'''
if not a.dtype in [np.float64, np.float32]:
a = np.cast[float](a)
m1 = np.cast[int](minusone)
ofs = np.cast[int](centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print ("[congrid] dimensions error. "
"This routine currently only support "
"rebinning to the same number of dimensions.")
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
base = np.indices(newdims.astype(int))[i]  # np.indices needs integer dims
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
newa = a[tuple(cd)]  # index with a tuple of arrays; list indexing is deprecated
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
olddims = [np.arange(i, dtype=float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
trorder = [ndims - 1] + list(range(ndims - 1))
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
newcoords_dims = [i for i in range(newcoords.ndim)]
#make first index last
newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else:
print("Congrid error: Unrecognized interpolation type.\n",
"Currently only \'neighbour\', \'nearest\',\'linear\',",
"and \'spline\' are supported.")
return None
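# Hedged example (never called): nearest-neighbour congrid upsampling of a
# 2x2 array to 4x4; minusone=True keeps the lookup indices inside the source
# array. The values below were chosen purely for illustration.
def _congrid_neighbour_example():
    small = np.arange(4, dtype='float64').reshape(2, 2)
    big = congrid(small, (4, 4), method='neighbour', minusone=True)
    # big == [[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]
    return big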
args = parser.parse_args()
base_image_path = args.base_image_path
style_reference_image_paths = args.style_image_paths
style_image_paths = [path for path in args.style_image_paths]
result_prefix = args.result_prefix
content_weight = args.content_weight
total_variation_weight = args.tv_weight
img_width = img_height = 0
img_WIDTH = img_HEIGHT = 0
aspect_ratio = 0
read_mode = "color"
style_weights = []
if len(style_image_paths) != len(args.style_weight):
weight_sum = sum(args.style_weight) * args.style_scale
count = len(style_image_paths)
for i in range(len(style_image_paths)):
style_weights.append(weight_sum / count)
else:
style_weights = [weight*args.style_scale for weight in args.style_weight]
def pooling_func(x):
# return AveragePooling2D((2, 2), strides=(2, 2))(x)
return MaxPooling2D((2, 2), strides=(2, 2))(x)
#start proc_img
def preprocess_image(image_path, load_dims=False):
global img_width, img_height, img_WIDTH, img_HEIGHT, aspect_ratio
mode = "RGB"
# mode = "RGB" if read_mode == "color" else "L"
img = imread(image_path, mode=mode) # Prevents crashes due to PNG images (ARGB)
if load_dims:
img_WIDTH = img.shape[0]
img_HEIGHT = img.shape[1]
aspect_ratio = float(img_HEIGHT) / img_WIDTH
img_width = args.img_size
img_height = int(img_width * aspect_ratio)
img = imresize(img, (img_width, img_height)).astype('float32')
# RGB -> BGR
img = img[:, :, ::-1]
img[:, :, 0] -= 103.939
img[:, :, 1] -= 116.779
img[:, :, 2] -= 123.68
if K.image_dim_ordering() == "th":
img = img.transpose((2, 0, 1)).astype('float32')
img = np.expand_dims(img, axis=0)
return img
# util function to convert a tensor into a valid image
def deprocess_image(x):
if K.image_dim_ordering() == "th":
x = x.reshape((3, img_width, img_height))
x = x.transpose((1, 2, 0))
else:
x = x.reshape((img_width, img_height, 3))
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
# BGR -> RGB
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
base_image = K.variable(preprocess_image(base_image_path, True))
style_reference_images = [K.variable(preprocess_image(path)) for path in style_image_paths]
# this will contain our generated image
combination_image = K.placeholder((1, img_width, img_height, 3)) # tensorflow
image_tensors = [base_image]
for style_image_tensor in style_reference_images:
image_tensors.append(style_image_tensor)
image_tensors.append(combination_image)
nb_tensors = len(image_tensors)
nb_style_images = nb_tensors - 2 # Content and Output image not considered
# combine the various images into a single Keras tensor
input_tensor = K.concatenate(image_tensors, axis=0)
if K.image_dim_ordering() == "th":
shape = (nb_tensors, 3, img_width, img_height)
else:
shape = (nb_tensors, img_width, img_height, 3)
#build the model
model_input = Input(tensor=input_tensor, shape=shape)
# build the VGG16 network with our 3 images as input
x = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', name='conv1_1', padding='same')(model_input)
x = Conv2D(64, (3, 3), activation='relu', name='conv1_2', padding='same')(x)
x = pooling_func(x)
x = Conv2D(128, (3, 3), activation='relu', name='conv2_1', padding='same')(x)
x = Conv2D(128, (3, 3), activation='relu', name='conv2_2', padding='same')(x)
x = pooling_func(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_1', padding='same')(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_2', padding='same')(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_3', padding='same')(x)
x = pooling_func(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv4_1', padding='same')(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv4_2', padding='same')(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv4_3', padding='same')(x)
x = pooling_func(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_1', padding='same')(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_2', padding='same')(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_3', padding='same')(x)
x = pooling_func(x)
model = Model(model_input, x)
if K.image_dim_ordering() == "th":
weights = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', THEANO_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
print("Weights Path: ", weights)
model.load_weights(weights)
print('Model loaded.')
# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])
# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
assert K.ndim(x) == 4
a = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, 1:, :img_height - 1, :])
b = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, :img_width - 1, 1:, :])
return K.sum(K.pow(a + b, 1.25))
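# Hedged numpy analogue (never called) of the total variation term above for a
# single (width, height, 3) array: the same squared forward differences and
# the same 1.25 exponent, just without the Keras batch dimension.
def _total_variation_numpy(img):
    a = np.square(img[:-1, :-1, :] - img[1:, :-1, :])
    b = np.square(img[:-1, :-1, :] - img[:-1, 1:, :])
    return np.sum(np.power(a + b, 1.25))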
def make_patches(x, patch_size, patch_stride):
from theano.tensor.nnet.neighbours import images2neibs
'''Break image `x` up into a bunch of patches.'''
patches = images2neibs(x,
(patch_size, patch_size), (patch_stride, patch_stride),
mode='valid')
# neibs are sorted per-channel
patches = K.reshape(patches, (K.shape(x)[1], K.shape(patches)[0] // K.shape(x)[1], patch_size, patch_size))
patches = K.permute_dimensions(patches, (1, 0, 2, 3))
patches_norm = K.sqrt(K.sum(K.square(patches), axis=(1,2,3), keepdims=True))
return patches, patches_norm
def find_patch_matches(comb, comb_norm, ref):
'''For each patch in combination, find the best matching patch in reference'''
# we want cross-correlation here so flip the kernels
convs = K.conv2d(comb, ref[:, :, ::-1, ::-1], padding='valid')
argmax = K.argmax(convs / comb_norm, axis=1)
return argmax
def mrf_loss(source, combination, patch_size=3, patch_stride=1):
'''CNNMRF http://arxiv.org/pdf/1601.04589v1.pdf'''
# extract patches from feature maps
source = K.expand_dims(source, 0)
combination = K.expand_dims(combination, 0)
combination_patches, combination_patches_norm = make_patches(combination, patch_size, patch_stride)
source_patches, source_patches_norm = make_patches(source, patch_size, patch_stride)
# find best patches and calculate loss
patch_ids = find_patch_matches(combination_patches, combination_patches_norm, source_patches / source_patches_norm)
best_source_patches = K.reshape(source_patches[patch_ids], K.shape(combination_patches))
loss = K.sum(K.square(best_source_patches - combination_patches)) / patch_size ** 2
return loss
# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
def content_loss(base, combination):
channel_dim = 0 if K.image_dim_ordering() == "th" else -1
channels = K.shape(base)[channel_dim]
size = img_width * img_height
if args.content_loss_type == 1:
multiplier = 1 / (2. * channels ** 0.5 * size ** 0.5)
elif args.content_loss_type == 2:
multiplier = 1 / (channels * size)
else:
multiplier = 1.
return multiplier * K.sum(K.square(combination - base))
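# Hedged sketch (never called) of the three --content_loss_type scalings used
# by content_loss above; `channels` and `size` are hypothetical feature-map
# dimensions, not values taken from this script.
def _content_loss_multipliers(channels=64, size=400 * 300):
    return (1.,                                          # type 0: plain sum of squares
            1. / (2. * channels ** 0.5 * size ** 0.5),   # type 1
            1. / (channels * size))                      # type 2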
# combine these loss functions into a single scalar
loss = K.variable(0.)
layer_features = outputs_dict[args.content_layer] # 'conv5_2' or 'conv4_2'
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[nb_tensors - 1, :, :, :]
loss += content_weight * content_loss(base_image_features,
combination_features)
channel_index = -1
#Style Loss calculation
mrf_layers = ['conv3_1', 'conv4_1']
# feature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
for layer_name in mrf_layers:
output_features = outputs_dict[layer_name]
shape = shape_dict[layer_name]
combination_features = output_features[nb_tensors - 1, :, :, :]
style_features = output_features[1:nb_tensors - 1, :, :, :]
sl = []
for j in range(nb_style_images):
sl.append(mrf_loss(style_features[j], combination_features))
for j in range(nb_style_images):
loss += (style_weights[j] / len(mrf_layers)) * sl[j]
loss += total_variation_weight * total_variation_loss(combination_image)
# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)
outputs = [loss]
if type(grads) in {list, tuple}:
outputs += grads
else:
outputs.append(grads)
f_outputs = K.function([combination_image], outputs)
def eval_loss_and_grads(x):
x = x.reshape((1, img_width, img_height, 3))
outs = f_outputs([x])
loss_value = outs[0]
if len(outs[1:]) == 1:
grad_values = outs[1].flatten().astype('float64')
else:
grad_values = np.array(outs[1:]).flatten().astype('float64')
return loss_value, grad_values
# # this Evaluator class makes it possible
# # to compute loss and gradients in one pass
# # while retrieving them via two separate functions,
# # "loss" and "grads". This is done because scipy.optimize
# # requires separate functions for loss and gradients,
# # but computing them separately would be inefficient.
class Evaluator(object):
def __init__(self):
self.loss_value = None
self.grads_values = None
def loss(self, x):
assert self.loss_value is None
loss_value, grad_values = eval_loss_and_grads(x)
self.loss_value = loss_value
self.grad_values = grad_values
return self.loss_value
def grads(self, x):
assert self.loss_value is not None
grad_values = np.copy(self.grad_values)
self.loss_value = None
self.grad_values = None
return grad_values
evaluator = Evaluator()
# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss
if "content" in args.init_image or "gray" in args.init_image:
x = preprocess_image(base_image_path, True)
elif "noise" in args.init_image:
x = np.random.uniform(0, 255, (1, img_width, img_height, 3)) - 128.
if K.image_dim_ordering() == "th":
x = x.transpose((0, 3, 1, 2))
else:
print("Using initial image : ", args.init_image)
x = preprocess_image(args.init_image)
num_iter = args.num_iter
prev_min_val = -1
# for scaled_img in img_pyramid:
# image_tensors = [base_image]
# for style_image_tensor in style_reference_images:
# image_tensors.append(style_image_tensor)
# image_tensors.append(combination_image)
# input_tensor = K.concatenate(image_tensors, axis=0)
# model_input = Input(tensor=input_tensor, shape=shape)
# 'scaled_imgs' is never built in this script (the commented-out pyramid code
# above suggests a multi-scale loop was intended); use a single scale so the
# optimization loop below still runs.
scaled_imgs = [x]
for img_scale in scaled_imgs:
image_tensors = [base_image]
for style_image_tensor in style_reference_images:
image_tensors.append(style_image_tensor)
image_tensors.append(combination_image)
# combine the various images into a single Keras tensor
input_tensor = K.concatenate(image_tensors, axis=0)
# build the model
model_input = Input(tensor=input_tensor, shape=shape)
for i in range(num_iter):
print("Starting iteration %d of %d" % ((i + 1), num_iter))
start_time = time.time()
x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20)
if prev_min_val == -1:
prev_min_val = min_val
improvement = (prev_min_val - min_val) / prev_min_val * 100
print('Current loss value:', min_val, " Improvement : %0.3f" % improvement, "%")
prev_min_val = min_val
# save current generated image
img = deprocess_image(x.copy())
img_ht = int(img_width * aspect_ratio)
print("Rescaling Image to (%d, %d)" % (img_width, img_ht))
img = imresize(img, (img_width, img_ht), interp="bilinear")
fname = result_prefix + '_at_iteration_%d.png' % (i + 1)
imsave(fname, img)
end_time = time.time()
print('Image saved as', fname)
print('Iteration %d completed in %ds' % (i + 1, end_time - start_time))
| mit |
HWal/paparazzi | sw/misc/attitude_reference/pat/utils.py | 42 | 6283 | #
# Copyright 2013-2014 Antoine Drouin ([email protected])
#
# This file is part of PAT.
#
# PAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PAT. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions
"""
import math
import numpy as np
import numpy.linalg as linalg
import pdb
"""
Unit conversions
"""
def rad_of_deg(d):
return d / 180. * math.pi
def sqrad_of_sqdeg(d):
return d / (180. * math.pi) ** 2
def deg_of_rad(r):
return r * 180. / math.pi
def sqdeg_of_sqrad(r):
return r * (180. / math.pi) ** 2
def rps_of_rpm(r):
return r * 2. * math.pi / 60.
def rpm_of_rps(r):
return r / 2. / math.pi * 60.
# http://en.wikipedia.org/wiki/Nautical_mile
def m_of_NM(nm):
return nm * 1852.
def NM_of_m(m):
return m / 1852.
# http://en.wikipedia.org/wiki/Knot_(speed)
def mps_of_kt(kt):
return kt * 0.514444
def kt_of_mps(mps):
return mps / 0.514444
# http://en.wikipedia.org/wiki/Foot_(unit)
def m_of_ft(ft):
return ft * 0.3048
def ft_of_m(m):
return m / 0.3048
# feet per minute to/from meters per second
def ftpm_of_mps(mps):
return mps * 60. * 3.28084
def mps_of_ftpm(ftpm):
return ftpm / 60. / 3.28084
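# Hedged spot checks (never executed on import) for the conversions above;
# the expected values in the comments are rounded.
def _unit_conversion_examples():
    return (deg_of_rad(math.pi),  # 180.0
            m_of_NM(1.),          # 1852.0
            kt_of_mps(10.),       # ~19.44
            ft_of_m(1.))          # ~3.28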
"""
Clipping
"""
def norm_angle_0_2pi(a):
while a > 2. * math.pi:
a -= 2. * math.pi
while a <= 0:
a += 2. * math.pi
return a
def norm_angle_mpi_pi(a):
while a > math.pi:
a -= 2. * math.pi
while a <= -math.pi:
a += 2. * math.pi
return a
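# Hedged examples (never called) of the angle wrappers above.
def _angle_wrap_examples():
    return (norm_angle_0_2pi(-math.pi / 2),  # -> 3*pi/2
            norm_angle_mpi_pi(3 * math.pi))  # -> pi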
#
def saturate(_v, _min, _max):
if _v < _min:
return _min
if _v > _max:
return _max
return _v
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color': 'k', 'fontsize': 20}
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
if fig is None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
if legend is not None:
ax.legend(legend, loc='best')
if xlim is not None:
ax.set_xlim(xlim[0], xlim[1])
if ylim is not None:
ax.set_ylim(ylim[0], ylim[1])
def ensure_ylim(ax, yspan):
ymin, ymax = ax.get_ylim()
if ymax - ymin < yspan:
ym = (ymin + ymax) / 2
ax.set_ylim(ym - yspan / 2, ym + yspan / 2)
def write_text(nrows, ncols, plot_number, text, colspan=1, loc=[[0.5, 9.7]], filename=None):
# ax = plt.subplot(nrows, ncols, plot_number)
gs = gridspec.GridSpec(nrows, ncols)
row, col = divmod(plot_number - 1, ncols)
ax = plt.subplot(gs[row, col:col + colspan])
plt.axis([0, 10, 0, 10])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
for i in range(0, len(text)):
plt.text(loc[i][0], loc[i][1], text[i], ha='left', va='top')
save_if(filename)
def plot_in_grid(time, plots, ncol, figure=None, window_title="None", legend=None, filename=None,
margins=(0.04, 0.08, 0.93, 0.96, 0.20, 0.34)):
nrow = math.ceil(len(plots) / float(ncol))
figsize = (10.24 * ncol, 2.56 * nrow)
figure = prepare_fig(figure, window_title, figsize=figsize, margins=margins)
# pdb.set_trace()
for i, (title, ylab, data) in enumerate(plots):
ax = figure.add_subplot(nrow, ncol, i + 1)
ax.plot(time, data)
decorate(ax, title=title, ylab=ylab)
if legend is not None:
ax.legend(legend, loc='best')
save_if(filename)
return figure
"""
Misc
"""
def num_jacobian(X, U, P, dyn):
s_size = len(X)
i_size = len(U)
epsilonX = (0.1 * np.ones(s_size)).tolist()
dX = np.diag(epsilonX)
A = np.zeros((s_size, s_size))
for i in range(0, s_size):
dx = dX[i, :]
delta_f = dyn(X + dx / 2, 0, U, P) - dyn(X - dx / 2, 0, U, P)
delta_f = delta_f / dx[i]
# print delta_f
A[:, i] = delta_f
epsilonU = (0.1 * np.ones(i_size)).tolist()
dU = np.diag(epsilonU)
B = np.zeros((s_size, i_size))
for i in range(0, i_size):
du = dU[i, :]
delta_f = dyn(X, 0, U + du / 2, P) - dyn(X, 0, U - du / 2, P)
delta_f = delta_f / du[i]
B[:, i] = delta_f
return A, B
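# Hedged example (never called): for linear dynamics Xdot = A.X + B.U the
# central differences in num_jacobian recover A and B up to rounding error.
# A_true, B_true and the operating point are illustrative values only.
def _num_jacobian_example():
    A_true = np.array([[0., 1.], [-2., -0.5]])
    B_true = np.array([[0.], [1.]])
    def dyn(X, t, U, P):
        return np.dot(A_true, X) + np.dot(B_true, U)
    return num_jacobian([0.1, -0.3], [0.2], None, dyn)  # ~ (A_true, B_true)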
def saturate(V, Sats):
Vsat = np.array(V)
for i in range(0, len(V)):
if Vsat[i] < Sats[i, 0]:
Vsat[i] = Sats[i, 0]
elif Vsat[i] > Sats[i, 1]:
Vsat[i] = Sats[i, 1]
return Vsat
def print_lti_dynamics(A, B, txt=None, print_original_form=False, print_modal_form=False):
if txt:
print txt
if print_original_form:
print "A\n", A
print "B\n", B
w, M = np.linalg.eig(A)
print "modes \n", w
if print_modal_form:
# print "eigen vectors\n", M
# invM = np.linalg.inv(M)
# print "invM\n", invM
# Amod = np.dot(np.dot(invM, A), M)
# print "Amod\n", Amod
for i in range(len(w)):
print w[i], "->", M[:, i]
| gpl-2.0 |
smartscheduling/scikit-learn-categorical-tree | sklearn/datasets/twenty_newsgroups.py | 1 | 13426 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset size is around 14 MB. Once
uncompressed the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warn("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warn("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
open(archive_path, 'wb').write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
open(cache_path, 'wb').write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
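# Hedged sketch (not used by the loader) of how the three strip_* helpers
# above compose on a tiny hand-written post; the text is purely illustrative.
def _strip_helpers_example():
    post = ("From: [email protected]\nSubject: demo\n\n"
            "someone writes:\n> quoted line\nactual reply\n--\nsignature")
    body = strip_newsgroup_header(post)
    body = strip_newsgroup_quoting(body)
    return strip_newsgroup_footer(body)  # -> "actual reply"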
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
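# Hedged usage sketch (never executed on import): fetch a two-category,
# metadata-stripped training subset. The category names are examples from the
# standard 20 newsgroups label set.
def _fetch_20newsgroups_example():
    bunch = fetch_20newsgroups(subset='train',
                               categories=['sci.space', 'rec.autos'],
                               remove=('headers', 'footers', 'quotes'))
    return len(bunch.data), bunch.target_names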
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
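# Hedged sketch (never called): the vectorized loader returns a sparse,
# normalized count matrix plus integer targets, ready for a linear classifier.
def _fetch_20newsgroups_vectorized_example():
    bunch = fetch_20newsgroups_vectorized(subset='test')
    return bunch.data.shape, bunch.target.shape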
| bsd-3-clause |
mantidproject/mantid | qt/applications/workbench/workbench/plotting/test/test_figureinteraction.py | 3 | 31775 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
# system imports
import unittest
# third-party library imports
import matplotlib
matplotlib.use('AGG') # noqa
import matplotlib.pyplot as plt
import numpy as np
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QMenu
from testhelpers import assert_almost_equal
# local package imports
from mantid.plots import MantidAxes
from unittest.mock import MagicMock, PropertyMock, call, patch
from mantid.simpleapi import CreateWorkspace
from mantidqt.plotting.figuretype import FigureType
from mantidqt.plotting.functions import plot, pcolormesh_from_names, plot_contour, pcolormesh
from mantidqt.utils.qt.testing import start_qapplication
from workbench.plotting.figureinteraction import FigureInteraction, LogNorm
@start_qapplication
class FigureInteractionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ws = CreateWorkspace(
DataX=np.array([10, 20, 30], dtype=np.float64),
DataY=np.array([2, 3], dtype=np.float64),
DataE=np.array([0.02, 0.02], dtype=np.float64),
Distribution=False,
UnitX='Wavelength',
YUnitLabel='Counts',
OutputWorkspace='ws')
cls.ws1 = CreateWorkspace(
DataX=np.array([11, 21, 31], dtype=np.float64),
DataY=np.array([3, 4], dtype=np.float64),
DataE=np.array([0.03, 0.03], dtype=np.float64),
Distribution=False,
UnitX='Wavelength',
YUnitLabel='Counts',
OutputWorkspace='ws1')
# initialises the QApplication
super(cls, FigureInteractionTest).setUpClass()
@classmethod
def tearDownClass(cls):
cls.ws.delete()
cls.ws1.delete()
def setUp(self):
fig_manager = self._create_mock_fig_manager_to_accept_right_click()
fig_manager.fit_browser.tool = None
self.interactor = FigureInteraction(fig_manager)
self.fig, self.ax = plt.subplots() # type: matplotlib.figure.Figure, MantidAxes
def tearDown(self):
plt.close('all')
del self.fig
del self.ax
del self.interactor
# Success tests
def test_construction_registers_handler_for_button_press_event(self):
fig_manager = MagicMock()
fig_manager.canvas = MagicMock()
interactor = FigureInteraction(fig_manager)
expected_call = [
call('button_press_event', interactor.on_mouse_button_press),
call('button_release_event', interactor.on_mouse_button_release),
call('draw_event', interactor.draw_callback),
call('motion_notify_event', interactor.motion_event),
call('resize_event', interactor.mpl_redraw_annotations),
call('figure_leave_event', interactor.on_leave),
call('axis_leave_event', interactor.on_leave),
call('scroll_event', interactor.on_scroll)
]
fig_manager.canvas.mpl_connect.assert_has_calls(expected_call)
self.assertEqual(len(expected_call), fig_manager.canvas.mpl_connect.call_count)
def test_disconnect_called_for_each_registered_handler(self):
fig_manager = MagicMock()
canvas = MagicMock()
fig_manager.canvas = canvas
interactor = FigureInteraction(fig_manager)
interactor.disconnect()
self.assertEqual(interactor.nevents, canvas.mpl_disconnect.call_count)
@patch('workbench.plotting.figureinteraction.QMenu',
autospec=True)
@patch('workbench.plotting.figureinteraction.figure_type',
autospec=True)
def test_right_click_gives_no_context_menu_for_empty_figure(self, mocked_figure_type,
mocked_qmenu):
fig_manager = self._create_mock_fig_manager_to_accept_right_click()
interactor = FigureInteraction(fig_manager)
mouse_event = self._create_mock_right_click()
mocked_figure_type.return_value = FigureType.Empty
with patch.object(interactor.toolbar_manager, 'is_tool_active',
lambda: False):
interactor.on_mouse_button_press(mouse_event)
self.assertEqual(0, mocked_qmenu.call_count)
@patch('workbench.plotting.figureinteraction.QMenu',
autospec=True)
@patch('workbench.plotting.figureinteraction.figure_type',
autospec=True)
def test_right_click_gives_context_menu_for_color_plot(self, mocked_figure_type,
mocked_qmenu):
fig_manager = self._create_mock_fig_manager_to_accept_right_click()
interactor = FigureInteraction(fig_manager)
mouse_event = self._create_mock_right_click()
mocked_figure_type.return_value = FigureType.Image
# Expect a call to QMenu() for the outer menu followed by three more calls
# for the Axes, Normalization and Colorbar menus
qmenu_call1 = MagicMock()
qmenu_call2 = MagicMock()
qmenu_call3 = MagicMock()
qmenu_call4 = MagicMock()
mocked_qmenu.side_effect = [qmenu_call1, qmenu_call2, qmenu_call3, qmenu_call4]
with patch('workbench.plotting.figureinteraction.QActionGroup',
autospec=True):
with patch.object(interactor.toolbar_manager, 'is_tool_active',
lambda: False):
interactor.on_mouse_button_press(mouse_event)
self.assertEqual(0, qmenu_call1.addAction.call_count)
expected_qmenu_calls = [call(),
call("Axes", qmenu_call1),
call("Normalization", qmenu_call1),
call("Color bar", qmenu_call1)]
self.assertEqual(expected_qmenu_calls, mocked_qmenu.call_args_list)
# 4 actions in Axes submenu
self.assertEqual(4, qmenu_call2.addAction.call_count)
# 2 actions in Normalization submenu
self.assertEqual(2, qmenu_call3.addAction.call_count)
# 2 actions in Colorbar submenu
self.assertEqual(2, qmenu_call4.addAction.call_count)
@patch('workbench.plotting.figureinteraction.QMenu',
autospec=True)
@patch('workbench.plotting.figureinteraction.figure_type',
autospec=True)
def test_right_click_gives_context_menu_for_plot_without_fit_enabled(self, mocked_figure_type,
mocked_qmenu_cls):
fig_manager = self._create_mock_fig_manager_to_accept_right_click()
fig_manager.fit_browser.tool = None
interactor = FigureInteraction(fig_manager)
mouse_event = self._create_mock_right_click()
mouse_event.inaxes.get_xlim.return_value = (1, 2)
mouse_event.inaxes.get_ylim.return_value = (1, 2)
mouse_event.inaxes.lines = []
mocked_figure_type.return_value = FigureType.Line
# Expect a call to QMenu() for the outer menu followed by three more calls
# for the Axes, Normalization and Markers menus
qmenu_call1 = MagicMock()
qmenu_call2 = MagicMock()
qmenu_call3 = MagicMock()
qmenu_call4 = MagicMock()
mocked_qmenu_cls.side_effect = [qmenu_call1, qmenu_call2, qmenu_call3, qmenu_call4]
with patch('workbench.plotting.figureinteraction.QActionGroup',
autospec=True):
with patch.object(interactor.toolbar_manager, 'is_tool_active',
lambda: False):
with patch.object(interactor, 'add_error_bars_menu', MagicMock()):
interactor.on_mouse_button_press(mouse_event)
self.assertEqual(0, qmenu_call1.addSeparator.call_count)
self.assertEqual(0, qmenu_call1.addAction.call_count)
expected_qmenu_calls = [call(),
call("Axes", qmenu_call1),
call("Normalization", qmenu_call1),
call("Markers", qmenu_call1)]
self.assertEqual(expected_qmenu_calls, mocked_qmenu_cls.call_args_list)
# 4 actions in Axes submenu
self.assertEqual(4, qmenu_call2.addAction.call_count)
# 2 actions in Normalization submenu
self.assertEqual(2, qmenu_call3.addAction.call_count)
# 3 actions in Markers submenu
self.assertEqual(3, qmenu_call4.addAction.call_count)
def test_toggle_normalization_no_errorbars(self):
self._test_toggle_normalization(errorbars_on=False, plot_kwargs={'distribution': True})
def test_toggle_normalization_with_errorbars(self):
self._test_toggle_normalization(errorbars_on=True, plot_kwargs={'distribution': True})
def test_correct_yunit_label_when_overplotting_after_normalization_toggle(self):
# The earlier version of Matplotlib on RHEL throws an error when performing the second
# plot in this test, if the lines have errorbars. The error occurred when it attempted
# to draw an interactive legend. Plotting without errors still fulfills the purpose of this
# test, so turn them off for old Matplotlib versions.
errors = True
if int(matplotlib.__version__[0]) < 2:
errors = False
fig = plot([self.ws], spectrum_nums=[1], errors=errors,
plot_kwargs={'distribution': True})
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
ax = fig.axes[0]
fig_interactor._toggle_normalization(ax)
self.assertEqual(r"Counts ($\AA$)$^{-1}$", ax.get_ylabel())
plot([self.ws1], spectrum_nums=[1], errors=errors, overplot=True, fig=fig)
self.assertEqual(r"Counts ($\AA$)$^{-1}$", ax.get_ylabel())
def test_normalization_toggle_with_no_autoscale_on_update_no_errors(self):
self._test_toggle_normalization(errorbars_on=False,
plot_kwargs={'distribution': True, 'autoscale_on_update': False})
def test_normalization_toggle_with_no_autoscale_on_update_with_errors(self):
self._test_toggle_normalization(errorbars_on=True,
plot_kwargs={'distribution': True, 'autoscale_on_update': False})
def test_add_error_bars_menu(self):
self.ax.errorbar([0, 15000], [0, 14000], yerr=[10, 10000], label='MyLabel 2')
self.ax.containers[0][2][0].axes.creation_args = [{'errorevery': 1}]
main_menu = QMenu()
self.interactor.add_error_bars_menu(main_menu, self.ax)
# Check the expected sub-menu with buttons is added
added_menu = main_menu.children()[1]
self.assertTrue(
any(FigureInteraction.SHOW_ERROR_BARS_BUTTON_TEXT == child.text() for child in added_menu.children()))
self.assertTrue(
any(FigureInteraction.HIDE_ERROR_BARS_BUTTON_TEXT == child.text() for child in added_menu.children()))
def test_context_menu_not_added_for_scripted_plot_without_errors(self):
self.ax.plot([0, 15000], [0, 15000], label='MyLabel')
self.ax.plot([0, 15000], [0, 14000], label='MyLabel 2')
main_menu = QMenu()
# QMenu always seems to have 1 child when empty,
# but just making sure the count as expected at this point in the test
self.assertEqual(1, len(main_menu.children()))
# plot above doesn't have errors, nor is a MantidAxes
# so no context menu will be added
self.interactor.add_error_bars_menu(main_menu, self.ax)
# number of children should remain unchanged
self.assertEqual(1, len(main_menu.children()))
def test_scripted_plot_line_without_label_handled_properly(self):
# having the special nolabel is usually present on lines with errors,
# but sometimes can be present on lines without errors, this test covers that case
self.ax.plot([0, 15000], [0, 15000], label='_nolegend_')
self.ax.plot([0, 15000], [0, 15000], label='_nolegend_')
main_menu = QMenu()
# QMenu always seems to have 1 child when empty,
# but just making sure the count is as expected at this point in the test
self.assertEqual(1, len(main_menu.children()))
# plot above doesn't have errors, nor is a MantidAxes
# so no context menu will be added for error bars
self.interactor.add_error_bars_menu(main_menu, self.ax)
# number of children should remain unchanged
self.assertEqual(1, len(main_menu.children()))
def test_context_menu_added_for_scripted_plot_with_errors(self):
self.ax.plot([0, 15000], [0, 15000], label='MyLabel')
self.ax.errorbar([0, 15000], [0, 14000], yerr=[10, 10000], label='MyLabel 2')
self.ax.containers[0][2][0].axes.creation_args = [{'errorevery': 1}]
main_menu = QMenu()
# QMenu always seems to have 1 child when empty,
# but just making sure the count is as expected at this point in the test
self.assertEqual(1, len(main_menu.children()))
# plot above doesn't have errors, nor is a MantidAxes
# so no context menu will be added
self.interactor.add_error_bars_menu(main_menu, self.ax)
added_menu = main_menu.children()[1]
# actions should have been added now, which for this case are only `Show all` and `Hide all`
self.assertTrue(
any(FigureInteraction.SHOW_ERROR_BARS_BUTTON_TEXT == child.text() for child in added_menu.children()))
self.assertTrue(
any(FigureInteraction.HIDE_ERROR_BARS_BUTTON_TEXT == child.text() for child in added_menu.children()))
def test_context_menu_includes_plot_type_if_plot_has_multiple_lines(self):
fig, self.ax = plt.subplots(subplot_kw={'projection': 'mantid'})
self.ax.plot([0, 1], [0, 1])
self.ax.plot([0, 1], [0, 1])
main_menu = QMenu()
# QMenu always seems to have 1 child when empty,
# but just making sure the count is as expected at this point in the test
self.assertEqual(1, len(main_menu.children()))
self.interactor._add_plot_type_option_menu(main_menu, self.ax)
added_menu = main_menu.children()[1]
self.assertEqual(added_menu.children()[0].text(), "Plot Type")
def test_context_menu_does_not_include_plot_type_if_plot_has_one_line(self):
fig, self.ax = plt.subplots(subplot_kw={'projection': 'mantid'})
self.ax.errorbar([0, 1], [0, 1], capsize=1)
main_menu = QMenu()
# QMenu always seems to have 1 child when empty,
# but just making sure the count is as expected at this point in the test
self.assertEqual(1, len(main_menu.children()))
self.interactor._add_plot_type_option_menu(main_menu, self.ax)
# Number of children should remain unchanged
self.assertEqual(1, len(main_menu.children()))
def test_scripted_plot_show_and_hide_all(self):
self.ax.plot([0, 15000], [0, 15000], label='MyLabel')
self.ax.errorbar([0, 15000], [0, 14000], yerr=[10, 10000], label='MyLabel 2')
self.ax.containers[0][2][0].axes.creation_args = [{'errorevery': 1}]
anonymous_menu = QMenu()
# this initialises some of the class internals
self.interactor.add_error_bars_menu(anonymous_menu, self.ax)
self.assertTrue(self.ax.containers[0][2][0].get_visible())
self.interactor.errors_manager.toggle_all_errors(self.ax, make_visible=False)
self.assertFalse(self.ax.containers[0][2][0].get_visible())
# make the menu again, this updates the internal state of the errors manager
# and is what actually happens when the user opens the menu again
self.interactor.add_error_bars_menu(anonymous_menu, self.ax)
self.interactor.errors_manager.toggle_all_errors(self.ax, make_visible=True)
self.assertTrue(self.ax.containers[0][2][0].get_visible())
def test_no_normalisation_options_on_non_workspace_plot(self):
fig, self.ax = plt.subplots(subplot_kw={'projection': 'mantid'})
self.ax.plot([1, 2], [1, 2], label="myLabel")
anonymous_menu = QMenu()
self.assertEqual(None, self.interactor._add_normalization_option_menu(anonymous_menu, self.ax))
# Failure tests
def test_construction_with_non_qt_canvas_raises_exception(self):
class NotQtCanvas(object):
pass
class FigureManager(object):
def __init__(self):
self.canvas = NotQtCanvas()
self.assertRaises(RuntimeError, FigureInteraction, FigureManager())
def test_context_menu_change_axis_scale_is_axis_aware(self):
fig = plot([self.ws, self.ws1], spectrum_nums=[1, 1], tiled=True)
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
scale_types = ("log", "log")
ax = fig.axes[0]
ax1 = fig.axes[1]
current_scale_types = (ax.get_xscale(), ax.get_yscale())
current_scale_types1 = (ax1.get_xscale(), ax1.get_yscale())
self.assertEqual(current_scale_types, current_scale_types1)
fig_interactor._quick_change_axes(scale_types, ax)
current_scale_types2 = (ax.get_xscale(), ax.get_yscale())
self.assertNotEqual(current_scale_types2, current_scale_types1)
def test_scale_on_ragged_workspaces_maintained_when_toggling_normalisation(self):
ws = CreateWorkspace(DataX=[1, 2, 3, 4, 2, 4, 6, 8], DataY=[2] * 8, NSpec=2, OutputWorkspace="ragged_ws")
fig = pcolormesh_from_names([ws])
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
fig_interactor._toggle_normalization(fig.axes[0])
clim = fig.axes[0].images[0].get_clim()
fig_interactor._toggle_normalization(fig.axes[0])
self.assertEqual(clim, fig.axes[0].images[0].get_clim())
self.assertNotEqual((-0.1, 0.1), fig.axes[0].images[0].get_clim())
def test_log_maintained_when_normalisation_toggled(self):
ws = CreateWorkspace(DataX=[1, 2, 3, 4, 2, 4, 6, 8], DataY=[2] * 8, NSpec=2, OutputWorkspace="ragged_ws")
fig = pcolormesh_from_names([ws])
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
fig_interactor._change_colorbar_axes(LogNorm)
fig_interactor._toggle_normalization(fig.axes[0])
self.assertTrue(isinstance(fig.axes[0].images[-1].norm, LogNorm))
@patch('workbench.plotting.figureinteraction.QMenu', autospec=True)
@patch('workbench.plotting.figureinteraction.figure_type', autospec=True)
def test_right_click_gives_marker_menu_when_hovering_over_one(self, mocked_figure_type, mocked_qmenu_cls):
mouse_event = self._create_mock_right_click()
mouse_event.inaxes.get_xlim.return_value = (1, 2)
mouse_event.inaxes.get_ylim.return_value = (1, 2)
mocked_figure_type.return_value = FigureType.Line
marker1 = MagicMock()
marker2 = MagicMock()
marker3 = MagicMock()
self.interactor.markers = [marker1, marker2, marker3]
for marker in self.interactor.markers:
marker.is_above.return_value = True
# Expect a call to QMenu() for the outer menu followed by three more calls,
# one for each hovered marker's submenu
qmenu_call1 = MagicMock()
qmenu_call2 = MagicMock()
qmenu_call3 = MagicMock()
qmenu_call4 = MagicMock()
mocked_qmenu_cls.side_effect = [qmenu_call1, qmenu_call2, qmenu_call3, qmenu_call4]
with patch('workbench.plotting.figureinteraction.QActionGroup', autospec=True):
with patch.object(self.interactor.toolbar_manager, 'is_tool_active', lambda: False):
with patch.object(self.interactor, 'add_error_bars_menu', MagicMock()):
self.interactor.on_mouse_button_press(mouse_event)
self.assertEqual(0, qmenu_call1.addSeparator.call_count)
self.assertEqual(0, qmenu_call1.addAction.call_count)
expected_qmenu_calls = [call(),
call(marker1.name, qmenu_call1),
call(marker2.name, qmenu_call1),
call(marker3.name, qmenu_call1)]
self.assertEqual(expected_qmenu_calls, mocked_qmenu_cls.call_args_list)
# 2 Actions in marker menu
self.assertEqual(2, qmenu_call2.addAction.call_count)
self.assertEqual(2, qmenu_call3.addAction.call_count)
self.assertEqual(2, qmenu_call4.addAction.call_count)
@patch('workbench.plotting.figureinteraction.SingleMarker')
def test_adding_horizontal_marker_adds_correct_marker(self, mock_marker):
y0, y1 = 0, 1
data = MagicMock()
axis = MagicMock()
self.interactor._add_horizontal_marker(data, y0, y1, axis)
expected_call = call(self.interactor.canvas, '#2ca02c', data, y0, y1,
name='marker 0',
marker_type='YSingle',
line_style='dashed',
axis=axis)
self.assertEqual(1, mock_marker.call_count)
mock_marker.assert_has_calls([expected_call])
@patch('workbench.plotting.figureinteraction.SingleMarker')
def test_adding_vertical_marker_adds_correct_marker(self, mock_marker):
x0, x1 = 0, 1
data = MagicMock()
axis = MagicMock()
self.interactor._add_vertical_marker(data, x0, x1, axis)
expected_call = call(self.interactor.canvas, '#2ca02c', data, x0, x1,
name='marker 0',
marker_type='XSingle',
line_style='dashed',
axis=axis)
self.assertEqual(1, mock_marker.call_count)
mock_marker.assert_has_calls([expected_call])
def test_delete_marker_does_not_delete_markers_if_not_present(self):
marker = MagicMock()
self.interactor.markers = []
self.interactor._delete_marker(marker)
self.assertEqual(0, self.interactor.canvas.draw.call_count)
self.assertEqual(0, marker.marker.remove.call_count)
self.assertEqual(0, marker.remove_all_annotations.call_count)
    def test_delete_marker_performs_correct_cleanup(self):
marker = MagicMock()
self.interactor.markers = [marker]
self.interactor._delete_marker(marker)
self.assertEqual(1, marker.marker.remove.call_count)
self.assertEqual(1, marker.remove_all_annotations.call_count)
self.assertEqual(1, self.interactor.canvas.draw.call_count)
self.assertNotIn(marker, self.interactor.markers)
@patch('workbench.plotting.figureinteraction.SingleMarkerEditor')
@patch('workbench.plotting.figureinteraction.QApplication')
def test_edit_marker_opens_correct_editor(self, mock_qapp, mock_editor):
marker = MagicMock()
expected_call = [call(self.interactor.canvas,
marker,
self.interactor.valid_lines,
self.interactor.valid_colors,
[])]
self.interactor._edit_marker(marker)
self.assertEqual(1, mock_qapp.restoreOverrideCursor.call_count)
mock_editor.assert_has_calls(expected_call)
@patch('workbench.plotting.figureinteraction.GlobalMarkerEditor')
def test_global_edit_marker_opens_correct_editor(self, mock_editor):
marker = MagicMock()
self.interactor.markers = [marker]
expected_call = [call(self.interactor.canvas, [marker],
self.interactor.valid_lines,
self.interactor.valid_colors)]
self.interactor._global_edit_markers()
mock_editor.assert_has_calls(expected_call)
def test_motion_event_returns_if_toolbar_has_active_tools(self):
self.interactor.toolbar_manager.is_tool_active = MagicMock(return_value=True)
self.interactor._set_hover_cursor = MagicMock()
self.interactor.motion_event(MagicMock())
self.assertEqual(0, self.interactor._set_hover_cursor.call_count)
def test_motion_event_returns_if_fit_active(self):
self.interactor.toolbar_manager.is_fit_active = MagicMock(return_value=True)
self.interactor._set_hover_cursor = MagicMock()
self.interactor.motion_event(MagicMock())
self.assertEqual(0, self.interactor._set_hover_cursor.call_count)
def test_motion_event_changes_cursor_and_draws_canvas_if_any_marker_is_moving(self):
markers = [MagicMock(), MagicMock(), MagicMock()]
for marker in markers:
marker.mouse_move.return_value = True
event = MagicMock()
event.xdata = 1
event.ydata = 2
self.interactor.markers = markers
self.interactor.toolbar_manager.is_tool_active = MagicMock(return_value=False)
self.interactor.toolbar_manager.is_fit_active = MagicMock(return_value=False)
self.interactor._set_hover_cursor = MagicMock()
self.interactor.motion_event(event)
self.interactor._set_hover_cursor.assert_has_calls([call(1, 2)])
self.assertEqual(1, self.interactor.canvas.draw.call_count)
def test_motion_event_changes_cursor_and_does_not_draw_canvas_if_no_marker_is_moving(self):
markers = [MagicMock(), MagicMock(), MagicMock()]
for marker in markers:
marker.mouse_move.return_value = False
event = MagicMock()
event.xdata = 1
event.ydata = 2
self.interactor.markers = markers
self.interactor.toolbar_manager.is_tool_active = MagicMock(return_value=False)
self.interactor.toolbar_manager.is_fit_active = MagicMock(return_value=False)
self.interactor._set_hover_cursor = MagicMock()
self.interactor.motion_event(event)
self.interactor._set_hover_cursor.assert_has_calls([call(1, 2)])
self.assertEqual(0, self.interactor.canvas.draw.call_count)
def test_redraw_annotations_removes_and_adds_all_annotations_for_all_markers(self):
markers = [MagicMock(), MagicMock(), MagicMock()]
call_list = [call.remove_all_annotations(), call.add_all_annotations()]
self.interactor.markers = markers
self.interactor.redraw_annotations()
for marker in markers:
marker.assert_has_calls(call_list)
def test_mpl_redraw_annotations_does_not_redraw_if_event_does_not_have_a_button_attribute(self):
self.interactor.redraw_annotations = MagicMock()
event = MagicMock(spec='no_button')
event.no_button = MagicMock(spec='no_button')
self.interactor.mpl_redraw_annotations(event.no_button)
self.assertEqual(0, self.interactor.redraw_annotations.call_count)
def test_mpl_redraw_annotations_does_not_redraw_if_event_button_not_pressed(self):
self.interactor.redraw_annotations = MagicMock()
event = MagicMock()
event.button = None
self.interactor.mpl_redraw_annotations(event)
self.assertEqual(0, self.interactor.redraw_annotations.call_count)
def test_mpl_redraw_annotations_redraws_if_button_pressed(self):
self.interactor.redraw_annotations = MagicMock()
event = MagicMock()
self.interactor.mpl_redraw_annotations(event)
self.assertEqual(1, self.interactor.redraw_annotations.call_count)
def test_toggle_normalisation_on_contour_plot_maintains_contour_line_colour(self):
from mantid.plots.legend import convert_color_to_hex
ws = CreateWorkspace(DataX=[1, 2, 3, 4, 2, 4, 6, 8], DataY=[2] * 8, NSpec=2, OutputWorkspace="test_ws")
fig = plot_contour([ws])
for col in fig.get_axes()[0].collections:
col.set_color("#ff9900")
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
fig_interactor._toggle_normalization(fig.axes[0])
self.assertTrue(all(convert_color_to_hex(col.get_color()[0]) == "#ff9900"
for col in fig.get_axes()[0].collections))
def test_toggle_normalisation_applies_to_all_images_if_one_colorbar(self):
fig = pcolormesh([self.ws, self.ws])
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
# there should be 3 axes, 2 colorplots and 1 colorbar
self.assertEqual(3, len(fig.axes))
fig.axes[0].tracked_workspaces.values()
self.assertTrue(fig.axes[0].tracked_workspaces['ws'][0].is_normalized)
self.assertTrue(fig.axes[1].tracked_workspaces['ws'][0].is_normalized)
fig_interactor._toggle_normalization(fig.axes[0])
self.assertFalse(fig.axes[0].tracked_workspaces['ws'][0].is_normalized)
self.assertFalse(fig.axes[1].tracked_workspaces['ws'][0].is_normalized)
# Private methods
def _create_mock_fig_manager_to_accept_right_click(self):
fig_manager = MagicMock()
canvas = MagicMock()
type(canvas).buttond = PropertyMock(return_value={Qt.RightButton: 3})
fig_manager.canvas = canvas
return fig_manager
def _create_mock_right_click(self):
        mouse_event = MagicMock(inaxes=MagicMock(spec=MantidAxes, collections=[], creation_args=[{}]))
type(mouse_event).button = PropertyMock(return_value=3)
return mouse_event
def _test_toggle_normalization(self, errorbars_on, plot_kwargs):
fig = plot([self.ws], spectrum_nums=[1], errors=errorbars_on,
plot_kwargs=plot_kwargs)
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
        # Earlier versions of matplotlib do not store the data associated with a
# line with high precision and hence we need to set a lower tolerance
# when making comparisons of this data
if matplotlib.__version__ < "2":
decimal_tol = 1
else:
decimal_tol = 7
ax = fig.axes[0]
fig_interactor._toggle_normalization(ax)
assert_almost_equal(ax.lines[0].get_xdata(), [15, 25])
assert_almost_equal(ax.lines[0].get_ydata(), [0.2, 0.3], decimal=decimal_tol)
self.assertEqual("Counts ($\\AA$)$^{-1}$", ax.get_ylabel())
fig_interactor._toggle_normalization(ax)
assert_almost_equal(ax.lines[0].get_xdata(), [15, 25])
assert_almost_equal(ax.lines[0].get_ydata(), [2, 3], decimal=decimal_tol)
self.assertEqual("Counts", ax.get_ylabel())
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
chibbargroup/CentralRepository | FLOURescence/Source/PlotFigures.py | 2 | 8346 | '''
Plotter.py
Written by J. Hayes; last editted 2/3/2017
Purpose: A script to plot the XRF spectra and their corresponding fits. Also plots the calibration curves
Notes:
Uses pyplot from matplotlib; will have to include it when building the executable
The main engine for the spectra plotting is Process_Fit_Spectra
The main engine for plotting the calibration curves is Process_Calibration_Curves
Main_Plotter runs both of the above in one shot (calibration curves first, then XRF spectra)
Input:
1) calibration_dir - directory containing the calibration curve file and the cal_concentrations file
2) cal_measure_dir - directory containing the combined calibration measurement files
3) spectra_dir - directory containing the XRF spectra and fit files
4) header_file - file containing the sample label information
5) output_dir - directory in which the output should be saved (folders inside this directory will be created)
Output:
Spectra and fits will be saved in output_dir/spectra_plots; plots for each plate will be saved in directories with the plate name
Calibration curves will be saved in calibration_dir/cal_curve_plots
'''
import pandas as pd
import numpy as np
from os import listdir, mkdir
from os.path import isfile, join, isdir, basename
import matplotlib.pyplot as plt
#Make a directory for saving files in if it doesn't already exist; returns the full directory path
def Make_Save_Directory(output_dir, directory_name):
directory = join(output_dir, directory_name)
if not isdir(directory):
mkdir(directory)
return directory
#Relabel the columns in a spectra file to the sample names
def Relabel_Spectra_Columns(spectrum_file, plate_name, header_file):
headers = pd.read_csv(header_file, index_col = 0)
spectra = pd.read_csv(spectrum_file, index_col = False)
plate_name = basename(spectrum_file).split('_')[0]
for plate in headers.index:
if plate.lower() == plate_name.lower():
sample_names = headers.ix[plate, :]
new_spectra_labels = []
for name in sample_names:
name = str(name)
new_spectra_labels.append(name)
new_spectra_labels.append(name)
new_spectra_labels.append(name)
#Try to relabel the spectra columns, if something goes wrong, the program will note it
try:
spectra.columns = new_spectra_labels
spectra = Remove_Empty_Spectra(spectra)
except UnboundLocalError:
print("Hmm...something went wrong while labelling the spectra for plate %s" %plate_name)
return
return spectra
#Remove the spectra collected on the empty/blank cells from the spectra dataframe; takes dataframe, returns dataframe
def Remove_Empty_Spectra(data):
empty_aliases = ['empty', 'Empty', 'x', 'X', 'blank', 'Blank']
for column in set(data.columns): #Note: Use set because del data[column] will remove all instances of that column value
if column in empty_aliases:
del data[column]
return data
#Generate plots of each spectrum in a spectra file
def Spectra_Plotter(spectra, spectrum_file, output_dir):
plate_name = spectrum_file.split('_')[0]
save_dir = Make_Save_Directory(output_dir, plate_name)
i = 0
for column in spectra.columns:
col_type = i % 3
#Note range [2:] is result of first entry in column = energy, y_data, etc, second being blank
if col_type == 0:
energy = list(spectra.ix[:, i][2:])
elif col_type == 1:
y_data = list(spectra.ix[:, i][2:])
elif col_type == 2:
fit = list(spectra.ix[:, i][2:])
Make_Spectrum_Plot(energy, y_data, fit, column, save_dir)
i += 1
#Makes a single plot and saves it as output_dir/sample_name (note output_dir is whatever is specified when called)
def Make_Spectrum_Plot(energy, y_data, fit, sample_name, output_dir):
file_name = join(output_dir, sample_name) + '_0.png'
#Deal with sample repeat cases
i = 1
while isfile(file_name):
file_name = file_name.replace('.png', '')[:-2] + '_%s.png' %i
i += 1
plt.plot(energy, y_data)
plt.plot(energy, fit)
plt.xlabel('Energy (keV)')
plt.ylabel('Counts (a.u.)')
plt.title(sample_name)
print("Saving...%s" %file_name)
plt.savefig(file_name)
plt.clf()
#Script that processes the spectra files; master module for the spectra processing
def Process_Fit_Spectra(spectra_dir, header_file, output_dir):
spectra_files = [f for f in listdir(spectra_dir) if isfile(join(spectra_dir, f))]
#Setup output directories if they don't already exist
relabeled_spectra_dir = Make_Save_Directory(output_dir, "labeled_spectra")
spectra_plot_dir = Make_Save_Directory(output_dir, "spectra_plots")
for file in spectra_files:
plate_name = file.split('_')[0]
file_path = join(spectra_dir, file)
print("Relabelling the plate: %s" %plate_name)
spectra = Relabel_Spectra_Columns(file_path, plate_name, header_file)
spectra.to_csv(join(relabeled_spectra_dir, file), index = False)
print("Plotting spectra to %s" %spectra_plot_dir)
Spectra_Plotter(spectra, file, spectra_plot_dir)
#Read in the concentrations from the saved cal_concentrations.csv file
def Read_Concentrations(calibration_dir):
conc_file = join(calibration_dir, "cal_concentrations.csv")
conc_value_df = pd.read_csv(conc_file, header = None)
conc_values = np.array(conc_value_df[1])
return conc_values
#Read in the calibration curve data, return at dictionary of tuples: Element: (slope, intercept)
def Read_Calibration_Curves(calibration_dir):
calibration_curve_file = join(calibration_dir, "calibration_curves.csv")
cal_curves = pd.read_csv(calibration_curve_file, index_col = 0)
cal_curve_params = {}
for element in cal_curves:
cal_curve_params[element] = (cal_curves[element]['slope'], cal_curves[element]['intercept'])
return(cal_curve_params)
#Read the averaged calibration measurements, return as dictionary or arrays: Element: [cal 1, cal 2, ...]
def Read_Calibration_Measurements(data_dir):
cal_measurement_file = join(data_dir, 'calibration_data_avg.csv')
cal_measurement_df = pd.read_csv(cal_measurement_file, index_col = 0)
cal_measurements = {}
for element in cal_measurement_df.index:
cal_measurements[element] = np.array(cal_measurement_df.ix[element])
return cal_measurements
#Plot the measured data and the fitted curve
def Make_Cal_Curve_Plot(cal_conc, cal_curve, cal_measurements, element, output_dir):
file_name = join(output_dir, element + '.png')
print("Writing...%s" %file_name)
#Calculate plot points for calibration curve
cal_curve_y1 = cal_conc[0]*cal_curve[0] + cal_curve[1]
cal_curve_y2 = cal_conc[-1]*cal_curve[0] + cal_curve[1]
cal_measure_y = [cal_curve_y1, cal_curve_y2]
cal_measure_x = [cal_conc[0], cal_conc[-1]]
plt.plot(cal_measure_x, cal_measure_y)
    #Deals with case where cal concs are not ordered from biggest to smallest
    if cal_conc[-1] > cal_conc[0]:
        cal_conc = np.flipud(cal_conc)
    plt.scatter(cal_conc, cal_measurements)
plt.xlabel('Concentration (ppm)')
plt.ylabel('Peak Area (a.u.)')
plt.title(element)
plt.savefig(file_name)
plt.clf()
#Batch process calibration data; main engine for plotting calibration curves and data
def Process_Calibration_Curves(calibration_dir, measurement_dir):
output_dir = Make_Save_Directory(calibration_dir, "cal_curve_plots")
calibration_concs = Read_Concentrations(calibration_dir)
calibration_curves = Read_Calibration_Curves(calibration_dir)
calibration_measurements = Read_Calibration_Measurements(measurement_dir)
for element in calibration_measurements:
Make_Cal_Curve_Plot(calibration_concs, calibration_curves[element], calibration_measurements[element], element, output_dir)
#The engine that runs entire script (spectra plotting and calibration curve plotting)
def Main_Plotter(calibration_dir, cal_measure_dir, spectra_dir, header_file, output_dir):
Process_Calibration_Curves(calibration_dir, cal_measure_dir)
Process_Fit_Spectra(spectra_dir, header_file, output_dir)
'''
header_file = 'C:/Users/John/Desktop/Test/labels.csv'
spectra_dir = 'C:/Users/John/Desktop/Test/Output/Spectra'
output_dir = 'C:/Users/John/Desktop/Test/Output/'
calibration_dir = 'C:/Users/John/Desktop/Test/Output/calibration_results'
data_dir = 'C:/Users/John/Desktop/Test/Output/combined_files'
#Process_Fit_Spectra(spectra_dir, header_file, output_dir)
#Read_Concentrations(calibration_dir)
#df = Read_Calibration_Curves(calibration_dir)
Main_Plotter(calibration_dir, data_dir, spectra_dir, header_file, output_dir)
''' | mit |
cybernet14/scikit-learn | sklearn/cluster/dbscan_.py | 92 | 12380 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None,
random_state=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
    random_state : numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
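    Examples
    --------
    A minimal illustrative sketch: three nearby points end up in a single
    cluster and the distant point is labelled as noise (-1)::

        import numpy as np
        X = np.array([[1.0, 1.0], [1.1, 1.0], [1.2, 1.1], [10.0, 10.0]])
        core_samples, labels = dbscan(X, eps=0.5, min_samples=2)
        # core_samples -> array([0, 1, 2]); labels -> array([ 0,  0,  0, -1])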
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if random_state is not None:
warnings.warn("The parameter random_state is deprecated in 0.16 "
"and will be removed in version 0.18. "
"DBSCAN is deterministic except for rare border cases.",
category=DeprecationWarning)
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While true, it's useless information)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
    random_state : numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
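    Examples
    --------
    A minimal illustrative sketch using the estimator interface; the expected
    results are indicated in the comments::

        import numpy as np
        X = np.array([[1.0, 1.0], [1.1, 1.0], [1.2, 1.1], [10.0, 10.0]])
        db = DBSCAN(eps=0.5, min_samples=2).fit(X)
        # db.labels_              -> array([ 0,  0,  0, -1])
        # db.core_sample_indices_ -> array([0, 1, 2])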
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, random_state=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.random_state = random_state
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
| bsd-3-clause |
CERNatschool/inverse-square-law | analysis.py | 1 | 9715 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================
# Analysis of the Inverse Square Law data
#=========================================
#
# See the README.md for more information.
# Import the code needed to manage files and sort the data.
import os, inspect, glob
from operator import itemgetter
# Import the plotting libraries.
import pylab as plt
import numpy as n
from matplotlib import rc
# Set the default tick label distance.
plt.rcParams['xtick.major.pad']='8'
plt.rcParams['ytick.major.pad']='8'
# Uncomment to use LaTeX for the plot text.
#rc('font',**{'family':'serif','serif':['Computer Modern']})
#rc('text', usetex=True)
# Import the clustering and web-page writing code.
from clustering import *
# Uncomment these if you have ROOT installed on your system.
#from ROOT import TGraphErrors
#from ROOT import TF1
# The class for handling data entries.
class Entry:
def __init__(self, r, dots, blobs, time):
self.r = r # [mm]
self.Ndots = dots
self.Nblobs = blobs
self.Dtime = time # [s]
    # The photon rate: total clusters (dots + blobs) per second of exposure.
    def Nphot(self):
        N = float(self.Ndots + self.Nblobs) / self.Dtime
        return N
    # The Poisson counting error on the rate: sqrt(N) counts over the exposure time.
    def NphotErr(self):
        N = float(self.Ndots + self.Nblobs)
        errN = n.sqrt(N)
        return errN / self.Dtime
# Get the path of the current directory
path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#=============================================================================
# The main program.
#=============================================================================
if __name__=="__main__":
print("===============================================")
print(" CERN@school Inverse Square Law Analysis ")
print("===============================================")
print("*")
# Create the dictionary for the data entries.
data = {}
    # A list of dictionaries (JSON-like) describing our data files.
datafiles = [
{"r": 40.0, "path":"data/C08-W0082/2012/11/29/145250/", "t":66.8},
{"r": 50.0, "path":"data/C08-W0082/2012/11/29/141114/", "t":67.0},
{"r": 60.0, "path":"data/C08-W0082/2012/11/29/141512/", "t":67.0},
{"r": 70.0, "path":"data/C08-W0082/2012/11/29/141907/", "t":66.3},
{"r": 80.0, "path":"data/C08-W0082/2012/11/29/142422/", "t":66.4},
{"r": 90.0, "path":"data/C08-W0082/2012/11/29/142731/", "t":66.3},
{"r":100.0, "path":"data/C08-W0082/2012/11/29/143259/", "t":66.6},
{"r":110.0, "path":"data/C08-W0082/2012/11/29/143547/", "t":66.7},
{"r":120.0, "path":"data/C08-W0082/2012/11/29/143810/", "t":66.3},
{"r":130.0, "path":"data/C08-W0082/2012/11/29/144048/", "t":66.5},
{"r":140.0, "path":"data/C08-W0082/2012/11/29/144315/", "t":66.6},
{"r":150.0, "path":"data/C08-W0082/2012/11/29/144544/", "t":66.5},
]
print("* |-------------------------------------------|")
print("* | r/mm | Dots | Blobs | Others | Dt/s |")
print("* |-------------------------------------------|")
# Loop over the datasets sorted by radius.
for dataset in sorted(datafiles, key=itemgetter('r')):
#print("* Current dataset is '%s'" % (dataset['path']))
#print("* r = %5.1f [mm]." % (dataset['r']))
n_1 = 0; n_2 = 0; n_3 = 0; n_4 = 0
n_else = 0
# Loop over the datafiles and read the data.
for datafilename in glob.glob(dataset['path'] + "/*.txt"):
#print("*--> Data file is '%s'" % (datafilename))
# Open the file and read in the data.
f = open(datafilename, 'r')
payload = f.read()
f.close()
# Create a "dictionary" for the pixel information.
pixels = {}
# Loop over the X Y C values in the file and add them to the
# pixel dictionary.
for datarow in payload.splitlines():
#print dataline
v = datarow.split('\t') # Separates the x y C values
x = int(v[0]); y = int(v[1]); C = int(v[2])
X = 256 * y + x
pixels[X] = C
# Create a "BlobFinder" to cluster the pixels we've just extracted.
# See clustering.py for more about how this is done.
blob_finder = BlobFinder(pixels, 256, 256)
# Loop over the blobs found in the blob finder and record their
# properties for plotting.
for b in blob_finder.blob_list:
if b.get_size() == 1:
n_1 += 1
elif b.get_size() == 2:
n_2 += 1
elif b.get_size() == 3:
#print("*----> n_3, r = %f" % b.r_u)
if b.r_u < 0.75:
n_3 += 1
elif b.get_size() == 4:
#print("*----> n_4, r = %f" % b.r_u)
if b.r_u < 0.71:
n_4 += 1
else:
n_else += 1
# Update the user.
#print("* Number of dots = %6d" % (n_1 + n_2))
#print("* Number of blobs = %6d" % (n_3 + n_4))
#print("* Number of others = %6d" % (n_else) )
n_d = n_1 + n_2; n_b = n_3 + n_4
print("* | %5.1f | %6d | %6d | %6d | %4.1f |" % \
(dataset['r'],n_d,n_b,n_else,dataset['t']))
# Populate the data.
data[dataset['r']] = Entry(dataset['r'],n_1+n_2,n_3+n_4,dataset['t'])
print("* |-------------------------------------------|")
print("*")
#-------------------------------------------------------------------------
# Analysing the data
#-------------------------------------------------------------------------
#
# Create the arrays for the data analysis and plots.
ra = n.array([]) # The distance values [mm].
er = n.array([]) # The error on the distance [mm].
Ng = n.array([]) # The number of photons/s [s^{-1}].
eNg = n.array([]) # The error on the number of photons/s [s^{-1}]
oosqrtNg = n.array([]) # One Over the square root of N_g.
eoosqrtNg = n.array([]) # The error on 1/sqrt(N_g).
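    #
    # Why 1/sqrt(N_g)? For an inverse square law the photon rate falls off as
    # N_g = A / r^2 (A constant for a given source and detector), so
    # 1/sqrt(N_g) = r / sqrt(A) should be a straight line in r; this is what
    # the fit below tests. The error on 1/sqrt(N_g) comes from standard error
    # propagation: sigma = 0.5 * (1/sqrt(N_g)) * (err(N_g)/N_g), as computed below.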
# Populate the arrays with the data.
    for r, e in sorted(data.items()):
# Add the distance to the array.
ra = n.append(ra, r)
# The error on the distance. We measured to the nearest millimeter.
er = n.append(er, 1.)
# Calculate the number of photons/s using the Entry class method.
Ng = n.append(Ng, e.Nphot())
# Get the error using the Entry class method.
eNg = n.append(eNg, e.NphotErr())
# Calculate 1/sqrt(N_g) and the error.
oosqrtNg = n.append(oosqrtNg, 1./n.sqrt(e.Nphot()))
eoosqrtNg = n.append(eoosqrtNg, \
0.5 * (1./n.sqrt(e.Nphot())) * ((e.NphotErr())/(e.Nphot())))
# Here we use the ROOT software suite to calculate the line of best fit.
# If you have ROOT installed, uncomment this section to produce the
# m and c variables yourself.
#
#datagraph = TGraphErrors(len(ra), ra, oosqrtNg, er, eoosqrtNg)
#
#fitB = TF1("fitB", "[0] * x + [1]", 39., 151.)
#fitB.SetParameter(0, 100.)
#fitB.SetParameter(1, 0.)
#datagraph.Fit("fitB", "R")
#
#m = fitB.GetParameter(0)
#c = fitB.GetParameter(1)
#
#datagraph.Draw("A*")
# If you're not using the ROOT functionality, we have provided the hard-coded
# values. Comment these out if you're using the code above.
m = 0.002398
c = -0.003646
print("*")
print("*------------------------")
print("* Fit parameters: mx + c ")
print("*------------------------")
print("* m = % 8.6f" % (m))
print("* c = % 8.6f" % (c))
print("*------------------------")
print("*")
#-------------------------------------------------------------------------
    # Now we've read in the data and recorded the cluster properties,
# we can make the plot.
    # Fig. 1: r vs. 1/sqrt(N_phot/s)
#-------------------------------------------------------------------------
# Create the plot.
rvsplot = plt.figure(101, figsize=(5.0, 5.0), \
dpi=150, \
facecolor='w', \
edgecolor='w')
#
rvsplot.subplots_adjust(bottom=0.15, left=0.15)
rvsplotax = rvsplot.add_subplot(111)
# y axis
plt.ylabel('$1 / \\sqrt{N_{\\gamma}}$ / s$\,^{1/2}$')
# x axis
plt.xlabel('$r$ / mm')
# Add a grid.
plt.grid(1)
#
# Plot the data with error bars.
plt.errorbar(ra, oosqrtNg, yerr=eoosqrtNg, xerr=er, \
fmt='', \
lw=0, \
color='black', \
ecolor='black', \
capthick=0, \
elinewidth=1, \
label='data')
# Create and plot the line of best fit.
x = n.arange(0.,160.,0.1)
y = m*x + c
plt.plot(x,y,'r-',label='line of best fit')
# Set the axis limits.
rvsplotax.set_xlim([0,160])
rvsplotax.set_ylim([0,0.4])
# Now add the legend with some customizations.
legend = rvsplotax.legend(loc='upper left', shadow=False, numpoints=1)
# Set the fontsize of the legend.
for label in legend.get_texts():
label.set_fontsize(12)
# The frame is matplotlib.patches.Rectangle instance surrounding the legend.
frame = legend.get_frame()
frame.set_facecolor('w')
frame.set_linewidth(0.5)
# Save the figure.
rvsplot.savefig("r_vs_oosqrtNg.png")
print("*")
print("* Analysis complete!")
print("*")
| mit |
TomAugspurger/engarde | docs/sphinxext/ipython_directive.py | 3 | 37521 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
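For reference, a ``conf.py`` fragment that simply restates the defaults
described above might look like this (every option here is optional)::

    ipython_promptin = 'In [%d]:'
    ipython_promptout = 'Out [%d]:'
    ipython_mplbackend = 'agg'
    ipython_execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
    ipython_holdcount = True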
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
text_type = str
else:
from StringIO import StringIO
text_type = unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
            # default to brute utf8 if no encoding succeeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbededSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
        # needed. This attribute is set by IPythonDirective.run()
        # based on the specified block options, defaulting to ['utf8'].
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. it is unedited directive content
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = None
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
    app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
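# A typical Sphinx conf.py sketch for this directive (the extension path and the
# values below are illustrative assumptions, not requirements of this module):
#
#   extensions = ['sphinxext.ipython_directive']  # wherever this file is importable
#   ipython_savefig_dir = '_static'
#   ipython_execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
#   ipython_holdcount = True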
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
    # skip the first example, which depends on a local file:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| mit |
StratsOn/zipline | zipline/gens/tradesimulation.py | 2 | 15065 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logbook import Logger, Processor
from pandas.tslib import normalize_date
from zipline.finance import trading
from zipline.protocol import (
BarData,
SIDData,
DATASOURCE_TYPE
)
log = Logger('Trade Simulation')
class AlgorithmSimulator(object):
EMISSION_TO_PERF_KEY_MAP = {
'minute': 'minute_perf',
'daily': 'daily_perf'
}
def __init__(self, algo, sim_params):
# ==============
# Simulation
# Param Setup
# ==============
self.sim_params = sim_params
# ==============
# Algo Setup
# ==============
self.algo = algo
self.algo_start = normalize_date(self.sim_params.first_open)
# ==============
# Snapshot Setup
# ==============
# The algorithm's data as of our most recent event.
# We want an object that will have empty objects as default
# values on missing keys.
self.current_data = BarData()
# We don't have a datetime for the current snapshot until we
# receive a message.
self.simulation_dt = None
# =============
# Logging Setup
# =============
# Processor function for injecting the algo_dt into
# user prints/logs.
def inject_algo_dt(record):
if 'algo_dt' not in record.extra:
record.extra['algo_dt'] = self.simulation_dt
self.processor = Processor(inject_algo_dt)
def transform(self, stream_in):
"""
Main generator work loop.
"""
# Initialize the mkt_close
mkt_open = self.algo.perf_tracker.market_open
mkt_close = self.algo.perf_tracker.market_close
# inject the current algo
# snapshot time to any log record generated.
with self.processor.threadbound():
data_frequency = self.sim_params.data_frequency
self._call_before_trading_start(mkt_open)
for date, snapshot in stream_in:
self.simulation_dt = date
self.on_dt_changed(date)
# If we're still in the warmup period. Use the event to
# update our universe, but don't yield any perf messages,
# and don't send a snapshot to handle_data.
if date < self.algo_start:
for event in snapshot:
if event.type == DATASOURCE_TYPE.SPLIT:
self.algo.blotter.process_split(event)
elif event.type == DATASOURCE_TYPE.TRADE:
self.update_universe(event)
self.algo.perf_tracker.process_trade(event)
elif event.type == DATASOURCE_TYPE.CUSTOM:
self.update_universe(event)
else:
message = self._process_snapshot(
date,
snapshot,
self.algo.instant_fill,
)
# Perf messages are only emitted if the snapshot contained
# a benchmark event.
if message is not None:
yield message
# When emitting minutely, we re-iterate the day as a
# packet with the entire days performance rolled up.
if date == mkt_close:
if self.algo.perf_tracker.emission_rate == 'minute':
daily_rollup = self.algo.perf_tracker.to_dict(
emission_type='daily'
)
daily_rollup['daily_perf']['recorded_vars'] = \
self.algo.recorded_vars
yield daily_rollup
tp = self.algo.perf_tracker.todays_performance
tp.rollover()
if mkt_close <= self.algo.perf_tracker.last_close:
before_last_close = \
mkt_close < self.algo.perf_tracker.last_close
try:
mkt_open, mkt_close = \
trading.environment \
.next_open_and_close(mkt_close)
except trading.NoFurtherDataError:
# If at the end of backtest history,
# skip advancing market close.
pass
if self.algo.perf_tracker.emission_rate == \
'minute':
self.algo.perf_tracker\
.handle_intraday_market_close(
mkt_open,
mkt_close)
if before_last_close:
self._call_before_trading_start(mkt_open)
elif data_frequency == 'daily':
next_day = trading.environment.next_trading_day(date)
if next_day is not None and \
next_day < self.algo.perf_tracker.last_close:
self._call_before_trading_start(next_day)
self.algo.portfolio_needs_update = True
self.algo.account_needs_update = True
self.algo.performance_needs_update = True
risk_message = self.algo.perf_tracker.handle_simulation_end()
yield risk_message
def _process_snapshot(self, dt, snapshot, instant_fill):
"""
Process a stream of events corresponding to a single datetime, possibly
returning a perf message to be yielded.
If @instant_fill = True, we delay processing of events until after the
user's call to handle_data, and we process the user's placed orders
before the snapshot's events. Note that this introduces a lookahead
        bias, since the user is effectively placing orders that are
        filled based on trades that happened prior to the call to handle_data.
If @instant_fill = False, we process Trade events before calling
handle_data. This means that orders are filled based on trades
occurring in the next snapshot. This is the more conservative model,
and as such it is the default behavior in TradingAlgorithm.
"""
# Flags indicating whether we saw any events of type TRADE and type
# BENCHMARK. Respectively, these control whether or not handle_data is
# called for this snapshot and whether we emit a perf message for this
# snapshot.
any_trade_occurred = False
benchmark_event_occurred = False
if instant_fill:
events_to_be_processed = []
        # Assign the event-processing methods to local variables to avoid
        # attribute access in the innermost loops.
#
# Done here, to allow for perf_tracker or blotter to be swapped out
# or changed in between snapshots.
perf_process_trade = self.algo.perf_tracker.process_trade
perf_process_transaction = self.algo.perf_tracker.process_transaction
perf_process_order = self.algo.perf_tracker.process_order
perf_process_benchmark = self.algo.perf_tracker.process_benchmark
perf_process_split = self.algo.perf_tracker.process_split
perf_process_dividend = self.algo.perf_tracker.process_dividend
perf_process_commission = self.algo.perf_tracker.process_commission
blotter_process_trade = self.algo.blotter.process_trade
blotter_process_benchmark = self.algo.blotter.process_benchmark
# Containers for the snapshotted events, so that the events are
# processed in a predictable order, without relying on the sorted order
# of the individual sources.
# There is only one benchmark per snapshot, will be set to the current
# benchmark iff it occurs.
benchmark = None
# trades and customs are initialized as a list since process_snapshot
# is most often called on market bars, which could contain trades or
# custom events.
trades = []
customs = []
# splits and dividends are processed once a day.
#
        # The lists are created lazily mainly to signal that splits and
        # dividends are the infrequent case for this method; the performance
        # benefit of deferring the allocation is marginal.
        # The splits list will be allocated when a split occurs in the
        # snapshot.
splits = None
# dividends list will be allocated when a dividend occurs in the
# snapshot.
dividends = None
for event in snapshot:
if event.type == DATASOURCE_TYPE.TRADE:
trades.append(event)
elif event.type == DATASOURCE_TYPE.BENCHMARK:
benchmark = event
elif event.type == DATASOURCE_TYPE.SPLIT:
if splits is None:
splits = []
splits.append(event)
elif event.type == DATASOURCE_TYPE.CUSTOM:
customs.append(event)
elif event.type == DATASOURCE_TYPE.DIVIDEND:
if dividends is None:
dividends = []
dividends.append(event)
else:
                raise ValueError("Unrecognized event=%s" % event)
# Handle benchmark first.
#
# Internal broker implementation depends on the benchmark being
# processed first so that transactions and commissions reported from
# the broker can be injected.
if benchmark is not None:
benchmark_event_occurred = True
perf_process_benchmark(benchmark)
for txn, order in blotter_process_benchmark(benchmark):
if txn.type == DATASOURCE_TYPE.TRANSACTION:
perf_process_transaction(txn)
elif txn.type == DATASOURCE_TYPE.COMMISSION:
perf_process_commission(txn)
perf_process_order(order)
for trade in trades:
self.update_universe(trade)
any_trade_occurred = True
if instant_fill:
events_to_be_processed.append(trade)
else:
for txn, order in blotter_process_trade(trade):
if txn.type == DATASOURCE_TYPE.TRANSACTION:
perf_process_transaction(txn)
elif txn.type == DATASOURCE_TYPE.COMMISSION:
perf_process_commission(txn)
perf_process_order(order)
perf_process_trade(trade)
for custom in customs:
self.update_universe(custom)
if splits is not None:
for split in splits:
# process_split is not assigned to a variable since it is
# called rarely compared to the other event processors.
self.algo.blotter.process_split(split)
perf_process_split(split)
if dividends is not None:
for dividend in dividends:
perf_process_dividend(dividend)
if any_trade_occurred:
new_orders = self._call_handle_data()
for order in new_orders:
perf_process_order(order)
if instant_fill:
# Now that handle_data has been called and orders have been placed,
# process the event stream to fill user orders based on the events
# from this snapshot.
for trade in events_to_be_processed:
for txn, order in blotter_process_trade(trade):
if txn is not None:
perf_process_transaction(txn)
if order is not None:
perf_process_order(order)
perf_process_trade(trade)
if benchmark_event_occurred:
return self.get_message(dt)
else:
return None
def _call_handle_data(self):
"""
Call the user's handle_data, returning any orders placed by the algo
during the call.
"""
self.algo.event_manager.handle_data(
self.algo,
self.current_data,
self.simulation_dt,
)
orders = self.algo.blotter.new_orders
self.algo.blotter.new_orders = []
return orders
def _call_before_trading_start(self, dt):
dt = normalize_date(dt)
self.simulation_dt = dt
self.on_dt_changed(dt)
self.algo.before_trading_start()
def on_dt_changed(self, dt):
if self.algo.datetime != dt:
self.algo.on_dt_changed(dt)
def get_message(self, dt):
"""
Get a perf message for the given datetime.
"""
# Ensure that updated_portfolio has been called at least once for this
# dt before we emit a perf message. This is a no-op if
# updated_portfolio has already been called this dt.
self.algo.updated_portfolio()
self.algo.updated_account()
rvars = self.algo.recorded_vars
if self.algo.perf_tracker.emission_rate == 'daily':
perf_message = \
self.algo.perf_tracker.handle_market_close_daily()
perf_message['daily_perf']['recorded_vars'] = rvars
return perf_message
elif self.algo.perf_tracker.emission_rate == 'minute':
self.algo.perf_tracker.handle_minute_close(dt)
perf_message = self.algo.perf_tracker.to_dict()
perf_message['minute_perf']['recorded_vars'] = rvars
return perf_message
def update_universe(self, event):
"""
Update the universe with new event information.
"""
# Update our knowledge of this event's sid
# rather than use if event.sid in ..., just trying
# and handling the exception is significantly faster
try:
sid_data = self.current_data[event.sid]
except KeyError:
sid_data = self.current_data[event.sid] = SIDData(event.sid)
sid_data.__dict__.update(event.__dict__)
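# Minimal usage sketch (a hedged illustration only): `algo`, `sim_params` and
# `data_stream` are assumed to be a configured TradingAlgorithm, its simulation
# parameters and a (dt, snapshot) event stream; none of them are defined here.
#
#   simulator = AlgorithmSimulator(algo, sim_params)
#   for packet in simulator.transform(data_stream):
#       # minute_perf/daily_perf packets during the run, then a final risk message
#       handle_packet(packet)  # hypothetical consumer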
| apache-2.0 |
automl/paramsklearn | ParamSklearn/components/feature_preprocessing/feature_agglomeration.py | 1 | 3299 | import numpy as np
import sklearn.cluster
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter, \
UniformIntegerHyperparameter
from HPOlibConfigSpace.forbidden import ForbiddenInClause, \
ForbiddenAndConjunction, ForbiddenEqualsClause
from ParamSklearn.components.base import \
ParamSklearnPreprocessingAlgorithm
from ParamSklearn.constants import *
class FeatureAgglomeration(ParamSklearnPreprocessingAlgorithm):
def __init__(self, n_clusters, affinity, linkage, pooling_func,
random_state=None):
self.n_clusters = int(n_clusters)
self.affinity = affinity
self.linkage = linkage
self.pooling_func = pooling_func
self.random_state = random_state
self.pooling_func_mapping = dict(mean=np.mean,
median=np.median,
max=np.max)
def fit(self, X, Y=None):
n_clusters = min(self.n_clusters, X.shape[1])
if not callable(self.pooling_func):
self.pooling_func = self.pooling_func_mapping[self.pooling_func]
self.preprocessor = sklearn.cluster.FeatureAgglomeration(
n_clusters=n_clusters, affinity=self.affinity,
linkage=self.linkage, pooling_func=self.pooling_func)
self.preprocessor.fit(X)
return self
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'Feature Agglomeration',
'name': 'Feature Agglomeration',
'handles_missing_values': False,
'handles_nominal_values': False,
'handles_numerical_features': True,
'prefers_data_scaled': True,
'prefers_data_normalized': True,
'handles_regression': True,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, UNSIGNED_DATA),
'output': (INPUT,),
'preferred_dtype': None}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
n_clusters = cs.add_hyperparameter(UniformIntegerHyperparameter(
"n_clusters", 2, 400, 25))
affinity = cs.add_hyperparameter(CategoricalHyperparameter(
"affinity", ["euclidean", "manhattan", "cosine"], "euclidean"))
linkage = cs.add_hyperparameter(CategoricalHyperparameter(
"linkage", ["ward", "complete", "average"], "ward"))
pooling_func = cs.add_hyperparameter(CategoricalHyperparameter(
"pooling_func", ["mean", "median", "max"]))
affinity_and_linkage = ForbiddenAndConjunction(
ForbiddenInClause(affinity, ["manhattan", "cosine"]),
ForbiddenEqualsClause(linkage, "ward"))
cs.add_forbidden_clause(affinity_and_linkage)
return cs
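# Minimal usage sketch, executed only when this module is run directly.
# The random data is hypothetical and merely illustrates the fit/transform flow
# and the generated hyperparameter search space.
if __name__ == "__main__":
    X_demo = np.random.rand(20, 10)
    fa = FeatureAgglomeration(n_clusters=4, affinity="euclidean",
                              linkage="ward", pooling_func="mean")
    fa.fit(X_demo)
    print(fa.transform(X_demo).shape)  # -> (20, 4): features pooled into 4 clusters
    print(FeatureAgglomeration.get_hyperparameter_search_space())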
| bsd-3-clause |
keitaroyam/yamtbx | yamtbx/dataproc/auto/cc_clustering.py | 1 | 13056 | """
(c) RIKEN 2015-2017. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
from cctbx.array_family import flex
from cctbx import miller
from libtbx import easy_mp
from libtbx.utils import null_out
from libtbx.utils import Sorry
from yamtbx.util import call
from yamtbx.dataproc.xds.xds_ascii import XDS_ASCII
from yamtbx.dataproc.auto.blend import load_xds_data_only_indices
import os
import numpy
import collections
import scipy.cluster
import scipy.spatial
import json
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot
def calc_cc(ari, arj):
ari, arj = ari.common_sets(arj, assert_is_similar_symmetry=False)
corr = flex.linear_correlation(ari.data(), arj.data())
if corr.is_well_defined():
return corr.coefficient(), ari.size()
else:
return float("nan"), ari.size()
# calc_cc()
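# Example sketch (hedged): `a1` and `a2` stand for two cctbx intensity arrays of
# the kind produced by read_xac_files() below; calc_cc returns the correlation
# coefficient and the number of common reflections.
#
#   cc, n_common = calc_cc(a1, a2)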
def read_xac_files(xac_files, d_min=None, d_max=None, min_ios=None):
arrays = collections.OrderedDict()
for f in xac_files:
xac = XDS_ASCII(f, i_only=True)
xac.remove_rejected()
a = xac.i_obs().resolution_filter(d_min=d_min, d_max=d_max)
a = a.as_non_anomalous_array().merge_equivalents(use_internal_variance=False).array()
if min_ios is not None: a = a.select(a.data()/a.sigmas()>=min_ios)
arrays[f] = a
return arrays
# read_xac_files()
class CCClustering:
def __init__(self, wdir, xac_files, d_min=None, d_max=None, min_ios=None):
self.arrays = read_xac_files(xac_files, d_min=d_min, d_max=d_max, min_ios=min_ios)
self.wdir = wdir
self.clusters = {}
self.all_cc = {} # {(i,j):cc, ...}
if not os.path.exists(self.wdir): os.makedirs(self.wdir)
open(os.path.join(self.wdir, "filenames.lst"), "w").write("\n".join(xac_files))
# __init__()
def do_clustering(self, nproc=1, b_scale=False, use_normalized=False, cluster_method="ward", distance_eqn="sqrt(1-cc)", min_common_refs=3, html_maker=None):
"""
Using correlation as distance metric (for hierarchical clustering)
https://stats.stackexchange.com/questions/165194/using-correlation-as-distance-metric-for-hierarchical-clustering
Correlation "Distances" and Hierarchical Clustering
http://research.stowers.org/mcm/efg/R/Visualization/cor-cluster/index.htm
"""
self.clusters = {}
prefix = os.path.join(self.wdir, "cctable")
assert (b_scale, use_normalized).count(True) <= 1
distance_eqns = {"sqrt(1-cc)": lambda x: numpy.sqrt(1.-x),
"1-cc": lambda x: 1.-x,
"sqrt(1-cc^2)": lambda x: numpy.sqrt(1.-x**2),
}
cc_to_distance = distance_eqns[distance_eqn] # Fail when unknown options
assert cluster_method in ("single", "complete", "average", "weighted", "centroid", "median", "ward") # available methods in scipy
if len(self.arrays) < 2:
            print "WARNING: fewer than two datasets! can't do cc-based clustering"
self.clusters[1] = [float("nan"), [0]]
return
# Absolute scaling using Wilson-B factor
if b_scale:
from mmtbx.scaling.matthews import p_vm_calculator
from mmtbx.scaling.absolute_scaling import ml_iso_absolute_scaling
ofs_wilson = open("%s_wilson_scales.dat"%prefix, "w")
n_residues = p_vm_calculator(self.arrays.values()[0], 1, 0).best_guess
ofs_wilson.write("# guessed n_residues= %d\n" % n_residues)
ofs_wilson.write("file wilsonB\n")
for f in self.arrays:
arr = self.arrays[f]
iso_scale_and_b = ml_iso_absolute_scaling(arr, n_residues, 0)
wilson_b = iso_scale_and_b.b_wilson
ofs_wilson.write("%s %.3f\n" % (f, wilson_b))
if wilson_b > 0: # Ignoring data with B<0? is a bad idea.. but how..?
tmp = flex.exp(-2. * wilson_b * arr.unit_cell().d_star_sq(arr.indices())/4.)
self.arrays[f] = arr.customized_copy(data=arr.data()*tmp,
sigmas=arr.sigmas()*tmp)
ofs_wilson.close()
elif use_normalized:
from mmtbx.scaling.absolute_scaling import kernel_normalisation
failed = {}
for f in self.arrays:
arr = self.arrays[f]
try:
normaliser = kernel_normalisation(arr, auto_kernel=True)
self.arrays[f] = arr.customized_copy(data=arr.data()/normaliser.normalizer_for_miller_array,
sigmas=arr.sigmas()/normaliser.normalizer_for_miller_array)
except Exception, e:
failed.setdefault(e.message, []).append(f)
if failed:
msg = ""
for r in failed: msg += " %s\n%s\n" % (r, "\n".join(map(lambda x: " %s"%x, failed[r])))
raise Sorry("intensity normalization failed by following reason(s):\n%s"%msg)
# Prep
args = []
for i in xrange(len(self.arrays)-1):
for j in xrange(i+1, len(self.arrays)):
args.append((i,j))
# Calc all CC
worker = lambda x: calc_cc(self.arrays.values()[x[0]], self.arrays.values()[x[1]])
results = easy_mp.pool_map(fixed_func=worker,
args=args,
processes=nproc)
# Check NaN and decide which data to remove
idx_bad = {}
nans = []
cc_data_for_html = []
for (i,j), (cc,nref) in zip(args, results):
cc_data_for_html.append((i,j,cc,nref))
if cc==cc and nref>=min_common_refs: continue
idx_bad[i] = idx_bad.get(i, 0) + 1
idx_bad[j] = idx_bad.get(j, 0) + 1
nans.append([i,j])
if html_maker is not None:
html_maker.add_cc_clustering_details(cc_data_for_html)
idx_bad = idx_bad.items()
idx_bad.sort(key=lambda x:x[1])
remove_idxes = set()
for idx, badcount in reversed(idx_bad):
if len(filter(lambda x: idx in x, nans)) == 0: continue
remove_idxes.add(idx)
nans = filter(lambda x: idx not in x, nans)
if len(nans) == 0: break
use_idxes = filter(lambda x: x not in remove_idxes, xrange(len(self.arrays)))
N = len(use_idxes)
# Make table: original index (in file list) -> new index (in matrix)
count = 0
org2now = collections.OrderedDict()
for i in xrange(len(self.arrays)):
if i in remove_idxes: continue
org2now[i] = count
count += 1
if len(remove_idxes) > 0:
open("%s_notused.lst"%prefix, "w").write("\n".join(map(lambda x: self.arrays.keys()[x], remove_idxes)))
# Make matrix
mat = numpy.zeros(shape=(N, N))
self.all_cc = {}
ofs = open("%s.dat"%prefix, "w")
ofs.write(" i j cc nref\n")
for (i,j), (cc,nref) in zip(args, results):
ofs.write("%4d %4d % .4f %4d\n" % (i,j,cc,nref))
self.all_cc[(i,j)] = cc
if i not in remove_idxes and j not in remove_idxes:
                mat[org2now[j], org2now[i]] = cc_to_distance(min(cc, 1.)) #numpy.sqrt(1.-min(cc, 1.)) # safety guard (once encountered cc slightly > 1)
ofs.close()
# Perform cluster analysis
D = scipy.spatial.distance.squareform(mat+mat.T) # convert to reduced form (first symmetrize)
Z = scipy.cluster.hierarchy.linkage(D, cluster_method) # doesn't work with SciPy 0.17 (works with 0.18 or newer?)
pyplot.figure(figsize=(max(5, min(N,200)/10.),)*2, dpi=100)
pyplot.title("%s, %s" % (distance_eqn, cluster_method))
hclabels = map(lambda x: x+1, org2now.keys())
scipy.cluster.hierarchy.dendrogram(Z, orientation="left", labels=hclabels)
pyplot.savefig(os.path.join(self.wdir, "tree.png"))
pyplot.savefig(os.path.join(self.wdir, "tree.pdf"))
def traverse(node, results, dendro):
# Cluster id starts with the number of data files
leaves = map(lambda x: hclabels[x], sorted(node.pre_order())) # file numbers
if not node.right and not node.left: # this must be leaf
dendro["children"].append(dict(name=str(hclabels[node.id]))) # file number
else:
dendro["children"].append(dict(name=str(node.id-N+1), children=[])) # cluster number
results.append((node.id-N+1, node.dist, node.count, leaves))
if node.right: traverse(node.right, results, dendro["children"][-1])
if node.left: traverse(node.left, results, dendro["children"][-1])
# traverse()
results = []
dendro = dict(name="root", children=[])
node = scipy.cluster.hierarchy.to_tree(Z)
traverse(node, results, dendro)
dendro = dendro["children"][0]
results.sort(key=lambda x: x[0])
json.dump(dendro, open(os.path.join(self.wdir, "dendro.json"), "w"))
# Save CLUSTERS.txt and set self.clusters
ofs = open(os.path.join(self.wdir, "CLUSTERS.txt"), "w")
ofs.write("ClNumber Nds Clheight IDs\n")
for clid, dist, ncount, leaves in results:
leavestr = " ".join(map(str, leaves))
ofs.write("%04d %4d %7.3f %s\n" % (clid, ncount, dist, leavestr))
self.clusters[int(clid)] = [float(dist), leaves]
ofs.close()
# do_clustering()
def cluster_completeness(self, clno, anomalous_flag, d_min, calc_redundancy=True):
if clno not in self.clusters:
print "Cluster No. %d not found" % clno
return
cls = self.clusters[clno][-1]
msets = map(lambda x: self.miller_sets[self.arrays.keys()[x-1]], cls)
#msets = map(lambda x: self.arrays.values()[x-1], cls)
num_idx = sum(map(lambda x: x.size(), msets))
all_idx = flex.miller_index()
all_idx.reserve(num_idx)
for mset in msets: all_idx.extend(mset.indices())
# Calc median cell
cells = numpy.array(map(lambda x: x.unit_cell().parameters(), msets))
median_cell = map(lambda i: numpy.median(cells[:,i]), xrange(6))
symm = msets[0].customized_copy(unit_cell=median_cell)
assert anomalous_flag is not None
# XXX all must belong to the same Laue group and appropriately reindexed..
all_set = miller.set(indices=all_idx,
crystal_symmetry=symm, anomalous_flag=anomalous_flag)
all_set = all_set.resolution_filter(d_min=d_min)
# dummy for redundancy calculation. dirty way..
if calc_redundancy:
dummy_array = miller.array(miller_set=all_set, data=flex.int(all_set.size()))
merge = dummy_array.merge_equivalents()
cmpl = merge.array().completeness()
redun = merge.redundancies().as_double().mean()
return cmpl, redun
else:
cmpl = all_set.unique_under_symmetry().completeness()
return cmpl
# cluster_completeness()
def get_all_cc_in_cluster(self, clno):
IDs = self.clusters[clno][1]
ret = []
for i in xrange(len(IDs)-1):
for j in xrange(i+1, len(IDs)):
ids = IDs[i]-1, IDs[j]-1
ret.append(self.all_cc[(min(ids), max(ids))])
return ret
# get_all_cc_in_cluster()
def show_cluster_summary(self, d_min, out=null_out()):
tmp = []
self.miller_sets = load_xds_data_only_indices(xac_files=self.arrays.keys(), d_min=d_min)
for clno in self.clusters:
cluster_height, IDs = self.clusters[clno]
cmpl, redun = self.cluster_completeness(clno, anomalous_flag=False, d_min=d_min)
acmpl, aredun = self.cluster_completeness(clno, anomalous_flag=True, d_min=d_min)
all_cc = self.get_all_cc_in_cluster(clno)
ccmean, ccmin = numpy.mean(all_cc), min(all_cc)
tmp.append((clno, IDs, cluster_height, cmpl*100., redun, acmpl*100., aredun, ccmean, ccmin))
self.miller_sets = None # clear memory
tmp.sort(key=lambda x: (-x[4], -x[3])) # redundancy & completeness
out.write("# d_min= %.3f\n" % (d_min))
out.write("# Sorted by redundancy & completeness\n")
out.write("Cluster Number CLh Cmpl Redun ACmpl ARedun CCmean CCmin\n")
for clno, IDs, clh, cmpl, redun, acmpl, aredun, ccmean, ccmin in tmp:
out.write("%7d %6d %5.1f %6.2f %5.1f %6.2f %5.1f %.4f %.4f\n" % (clno, len(IDs), clh, cmpl, redun,
acmpl, aredun, ccmean, ccmin))
return tmp
# show_cluster_summary()
# class CCClustering
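# Minimal usage sketch, executed only when this module is run directly. The
# XDS_ASCII.HKL paths are hypothetical and a working cctbx/yamtbx environment with
# real data is assumed; this only illustrates the intended calling sequence.
if __name__ == "__main__":
    import sys
    xac_files = ["run1/XDS_ASCII.HKL", "run2/XDS_ASCII.HKL", "run3/XDS_ASCII.HKL"]
    ccc = CCClustering(wdir="cc_clustering", xac_files=xac_files, d_min=2.0, min_ios=3)
    ccc.do_clustering(nproc=4, use_normalized=True,
                      cluster_method="ward", distance_eqn="sqrt(1-cc)")
    ccc.show_cluster_summary(d_min=2.0, out=sys.stdout)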
| bsd-3-clause |
loli/sklearn-ensembletrees | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 40 | 23697 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# against nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
| bsd-3-clause |
spennihana/h2o-3 | h2o-py/h2o/model/dim_reduction.py | 4 | 4936 | # -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.utils.shared_utils import can_use_pandas
from h2o.utils.compatibility import * # NOQA
from .model_base import ModelBase
from .metrics_base import * # NOQA
import h2o
class H2ODimReductionModel(ModelBase):
"""
Dimension reduction model, such as PCA or GLRM.
"""
def varimp(self, use_pandas=False):
"""
        Return the importance of components associated with a PCA model.
        :param bool use_pandas: If True, return the result as a pandas DataFrame (default: ``False``).
"""
model = self._model_json["output"]
if "importance" in list(model.keys()) and model["importance"]:
vals = model["importance"].cell_values
header = model["importance"].col_header
if use_pandas and can_use_pandas():
import pandas
return pandas.DataFrame(vals, columns=header)
else:
return vals
else:
print("Warning: This model doesn't have importances of components.")
def num_iterations(self):
"""Get the number of iterations that it took to converge or reach max iterations."""
o = self._model_json["output"]
return o["model_summary"]["number_of_iterations"][0]
def objective(self):
"""Get the final value of the objective function."""
o = self._model_json["output"]
return o["model_summary"]["final_objective_value"][0]
def final_step(self):
"""Get the final step size for the model."""
o = self._model_json["output"]
return o["model_summary"]["final_step_size"][0]
def archetypes(self):
"""The archetypes (Y) of the GLRM model."""
o = self._model_json["output"]
yvals = o["archetypes"].cell_values
archetypes = []
for yidx, yval in enumerate(yvals):
archetypes.append(list(yvals[yidx])[1:])
return archetypes
def reconstruct(self, test_data, reverse_transform=False):
"""
Reconstruct the training data from the model and impute all missing values.
:param H2OFrame test_data: The dataset upon which the model was trained.
:param bool reverse_transform: Whether the transformation of the training data during model-building
should be reversed on the reconstructed frame.
:returns: the approximate reconstruction of the training data.
"""
if test_data is None or test_data.nrow == 0: raise ValueError("Must specify test data")
j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"reconstruct_train": True, "reverse_transform": reverse_transform})
return h2o.get_frame(j["model_metrics"][0]["predictions"]["frame_id"]["name"])
def proj_archetypes(self, test_data, reverse_transform=False):
"""
Convert archetypes of the model into original feature space.
:param H2OFrame test_data: The dataset upon which the model was trained.
:param bool reverse_transform: Whether the transformation of the training data during model-building
should be reversed on the projected archetypes.
:returns: model archetypes projected back into the original training data's feature space.
"""
if test_data is None or test_data.nrow == 0: raise ValueError("Must specify test data")
j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"project_archetypes": True, "reverse_transform": reverse_transform})
return h2o.get_frame(j["model_metrics"][0]["predictions"]["frame_id"]["name"])
def screeplot(self, type="barplot", **kwargs):
"""
Produce the scree plot.
Library ``matplotlib`` is required for this function.
:param str type: either ``"barplot"`` or ``"lines"``.
"""
# check for matplotlib. exit if absent.
is_server = kwargs.pop("server")
if kwargs:
raise ValueError("Unknown arguments %s to screeplot()" % ", ".join(kwargs.keys()))
try:
import matplotlib
if is_server: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print("matplotlib is required for this function!")
return
variances = [s ** 2 for s in self._model_json['output']['importance'].cell_values[0][1:]]
plt.xlabel('Components')
plt.ylabel('Variances')
plt.title('Scree Plot')
plt.xticks(list(range(1, len(variances) + 1)))
if type == "barplot":
plt.bar(list(range(1, len(variances) + 1)), variances)
elif type == "lines":
plt.plot(list(range(1, len(variances) + 1)), variances, 'b--')
if not is_server: plt.show()
| apache-2.0 |
mikebenfield/scikit-learn | doc/conf.py | 22 | 9789 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpy_ext.numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
]
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2007 - 2017, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'reference_url': {
'sklearn': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
'nibabel': 'http://nipy.org/nibabel'}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}'
issues_github_path = 'scikit-learn/scikit-learn'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
walterreade/scikit-learn | examples/model_selection/plot_roc_crossval.py | 37 | 3474 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.model_selection.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
colors = cycle(['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange'])
lw = 2
i = 0
for (train, test), color in zip(cv.split(X, y), colors):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=lw, color=color,
label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k',
label='Luck')
mean_tpr /= cv.get_n_splits(X, y)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, color='g', linestyle='--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=lw)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
massmutual/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
peter-kiechle/tactile-sensors | python/denavit-hartenberg/grasp_preshape_close_ratio.py | 1 | 5763 | # -*- coding: utf-8 -*-
# Load configuration file before pyplot
import os, sys
config_path = os.path.abspath('../matplotlib/')
sys.path.append(config_path)
import configuration as config
# Library path
import os, sys
print("CWD: " + os.getcwd() )
lib_path = os.path.abspath('../../lib')
sys.path.append(lib_path)
import numpy as np
import matplotlib.pyplot as plt
import DenavitHartenberg as DH
# Force reloading of libraries (convenient during active development)
#reload(DH)
UIBK_blue = [0.0, 0.1765, 0.4392]
UIBK_orange = [1.0, 0.5, 0.0]
def project_active_cells_proximal(tsframe, T02):
points = np.empty([0,3])
for y in range(14):
for x in range(6):
if(tsframe[y,x] > 0.0):
vertex = DH.get_xyz_proximal(x, y, T02)
points = np.vstack((points, vertex))
return points
def project_active_cells_distal(tsframe, T03):
points = np.empty([0,3])
for y in range(13):
for x in range(6):
if(tsframe[y,x] > 0.0):
vertex = DH.get_xyz_distal(x, y, T03)
points = np.vstack((points, vertex))
return points
def preshape_pinch(close_ratio):
Phi0 = 90 # Rotational axis (Finger 0 + 2)
# Finger 0
Phi1 = -72 + close_ratio * 82
Phi2 = 72 - close_ratio * 82
# Finger 1
Phi3 = -90
Phi4 = 0
# Finger 2
Phi5 = -72 + close_ratio * 82
Phi6 = 72 - close_ratio * 82
return Phi0, Phi1, Phi2, Phi3, Phi4, Phi5, Phi6
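# Sweep the grasp from open (close_ratio=0) to closed (close_ratio=1) and track the
# distance between a point (x, y) on the distal sensor of finger 0 and the matching
# point on finger 2, using the Denavit-Hartenberg transforms below.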
x = 2.5
y = 5
distance = []
Phi1_list = []
Phi2_list = []
Phi5_list = []
Phi6_list = []
close_ratios = np.linspace(0, 1, 20)
for close_ratio in close_ratios:
Phi0, Phi1, Phi2, Phi3, Phi4, Phi5, Phi6 = preshape_pinch(close_ratio) # Simulated grasp
Phi1_list.append(Phi1)
Phi2_list.append(Phi2)
Phi5_list.append(Phi5)
Phi6_list.append(Phi6)
# Compute transformation matrices
T01_f0, T02_f0, T03_f0 = DH.create_transformation_matrices_f0(Phi0, Phi1, Phi2) # Finger 0
T01_f2, T02_f2, T03_f2 = DH.create_transformation_matrices_f2(Phi0, Phi5, Phi6) # Finger 2
# DH-Transform: finger 0
P_dist = DH.create_P_dist(y)
T_total = T03_f0.dot(P_dist)
p = np.array([y, 0.0, x, 1.0])
xyz_0 = T_total.dot(p)[0:3] # remove w
# DH-Transform: finger 2
P_dist = DH.create_P_dist(y)
T_total = T03_f2.dot(P_dist)
p = np.array([y, 0.0, x, 1.0])
xyz_2 = T_total.dot(p)[0:3] # remove w
# Distance between specified points on finger 0 and 2
distance.append( np.sqrt(np.sum((xyz_0-xyz_2)**2)) )
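# Finite-difference the sampled distances; the largest per-step joint-angle change,
# divided by the assumed constant angular velocity, gives the duration of each step.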
distance_list = np.array(distance)
distance_diff = np.absolute(np.diff(distance_list))
Phi1_diff = np.diff(np.array(Phi1_list))
Phi2_diff = np.diff(np.array(Phi2_list))
Phi5_diff = np.diff(np.array(Phi5_list))
Phi6_diff = np.diff(np.array(Phi6_list))
combined_diff = np.vstack([Phi1_diff, Phi2_diff, Phi5_diff, Phi6_diff])
angle_diff = np.max(combined_diff, axis=0)
angular_velocity = 10 # degree / second
time_steps = angle_diff / angular_velocity
time = np.hstack([ np.array([0]), np.cumsum(angle_diff) ]) / angular_velocity
velocity_distance = distance_diff / time_steps
velocity_distance = np.hstack([velocity_distance[0], velocity_distance ])
############
# Plotting
###########
text_width = 6.30045 # LaTeX text width in inches
golden_ratio = (1 + np.sqrt(5) ) / 2.0
size_factor = 1.0
figure_width = size_factor*text_width
#figure_height = (figure_width / golden_ratio)
figure_height = 1.0 * figure_width
figure_size = [figure_width, figure_height]
config.load_config_medium()
#---------------------------------------------------------
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=False, sharey=False, squeeze=True, figsize=figure_size)
#--------------------------------------------------------------------------
ax = axes[0]
ax.plot(close_ratios[1:-1], distance[1:-1], linestyle="-", color=config.UIBK_orange, alpha=1.0, label="Distance",
marker='o', markeredgewidth=0.75, markersize=3.0, markeredgecolor=config.UIBK_orange, markerfacecolor=[1.0, 1.0, 1.0] )
ax.set_xlabel("Close ratio")
ax.set_ylabel(r"Distance [mm]", rotation=90)
#.set_xlim([0, 2.5])
#ax.set_ylim([0, 850])
# Legend
ax.legend(loc = 'upper right', fancybox=True, shadow=False, framealpha=1.0)
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
ax = axes[1]
ax.plot(time[1:-1], distance[1:-1], linestyle="-", color=config.UIBK_orange, alpha=1.0, label="Distance",
marker='o', markeredgewidth=0.75, markersize=3.0, markeredgecolor=config.UIBK_orange, markerfacecolor=[1.0, 1.0, 1.0] )
ax.set_xlabel("Time [s]")
ax.set_ylabel(r"Distance [mm]", rotation=90)
ax.set_xlim([0, time[-1]])
#ax.set_ylim([0, 850])
# Legend
ax.legend(loc = 'upper right', fancybox=True, shadow=False, framealpha=1.0)
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
ax = axes[2]
ax.plot(time[1:-1], velocity_distance[1:-1], linestyle="-", color=config.UIBK_orange, alpha=1.0, label="Velocity",
marker='o', markeredgewidth=0.75, markersize=3.0, markeredgecolor=config.UIBK_orange, markerfacecolor=[1.0, 1.0, 1.0] )
ax.set_xlabel("Time [s]")
ax.set_ylabel(r"Velocity [mm / s]", rotation=90)
ax.set_xlim([0, time[-1]])
#ax.set_ylim([0, 850])
# Legend
ax.legend(loc = 'lower right', fancybox=True, shadow=False, framealpha=1.0)
#--------------------------------------------------------------------------
fig.tight_layout()
#plt.show()
plotname = "grasp_preshape_close_ratio"
fig.savefig(plotname+".pdf", pad_inches=0, dpi=fig.dpi) # pdf
#fig.savefig(plotname+".pgf", pad_inches=0, dpi=fig.dpi) # pgf | gpl-3.0 |
CTJChen/ctc_astropylib | mass_to_light.py | 1 | 5799 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = """
If you have a set of SDSS magnitudes at a given redshift, you can get
the stellar mass by doing the following:
from mass_to_light import *
sdss_mass(sdssmags, z=redshift, color='r-z')
If you want to change the cosmology used, you should modify the source code below.
Also, the software loads a table from Zibetti et al. 2009; you can change the path as you wish.
"""
import numpy as np
from astropy.coordinates.distances import Distance
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
cosmo=FlatLambdaCDM(H0=70,Om0=0.3)
import pandas as pd
from os.path import expanduser
home = expanduser("~")
storez09=pd.HDFStore(home+'/zibetti2009.h5')
store=storez09
dict_wav={'u':0.3543,'g':0.4770,'r':0.6231,'i':0.7625,'z':0.9134,
'U':0.36,'B':0.44,'V':0.55,'R':0.64,'I':0.79}
mabs_sun = {'u':6.41, 'g':5.15, 'r':4.67, 'i':4.56, 'z':4.53}
def flux_to_nulnu(flux, z, wav, ld=None, lsun=None, cosmo=None,mujy=None):
# convert a flux to nulnu at a certain wavelength in the unit of erg/s
# wav should be in micron, and flux should be in Jy
if cosmo is None:
# print 'no preset cosmology,use FlatLCDM w/ h0.7'
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
dlum = Distance(z=z, unit=u.cm, cosmology=cosmo).value
freq = 3e14/wav
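# 1 Jy = 1e-23 erg s^-1 cm^-2 Hz^-1, so dividing by 1e23 (1e29 for micro-Jy input)
# converts the flux density to cgs before forming nu*L_nu.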
if mujy:jy2erg=1e29
else:jy2erg=1e23
nulnu = np.log10(flux*4*np.pi*dlum**2*freq/((1.+z)*jy2erg))
if lsun is True:
nulnu -= 33.5827
return nulnu
def sdss_mag_to_jy(inp, band=None, mujy=None, inv=None):
#
# in Jy
inp = np.asarray(inp)
if not inv:
if band == 'u':
fiso_sdss = 3767.266
elif band == 'g':
fiso_sdss = 3631.
elif band == 'r':
fiso_sdss = 3631.
if band == 'i':
fiso_sdss = 3631.
if band == 'z':
fiso_sdss = 3564.727
elif band is None:
fiso_sdss = np.asarray([3767.266, 3631., 3631., 3631., 3564.727])
sdss_jy = 10**(-1*inp/2.5)*fiso_sdss
if mujy is True:
sdss_jy = 1e6*sdss_jy
return sdss_jy
else:
if mujy:inp=inp/1e6
if band == 'u':
fiso_sdss = 3767.266
elif band == 'g':
fiso_sdss = 3631.
elif band == 'r':
fiso_sdss = 3631.
if band == 'i':
fiso_sdss = 3631.
if band == 'z':
fiso_sdss = 3564.727
elif band is None:
fiso_sdss = np.asarray([3767.266, 3631., 3631., 3631., 3564.727])
sdss_mag = np.log10(inp/fiso_sdss)*(-2.5)
return sdss_mag
def mtl_mstar(mags,band,color,color_str,redshift,ld=None,close=None, method='z09'):
'''
Use the Zibetti 2009 table B1 values (MTL = mass-to-light ratio)
or Bell 2003, default is z09
to estimate stellar mass quickly
input:
1. magnitude(s) : array of magnitudes
if it's ubvri, the mags should be in AB
2. band(s) : array of strings, in the order of blue->red, don't use u/U band
3. colors for sdss:u-g~z, g-r~z,r-i,r-z
4. redshift :
set ld if luminosity distance is passed instead of redshift
'''
def get_lum(mag,redshift,ld=ld):
#convert mags to fluxes
flux=[]
if len(band)==5:
flux=sdss_mag_to_jy(mag)
else:
for i in range(len(band)):
flux.append(sdss_mag_to_jy(mag[i],band=band[i]))
#now calculate luminosity distances using z and flux
flux=np.asarray(flux)
lband=[]
lband=flux_to_nulnu(flux,redshift,wav,lsun=True,cosmo=cosmo,ld=ld)
#in log lsun unit
return lband
def get_pars(band,color_str):
if method == 'z09':
mtl_sdss=store['z09_sdss']
#mtl_bri=store['z09_bri']
elif method == 'b03':
mtl_bri=store['b03_bri']
mtl_sdss = store['b03_sdss']
#if ubv is True:
# pars=mtl_bri.loc[color_str,band].values
else:
print('method could only be z09 or b03')
pars = mtl_sdss.loc[color_str,band].values
return pars
wav=np.zeros(len(band),dtype=np.float64)
for i in range(len(band)):
wav[i]=dict_wav[band[i]]
#Using lband, pars and mag_band to calculate mass_band
lband=get_lum(mags,redshift)
#print lband
mass_band=np.zeros((len(band),len(color_str)),dtype=np.float64)
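# Colour-based M/L relation: log10(M*) = log10(L_band/Lsun) + a + b*colour,
# with the (a, b) coefficients read from the Zibetti 2009 or Bell 2003 tables.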
for i in range(len(band)):
for j in range(len(color_str)):
pars = get_pars(band[i],color_str[j])
mass_band[i,j]=lband[i]+pars[0]+pars[1]*color[j]
if close:store.close()
return mass_band
def sdss_mass(sdssmags, z=None,color = None, band=None, method='z09'):
'''
call z09_mstar to calculate the stellar mass
using the input sdss magnitudes
default: band = 'i', color = 'g-i'
'''
if color is None:
color = 'g-i'
if band is None:
band = 'i'
if z is None:
z=0.000000001
umag, gmag, rmag, imag, zmag = sdssmags
color_str = ['u-g','u-r','u-i','u-z','g-r','g-i','g-z','r-i','r-z']
sdsscolor = [umag-gmag, umag-rmag,umag-imag,umag-zmag,gmag-rmag,gmag-imag,gmag-zmag,rmag-imag,rmag-zmag]
colors=pd.DataFrame({'bands':color_str,'color':sdsscolor})
mags = [gmag, rmag, imag, zmag]
bands = ['g','r','i','z']
if method == 'z09':
zmstar = mtl_mstar(mags,bands,sdsscolor,color_str,z,method=method)
mstar = pd.DataFrame(zmstar,index=bands,columns=color_str)
elif method == 'b03':
bmstar = mtl_mstar(mags,bands,sdsscolor,color_str,z,method=method)
mstar = pd.DataFrame(bmstar,index=bands,columns=color_str)
else:
print('method can only be z09 or b03')
return mstar.loc[band,color]
| apache-2.0 |
zhenv5/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
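# Reference Passive-Aggressive update: step size min(C, loss/||x||^2) for the
# 'hinge'/'epsilon_insensitive' losses (PA-I) and loss/(||x||^2 + 1/(2C)) for the
# squared variants (PA-II), signed by the label or by the sign of the residual.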
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
wfclark/hamlet | remove_members.py | 2 | 1025 | import sys
import os
import datetime
import psycopg2
import pandas
from subprocess import call, Popen
print "removing temporary files..."
conn_string = "dbname='hamlethurricane' user=postgres port='5432' host='127.0.0.1' password='password'"
try:
conn = psycopg2.connect(conn_string)
except Exception as e:
print str(e)
sys.exit()
os.system('exit')
hurricane_name = 'ARTHUR'
dataframe_cur = conn.cursor()
dataframe_sql = """Select * from hurricane_{}""".format(hurricane_name)
dataframe_cur.execute(dataframe_sql)
data = dataframe_cur.fetchall()
colnames = [desc[0] for desc in dataframe_cur.description]
dataframe = pandas.DataFrame(data)
dataframe.columns = colnames
conn.commit()
range_feat = range(len(dataframe))
range_feat_strp = str(range_feat).strip('[]')
range_feat_strp_v2 = range_feat_strp.split(',')
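# Delete the temporary per-row output files named <hurricane>_<index>.* (one set per
# track point in the hurricane table), presumably left behind by earlier pipeline steps.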
for key in range(1,len(dataframe)):
remove_members = 'sudo rm {}_{}.*'.format(hurricane_name, key)
os.system(remove_members)
print "finish" | bsd-3-clause |
hughperkins/gpu-experiments | gpuexperiments/occupancy_dyn_graphs.py | 1 | 2094 | """
Try using dynamic shared memory; see if it gets optimized away, or affects occupancy
"""
from __future__ import print_function, division
import argparse
import string
import numpy as np
import os
import matplotlib.pyplot as plt
plt.rcdefaults()
import matplotlib.pyplot as plt
from os.path import join
parser = argparse.ArgumentParser()
parser.add_argument('--devicename')
args = parser.parse_args()
times = []
assert args.devicename is not None
deviceNameSimple = args.devicename
f = open('results/occupancy_dyn_%s.tsv' % args.devicename, 'r')
f.readline()
for line in f:
split_line = line.split('\t')
times.append({'name': split_line[0], 'time': float(split_line[1]), 'flops': float(split_line[2])})
f.close()
X32_list = []
Y32_list = []
X64_list = []
Y64_list = []
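# Kernel names of the form 'k1_g1024_b<blocksize>_s<sharedKiB>' encode the block size
# and the dynamic shared memory per block; plot GFLOPS against shared memory for each
# block size.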
for timeinfo in times:
name = timeinfo['name']
if not name.startswith('k1_g1024_b'):
continue
block = int(name.split('_')[2].replace('b', ''))
x = int(name.split('_')[-1].replace('s', ''))
y = timeinfo['flops']
if block == 32:
X32_list.append(x)
Y32_list.append(y)
elif block == 64:
X64_list.append(x)
Y64_list.append(y)
X32 = np.array(X32_list)
X64 = np.array(X64_list)
Y32 = np.array(Y32_list)
Y64 = np.array(Y64_list)
plt.plot(X32, Y32, label='blocksize 32')
plt.plot(X64, Y64, label='blocksize 64')
plt.axis([0, max(X32), 0, max(Y64)])
plt.title(deviceNameSimple)
plt.xlabel('Shared memory per block (KiB)')
plt.ylabel('GFLOPS')
legend = plt.legend(loc='upper right') # fontsize='x-large')
plt.savefig('/tmp/occupancy_by_shared_%s.png' % deviceNameSimple, dpi=150)
plt.close()
X_list = []
Y_list = []
for timeinfo in times:
name = timeinfo['name']
if not name.startswith('kernel_bsm'):
continue
X_list.append(int(name.split('bsm')[1].split(' ')[0]))
Y_list.append(timeinfo['flops'])
X = np.array(X_list)
Y = np.array(Y_list)
plt.plot(X, Y)
plt.axis([0, max(X), 0, max(Y)])
plt.title(deviceNameSimple)
plt.xlabel('blocks per SM')
plt.ylabel('GFLOPS')
plt.savefig('/tmp/occupancy_%s.png' % deviceNameSimple, dpi=150)
| bsd-2-clause |
abali96/Shapely | docs/code/parallel_offset.py | 5 | 2025 | from matplotlib import pyplot
from shapely.geometry import LineString
from descartes import PolygonPatch
from figures import SIZE, BLUE, GRAY
def plot_coords(ax, x, y, color='#999999', zorder=1):
ax.plot(x, y, 'o', color=color, zorder=zorder)
def plot_line(ax, ob, color=GRAY):
parts = hasattr(ob, 'geoms') and ob or [ob]
for part in parts:
x, y = part.xy
ax.plot(x, y, color=color, linewidth=3, solid_capstyle='round', zorder=1)
def set_limits(ax, x_range, y_range):
ax.set_xlim(*x_range)
ax.set_xticks(range(*x_range) + [x_range[-1]])
ax.set_ylim(*y_range)
ax.set_yticks(range(*y_range) + [y_range[-1]])
ax.set_aspect(1)
line = LineString([(0, 0), (1, 1), (0, 2), (2, 2), (3, 1), (1, 0)])
line_bounds = line.bounds
ax_range = [int(line_bounds[0] - 1.0), int(line_bounds[2] + 1.0)]
ay_range = [int(line_bounds[1] - 1.0), int(line_bounds[3] + 1.0)]
fig = pyplot.figure(1, figsize=(SIZE[0], 2 * SIZE[1]), dpi=90)
# 1
ax = fig.add_subplot(221)
plot_line(ax, line)
x, y = list(line.coords)[0]
plot_coords(ax, x, y)
offset = line.parallel_offset(0.5, 'left', join_style=1)
plot_line(ax, offset, color=BLUE)
ax.set_title('a) left, round')
set_limits(ax, ax_range, ay_range)
#2
ax = fig.add_subplot(222)
plot_line(ax, line)
x, y = list(line.coords)[0]
plot_coords(ax, x, y)
offset = line.parallel_offset(0.5, 'left', join_style=2)
plot_line(ax, offset, color=BLUE)
ax.set_title('b) left, mitred')
set_limits(ax, ax_range, ay_range)
#3
ax = fig.add_subplot(223)
plot_line(ax, line)
x, y = list(line.coords)[0]
plot_coords(ax, x, y)
offset = line.parallel_offset(0.5, 'left', join_style=3)
plot_line(ax, offset, color=BLUE)
ax.set_title('c) left, beveled')
set_limits(ax, ax_range, ay_range)
#4
ax = fig.add_subplot(224)
plot_line(ax, line)
x, y = list(line.coords)[0]
plot_coords(ax, x, y)
offset = line.parallel_offset(0.5, 'right', join_style=1)
plot_line(ax, offset, color=BLUE)
ax.set_title('d) right, round')
set_limits(ax, ax_range, ay_range)
pyplot.show()
| bsd-3-clause |
highlando/dolfin_navier_scipy | tests/deprecatedtests/solve_nse_quadraticterm.py | 1 | 7063 | import dolfin
import scipy.sparse as sps
import scipy.sparse.linalg as spsla
import numpy as np
# from dolfin import dx, grad, inner
import dolfin_navier_scipy as dns
import dolfin_navier_scipy.dolfin_to_sparrays as dnsts
import dolfin_navier_scipy.stokes_navier_utils as snu
import dolfin_navier_scipy.data_output_utils as dou
# dolfin.parameters.linear_algebra_backend = "uBLAS"
linnsesol = True # whether to linearize about the NSE solution
debug = False
# debug = True
timeint = False
compevs = False
# timeint = True
compevs = True
def linearzd_quadterm(H, linv, hlstr=None):
print('TODO: this function will be deprecated soon')
print('see ~/work/code/nse-quad-refree/python/conv_tensor_utils.py')
print('for a maintained version')
try:
HLm = dou.load_spa(hlstr + '.mtx')
print('loaded `hlmat`')
except IOError:
print('assembling hlmat ...')
nv = linv.size
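# Linearise the quadratic term H*(v kron v) about linv: its Jacobian is
# H*(I kron linv + linv kron I), assembled column by column to limit memory use.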
# HLm = np.array(H * (sps.kron(sps.eye(nv), linv) +
# sps.kron(linv, sps.eye(nv))))
# that seems a fast option but too memory consuming for my laptop
HL = []
for k in range(nv):
ek = np.zeros((nv, 1))
ek[k] = 1
H1k = sps.csr_matrix(H*np.kron(ek, linv))
H2k = sps.csr_matrix(H*np.kron(linv, ek))
HL.append(H1k + H2k)
HLm = sps.hstack(HL)
assert np.linalg.norm(2*H*np.kron(linv, linv) - HLm*linv) < 1e-12
dou.save_spa(HLm, hlstr)
return HLm
def test_qbdae_ass(problemname='cylinderwake', N=1, Re=None, nu=3e-2,
t0=0.0, tE=1.0, Nts=100, use_saved_mats=None):
trange = np.linspace(t0, tE, Nts+1)
DT = (tE-t0)/Nts
rdir = 'results/'
ddir = 'data/'
if use_saved_mats is None:
femp, stokesmatsc, rhsd_vfrc, rhsd_stbc = \
dns.problem_setups.get_sysmats(problem=problemname, N=N, Re=Re)
invinds = femp['invinds']
A, J, M = stokesmatsc['A'], stokesmatsc['J'], stokesmatsc['M']
L = 0*A
fvc, fpc = rhsd_vfrc['fvc'], rhsd_vfrc['fpr']
fv_stbc, fp_stbc = rhsd_stbc['fv'], rhsd_stbc['fp']
hstr = ddir + problemname + '_N{0}_hmat'.format(N)
try:
hmat = dou.load_spa(hstr)
print('loaded `hmat`')
except IOError:
print('assembling hmat ...')
hmat = dnsts.ass_convmat_asmatquad(W=femp['V'], invindsw=invinds)
dou.save_spa(hmat, hstr)
invinds = femp['invinds']
NV, NP = invinds.shape[0], J.shape[0]
zerv = np.zeros((NV, 1))
bc_conv, bc_rhs_conv, rhsbc_convbc = \
snu.get_v_conv_conts(prev_v=zerv, V=femp['V'], invinds=invinds,
diribcs=femp['diribcs'], Picard=False)
fp = fp_stbc + fpc
fv = fv_stbc + fvc - bc_rhs_conv
if linnsesol:
vp_nse, _ = snu.\
solve_steadystate_nse(A=A, J=J, JT=None, M=M,
fv=fv_stbc + fvc,
fp=fp_stbc + fpc,
V=femp['V'], Q=femp['Q'],
invinds=invinds,
diribcs=femp['diribcs'],
return_vp=False, ppin=-1,
N=N, nu=nu,
clearprvdata=False)
old_v = vp_nse[:NV]
else:
import sadptprj_riclyap_adi.lin_alg_utils as lau
# Stokes solution as initial value
vp_stokes = lau.solve_sadpnt_smw(amat=A, jmat=J,
rhsv=fv_stbc + fvc,
rhsp=fp_stbc + fpc)
old_v = vp_stokes[:NV]
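# Implicit-Euler saddle-point system: [[M + DT*(A + conv), J.T], [J, 0]].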
sysmat = sps.vstack([sps.hstack([M+DT*(A+bc_conv), J.T]),
sps.hstack([J, sps.csc_matrix((NP, NP))])])
if use_saved_mats is not None:
# if saved as in ../get_exp_mats
import scipy.io
mats = scipy.io.loadmat(use_saved_mats)
A = - mats['A']
L = - mats['L']
Re = mats['Re']
N = A.shape[0]
M = mats['M']
J = mats['J']
hmat = -mats['H']
fv = mats['fv']
fp = mats['fp']
NV, NP = fv.shape[0], fp.shape[0]
old_v = mats['ss_stokes']
sysmat = sps.vstack([sps.hstack([M+DT*A, J.T]),
sps.hstack([J, sps.csc_matrix((NP, NP))])])
if compevs:
import matplotlib.pyplot as plt
import scipy.linalg as spla
hlstr = ddir + problemname + '_N{0}_Re{1}Nse{2}_hlmat'.\
format(N, Re, linnsesol)
HL = linearzd_quadterm(hmat, old_v, hlstr=hlstr)
print(HL.shape)
asysmat = sps.vstack([sps.hstack([-(A-L+HL), J.T]),
sps.hstack([J, sps.csc_matrix((NP, NP))])])
msysmat = sps.vstack([sps.hstack([M, sps.csc_matrix((NV, NP))]),
sps.hstack([sps.csc_matrix((NP, NV)),
sps.csc_matrix((NP, NP))])])
levstr = ddir + problemname + '_N{0}Re{1}Nse{2}_levs'.\
format(N, Re, linnsesol)
try:
levs = dou.load_npa(levstr)
if debug:
raise IOError()
print('loaded the eigenvalues of the linearized system')
except IOError:
print('computing the eigenvalues of the linearized system')
A = asysmat.todense()
M = msysmat.todense()
levs = spla.eigvals(A, M, overwrite_a=True, check_finite=False)
dou.save_npa(levs, levstr)
plt.figure(1)
# plt.xlim((-25, 15))
# plt.ylim((-50, 50))
plt.plot(np.real(levs), np.imag(levs), '+')
plt.show(block=False)
if timeint:
print('computing LU once...')
sysmati = spsla.factorized(sysmat)
vfile = dolfin.File(rdir + problemname + 'qdae__vel.pvd')
pfile = dolfin.File(rdir + problemname + 'qdae__p.pvd')
prvoutdict = dict(V=femp['V'], Q=femp['Q'], vfile=vfile, pfile=pfile,
invinds=invinds, diribcs=femp['diribcs'],
vp=None, t=None, writeoutput=True)
print('doing the time loop...')
for t in trange:
crhsv = M*old_v + DT*(fv - hmat*np.kron(old_v, old_v))
crhs = np.vstack([crhsv, fp])
vp_new = np.atleast_2d(sysmati(crhs.flatten())).T
prvoutdict.update(dict(vp=vp_new, t=t))
dou.output_paraview(**prvoutdict)
old_v = vp_new[:NV]
print(t, np.linalg.norm(old_v))
if __name__ == '__main__':
# test_qbdae_ass(problemname='cylinderwake', N=1, Re=1e2, tE=2.0, Nts=800)
test_qbdae_ass(problemname='cylinderwake', tE=2.0, Nts=800,
use_saved_mats='../data/' +
# 'cylinderwakequadform__mats_N3022_Re100.mat')
'cylinderwakequadform__mats_N5812_Re100.mat')
| gpl-3.0 |
aditipawde/TimeTable1 | TimeTable1/try (copy).py | 1 | 3163 | import dataAccessSQLAlchemy as da
import pandas as pd
import random
import numpy as np
print("welcome");
f_join_subject_subjectClassTeacher = da.execquery('select s.subjectId, subjectShortName, totalHrs, eachSlot, c.classId, teacherId from subject s, subjectClassTeacher c where s.subjectId = c.subjectId;')
f_join_subject_subjectClassTeacher.insert(5,'batchId','-')
f_join_subject_subjectClassTeacher.insert(6,'category','T') #T for theory
#f_join_subject_subjectClassTeacher.rename(columns={'classId':'classOrBatchId'}, inplace=True)
f_join_subject_subjectBatchTeacher = da.execquery('select s.subjectId, subjectShortName, totalHrs, eachSlot, sbt.batchId, bc.classId, teacherId from subject s, subjectBatchTeacher sbt, batchClass bc where s.subjectId = sbt.subjectId AND sbt.batchId = bc.batchId;')
f_join_subject_subjectBatchTeacher.insert(6,'category','L') #L for Lab
#f_join_subject_subjectBatchTeacher.rename(columns={'batchId':'classOrBatchId'}, inplace=True)
f_subjectBatchClassTeacher = pd.concat([f_join_subject_subjectClassTeacher, f_join_subject_subjectBatchTeacher])
print(f_subjectBatchClassTeacher)
#x = f_subject.join(f_subjectBatchTeacher.set_index('subjectId'), on='subjectId')
x=f_subjectBatchClassTeacher
x=x.reset_index()
x.to_csv("x.csv")
totallectures_list = (x['totalHrs'] / x['eachSlot'])
# Create empty dataframe to save all the requirements
req_all = pd.DataFrame(index=range(int(totallectures_list.sum())), columns=list(x))
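# Expand every (subject, class/batch, teacher) requirement into one row per lecture:
# each source row is repeated totalHrs/eachSlot times, decrementing totalHrs as it goes.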
j = 0
for i in range(len(req_all)):
if((x.iloc[j]['totalHrs']/x.iloc[j]['eachSlot'])>0):
req_all.loc[[i]] = x.iloc[[j]].values
x.set_value(j,'totalHrs', x.loc[j]['totalHrs'] - x.loc[j]['eachSlot'])
if (x.iloc[j]['totalHrs'] == 0):
j = j + 1
#print(req_all)
req_all.to_csv("req_all.csv")
#These values need to be calculated from the database
n_classes=14
n_days=5
n_slots=10
n_maxlecsperslot=4
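# 4-D timetable indexed as [class, day, slot, parallel lecture]; NaN marks a free position.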
timetable_np = np.empty((n_classes, n_days, n_slots, n_maxlecsperslot))*np.nan
#print(timetable_np)
for c in (set(req_all.classId)): #First take one class
#print(c)
#http://stackoverflow.com/questions/17071871/select-rows-from-a-dataframe-based-on-values-in-a-column-in-pandas
req_forgivenclass=req_all.loc[req_all['classId'] == c] #List all the requirements for that class in req_forgivenclass
#print(req_forgivenclass)
#print(set(req_forgivenclass.index)) #These are the indices of the requirements for this class
for req in set(req_forgivenclass.index): #Schedule each of these requirements
notassigned = 1
while(notassigned==1): #Keep on scheduling till not found
r_day=random.randint(0,n_days-1)
r_slot = random.randint(0, n_slots-1)
r_lecnumber=random.randint(0,n_maxlecsperslot-1)
if(np.isnan(np.sum(timetable_np[c,r_day,r_slot,r_lecnumber]))): #Check if that slot is empty, this way of using np.isnan is the fastest way of doing so
timetable_np[c,r_day,r_slot,r_lecnumber]=req
notassigned=0
arr_2d = timetable_np.reshape(70,40)
print(arr_2d)
pd.DataFrame(arr_2d).to_csv('tt_2d')
read_arr_2d = pd.read_csv('tt_2d', index_col=0).values  # assumed intent: read the saved 2-D timetable back
#print(timetable_np[c,:,:,:])
| lgpl-3.0 |
bikong2/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (which needs to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
krikru/tensorflow-opencl | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 12 | 5024 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
return predictions, loss, control_flow_ops.no_op()
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
return predictions, loss, control_flow_ops.no_op()
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
structrans/Canon | canon/pattern/model.py | 1 | 5160 | import numpy as np
import logging
from timeit import default_timer as timer
from sklearn.mixture import GMM
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
class Model:
def __init__(self):
self.__n_features = None
self.__n_clusters = None
self.__preprocessors = []
self._estimator = None
self._n_features_transformed = None
def train(self, data, preprocessors=None, n_clusters=None, init=4):
n_patterns = len(data)
n_features = len(data[0])
self.__n_features = n_features
t_start = timer()
logging.debug('Pre-processing %d patterns with %d features ...' % (n_patterns, n_features))
if preprocessors is None:
preprocessors = [StandardScaler()]
for preprocessor in preprocessors:
data = preprocessor.fit_transform(data)
self.__preprocessors = preprocessors
n_features = len(data[0])
self._n_features_transformed = n_features
logging.info('Finished pre-processing of %d patterns with %d features. %.3f sec' %
(n_patterns, n_features, timer() - t_start))
self._estimator, self.__n_clusters = self._fit(data, n_clusters=n_clusters)
def _fit(self, data, n_clusters=0):
return None, n_clusters
def score(self, data):
if len(data[0]) != self.__n_features:
raise ValueError('The number of features [%d] in the data is different from that in the model [%d].' %
(len(data[0]), self.__n_features))
for preprocessor in self.__preprocessors:
data = preprocessor.transform(data)
if len(data[0]) != self._n_features_transformed:
raise ValueError(
'The number of transformed features [%d] in the data is different from that in the model [%d].' %
(len(data[0]), self._n_features_transformed))
return self._score_transformed_data(data)
def _score_transformed_data(self, data):
return [record[0] for record in data]
class GMMModel(Model):
def __init__(self, min_prob=0.8):
Model.__init__(self)
self.__min_prob = min_prob
def _fit(self, samples, n_clusters=None):
t_start = timer()
n_clusters = len(samples)
best_estimator = None
min_aic = None
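        # Starting from one mixture component per sample, repeatedly halve the
        # number of components and keep the fit with the lowest AIC; the break
        # condition below stops the search once the AIC starts rising sharply.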
while n_clusters >= 16:
n_clusters /= 2
estimator = self.gmm_fit(samples, n_clusters)
aic = estimator.aic(samples)
if min_aic is None:
min_aic = aic
if aic > min_aic and min(abs(aic), abs(min_aic)) < 0.5 * max(abs(min_aic), abs(aic)):
break
elif aic <= min_aic:
best_estimator, min_aic = estimator, aic
n_clusters = best_estimator.n_components
logging.info('Finally got a GMM model on %d patterns using %d features for %d clusters. %.3f sec. AIC = %g' %
(len(samples), self._n_features_transformed, n_clusters, timer() - t_start,
best_estimator.aic(samples)))
return best_estimator, n_clusters
def gmm_fit(self, samples, n_clusters):
t_start = timer()
n_features = len(samples[0])
logging.debug('Running GMM on %d patterns using %d features for %d clusters ...' %
(len(samples), n_features, n_clusters))
estimator = GMM(n_components=n_clusters)
estimator.fit(samples)
logging.info('Finished GMM on %d patterns using %d features for %d clusters. %.3f sec. AIC = %g' %
(len(samples), n_features, n_clusters, timer() - t_start,
estimator.aic(samples)))
return estimator
def _score_transformed_data(self, data):
labels = [None] * len(data)
probs = self._estimator.predict_proba(data)
for i, p in enumerate(probs):
max_p = np.max(p)
if max_p >= self.__min_prob:
labels[i] = (np.where(p == max_p)[0][0], max_p)
return labels
class KMeansModel(Model):
def __init__(self):
Model.__init__(self)
self._centroids = None
# self._inertia = None
def centroids(self):
return self._centroids
def _fit(self, samples, n_clusters=2, init=4):
t_start = timer()
n_features = len(samples[0])
logging.debug('Running KMeans on %d patterns using %d features for %d clusters ...' %
(len(samples), n_features, n_clusters))
estimator = KMeans(n_clusters=n_clusters, n_init=init)
estimator.fit(samples)
# estimator.fit_transform(samples)
# estimator.fit_predict(samples)
self._centroids = estimator.cluster_centers_
# self._inertia = estimator.inertia_
logging.info('Finished KMeans on %d patterns using %d features for %d clusters. %.3f sec.' %
(len(samples), n_features, n_clusters, timer() - t_start))
return estimator, n_clusters
def _score_transformed_data(self, data):
return self._estimator.predict(data)
| mit |
kiennquit2011/androguard | elsim/elsim/elsim.py | 37 | 16175 | # This file is part of Elsim
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Elsim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elsim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Elsim. If not, see <http://www.gnu.org/licenses/>.
import logging
ELSIM_VERSION = 0.2
log_elsim = logging.getLogger("elsim")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
log_elsim.addHandler(console_handler)
log_runtime = logging.getLogger("elsim.runtime") # logs at runtime
log_interactive = logging.getLogger("elsim.interactive") # logs in interactive functions
log_loading = logging.getLogger("elsim.loading") # logs when loading
def set_debug():
log_elsim.setLevel( logging.DEBUG )
def get_debug():
return log_elsim.getEffectiveLevel() == logging.DEBUG
def warning(x):
log_runtime.warning(x)
def error(x):
log_runtime.error(x)
raise()
def debug(x):
log_runtime.debug(x)
from similarity.similarity import *
FILTER_ELEMENT_METH = "FILTER_ELEMENT_METH"
FILTER_CHECKSUM_METH = "FILTER_CHECKSUM_METH" # function to checksum an element
FILTER_SIM_METH = "FILTER_SIM_METH" # function to calculate the similarity between two elements
FILTER_SORT_METH = "FILTER_SORT_METH" # function to sort all similar elements
FILTER_SORT_VALUE = "FILTER_SORT_VALUE" # value which used in the sort method to eliminate not interesting comparisons
FILTER_SKIPPED_METH = "FILTER_SKIPPED_METH" # object to skip elements
FILTER_SIM_VALUE_METH = "FILTER_SIM_VALUE_METH" # function to modify values of the similarity
BASE = "base"
ELEMENTS = "elements"
HASHSUM = "hashsum"
SIMILAR_ELEMENTS = "similar_elements"
HASHSUM_SIMILAR_ELEMENTS = "hash_similar_elements"
NEW_ELEMENTS = "newelements"
HASHSUM_NEW_ELEMENTS = "hash_new_elements"
DELETED_ELEMENTS = "deletedelements"
IDENTICAL_ELEMENTS = "identicalelements"
INTERNAL_IDENTICAL_ELEMENTS = "internal identical elements"
SKIPPED_ELEMENTS = "skippedelements"
SIMILARITY_ELEMENTS = "similarity_elements"
SIMILARITY_SORT_ELEMENTS = "similarity_sort_elements"
class ElsimNeighbors(object):
def __init__(self, x, ys):
import numpy as np
from sklearn.neighbors import NearestNeighbors
#print x, ys
CI = np.array( [x.checksum.get_signature_entropy(), x.checksum.get_entropy()] )
#print CI, x.get_info()
#print
for i in ys:
CI = np.vstack( (CI, [i.checksum.get_signature_entropy(), i.checksum.get_entropy()]) )
#idx = 0
#for i in np.array(CI)[1:]:
# print idx+1, i, ys[idx].get_info()
# idx += 1
self.neigh = NearestNeighbors(2, 0.4)
self.neigh.fit(np.array(CI))
#print self.neigh.kneighbors( CI[0], len(CI) )
self.CI = CI
self.ys = ys
def cmp_elements(self):
z = self.neigh.kneighbors( self.CI[0], 5 )
l = []
cmp_values = z[0][0]
cmp_elements = z[1][0]
idx = 1
for i in cmp_elements[1:]:
#if cmp_values[idx] > 1.0:
# break
#print i, cmp_values[idx], self.ys[ i - 1 ].get_info()
l.append( self.ys[ i - 1 ] )
idx += 1
return l
def split_elements(el, els):
e1 = {}
for i in els:
e1[ i ] = el.get_associated_element( i )
return e1
####
# elements : entropy raw, hash, signature
#
# set elements : hash
# hash table elements : hash --> element
class Elsim(object):
def __init__(self, e1, e2, F, T=None, C=None, libnative=True, libpath="elsim/elsim/similarity/libsimilarity/libsimilarity.so"):
self.e1 = e1
self.e2 = e2
self.F = F
self.compressor = SNAPPY_COMPRESS
set_debug()
if T != None:
self.F[ FILTER_SORT_VALUE ] = T
if isinstance(libnative, str):
libpath = libnative
libnative = True
self.sim = SIMILARITY( libpath, libnative )
if C != None:
if C in H_COMPRESSOR:
self.compressor = H_COMPRESSOR[ C ]
self.sim.set_compress_type( self.compressor )
else:
self.sim.set_compress_type( self.compressor )
self.filters = {}
self._init_filters()
self._init_index_elements()
self._init_similarity()
self._init_sort_elements()
self._init_new_elements()
def _init_filters(self):
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ HASHSUM ] = {}
self.filters[ IDENTICAL_ELEMENTS ] = set()
self.filters[ SIMILAR_ELEMENTS ] = []
self.filters[ HASHSUM_SIMILAR_ELEMENTS ] = []
self.filters[ NEW_ELEMENTS ] = set()
self.filters[ HASHSUM_NEW_ELEMENTS ] = []
self.filters[ DELETED_ELEMENTS ] = []
self.filters[ SKIPPED_ELEMENTS ] = []
self.filters[ ELEMENTS ][ self.e1 ] = []
self.filters[ HASHSUM ][ self.e1 ] = []
self.filters[ ELEMENTS ][ self.e2 ] = []
self.filters[ HASHSUM ][ self.e2 ] = []
self.filters[ SIMILARITY_ELEMENTS ] = {}
self.filters[ SIMILARITY_SORT_ELEMENTS ] = {}
self.set_els = {}
self.ref_set_els = {}
self.ref_set_ident = {}
def _init_index_elements(self):
self.__init_index_elements( self.e1, 1 )
self.__init_index_elements( self.e2 )
def __init_index_elements(self, ce, init=0):
self.set_els[ ce ] = set()
self.ref_set_els[ ce ] = {}
self.ref_set_ident[ce] = {}
for ae in ce.get_elements():
e = self.filters[BASE][FILTER_ELEMENT_METH]( ae, ce )
if self.filters[BASE][FILTER_SKIPPED_METH].skip( e ):
self.filters[ SKIPPED_ELEMENTS ].append( e )
continue
self.filters[ ELEMENTS ][ ce ].append( e )
fm = self.filters[ BASE ][ FILTER_CHECKSUM_METH ]( e, self.sim )
e.set_checksum( fm )
sha256 = e.getsha256()
self.filters[ HASHSUM ][ ce ].append( sha256 )
if sha256 not in self.set_els[ ce ]:
self.set_els[ ce ].add( sha256 )
self.ref_set_els[ ce ][ sha256 ] = e
self.ref_set_ident[ce][sha256] = []
self.ref_set_ident[ce][sha256].append(e)
def _init_similarity(self):
intersection_elements = self.set_els[ self.e2 ].intersection( self.set_els[ self.e1 ] )
difference_elements = self.set_els[ self.e2 ].difference( intersection_elements )
self.filters[IDENTICAL_ELEMENTS].update([ self.ref_set_els[ self.e1 ][ i ] for i in intersection_elements ])
available_e2_elements = [ self.ref_set_els[ self.e2 ][ i ] for i in difference_elements ]
# Check if some elements in the first file has been modified
for j in self.filters[ELEMENTS][self.e1]:
self.filters[ SIMILARITY_ELEMENTS ][ j ] = {}
#debug("SIM FOR %s" % (j.get_info()))
if j.getsha256() not in self.filters[HASHSUM][self.e2]:
#eln = ElsimNeighbors( j, available_e2_elements )
#for k in eln.cmp_elements():
for k in available_e2_elements:
#debug("%s" % k.get_info())
self.filters[SIMILARITY_ELEMENTS][ j ][ k ] = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
if j.getsha256() not in self.filters[HASHSUM_SIMILAR_ELEMENTS]:
self.filters[SIMILAR_ELEMENTS].append(j)
self.filters[HASHSUM_SIMILAR_ELEMENTS].append( j.getsha256() )
def _init_sort_elements(self):
deleted_elements = []
for j in self.filters[SIMILAR_ELEMENTS]:
#debug("SORT FOR %s" % (j.get_info()))
sort_h = self.filters[BASE][FILTER_SORT_METH]( j, self.filters[SIMILARITY_ELEMENTS][ j ], self.filters[BASE][FILTER_SORT_VALUE] )
self.filters[SIMILARITY_SORT_ELEMENTS][ j ] = set( i[0] for i in sort_h )
ret = True
if sort_h == []:
ret = False
if ret == False:
deleted_elements.append( j )
for j in deleted_elements:
self.filters[ DELETED_ELEMENTS ].append( j )
self.filters[ SIMILAR_ELEMENTS ].remove( j )
def __checksort(self, x, y):
return y in self.filters[SIMILARITY_SORT_ELEMENTS][ x ]
def _init_new_elements(self):
# Check if some elements in the second file are totally new !
for j in self.filters[ELEMENTS][self.e2]:
# new elements can't be in similar elements
if j not in self.filters[SIMILAR_ELEMENTS]:
# new elements hashes can't be in first file
if j.getsha256() not in self.filters[HASHSUM][self.e1]:
ok = True
# new elements can't be compared to another one
for diff_element in self.filters[SIMILAR_ELEMENTS]:
if self.__checksort( diff_element, j ):
ok = False
break
if ok:
if j.getsha256() not in self.filters[HASHSUM_NEW_ELEMENTS]:
self.filters[NEW_ELEMENTS].add( j )
self.filters[HASHSUM_NEW_ELEMENTS].append( j.getsha256() )
def get_similar_elements(self):
""" Return the similar elements
@rtype : a list of elements
"""
return self.get_elem( SIMILAR_ELEMENTS )
def get_new_elements(self):
""" Return the new elements
@rtype : a list of elements
"""
return self.get_elem( NEW_ELEMENTS )
def get_deleted_elements(self):
""" Return the deleted elements
@rtype : a list of elements
"""
return self.get_elem( DELETED_ELEMENTS )
def get_internal_identical_elements(self, ce):
""" Return the internal identical elements
@rtype : a list of elements
"""
return self.get_elem( INTERNAL_IDENTICAL_ELEMENTS )
def get_identical_elements(self):
""" Return the identical elements
@rtype : a list of elements
"""
return self.get_elem( IDENTICAL_ELEMENTS )
def get_skipped_elements(self):
return self.get_elem( SKIPPED_ELEMENTS )
def get_elem(self, attr):
return [ x for x in self.filters[attr] ]
def show_element(self, i, details=True):
print "\t", i.get_info()
if details:
if i.getsha256() == None:
pass
elif i.getsha256() in self.ref_set_els[self.e2]:
if len(self.ref_set_ident[self.e2][i.getsha256()]) > 1:
for ident in self.ref_set_ident[self.e2][i.getsha256()]:
print "\t\t-->", ident.get_info()
else:
print "\t\t-->", self.ref_set_els[self.e2][ i.getsha256() ].get_info()
else:
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
print "\t\t-->", j.get_info(), self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ]
def get_element_info(self, i):
l = []
if i.getsha256() == None:
pass
elif i.getsha256() in self.ref_set_els[self.e2]:
l.append( [ i, self.ref_set_els[self.e2][ i.getsha256() ] ] )
else:
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
l.append( [i, j, self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ] ] )
return l
def get_associated_element(self, i):
return list(self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ])[0]
def get_similarity_value(self, new=True):
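        # Recompute pairwise similarity with the BZ2 compressor for the final score;
        # identical elements count as fully similar (distance 0.0), while new (or
        # deleted) elements count as fully dissimilar (distance 1.0). The result is
        # the average similarity expressed as a percentage.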
values = []
self.sim.set_compress_type( BZ2_COMPRESS )
for j in self.filters[SIMILAR_ELEMENTS]:
k = self.get_associated_element( j )
value = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
# filter value
value = self.filters[BASE][FILTER_SIM_VALUE_METH]( value )
values.append( value )
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 0.0 ) for i in self.filters[IDENTICAL_ELEMENTS] ] )
if new == True:
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[NEW_ELEMENTS] ] )
else:
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[DELETED_ELEMENTS] ] )
self.sim.set_compress_type( self.compressor )
similarity_value = 0.0
for i in values:
similarity_value += (1.0 - i)
if len(values) == 0:
return 0.0
return (similarity_value/len(values)) * 100
def show(self):
print "Elements:"
print "\t IDENTICAL:\t", len(self.get_identical_elements())
print "\t SIMILAR: \t", len(self.get_similar_elements())
print "\t NEW:\t\t", len(self.get_new_elements())
print "\t DELETED:\t", len(self.get_deleted_elements())
print "\t SKIPPED:\t", len(self.get_skipped_elements())
#self.sim.show()
ADDED_ELEMENTS = "added elements"
DELETED_ELEMENTS = "deleted elements"
LINK_ELEMENTS = "link elements"
DIFF = "diff"
class Eldiff(object):
def __init__(self, elsim, F):
self.elsim = elsim
self.F = F
self._init_filters()
self._init_diff()
def _init_filters(self):
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ ADDED_ELEMENTS ] = {}
self.filters[ DELETED_ELEMENTS ] = {}
self.filters[ LINK_ELEMENTS ] = {}
def _init_diff(self):
for i, j in self.elsim.get_elements():
self.filters[ ADDED_ELEMENTS ][ j ] = []
self.filters[ DELETED_ELEMENTS ][ i ] = []
x = self.filters[ BASE ][ DIFF ]( i, j )
self.filters[ ADDED_ELEMENTS ][ j ].extend( x.get_added_elements() )
self.filters[ DELETED_ELEMENTS ][ i ].extend( x.get_deleted_elements() )
self.filters[ LINK_ELEMENTS ][ j ] = i
#self.filters[ LINK_ELEMENTS ][ i ] = j
def show(self):
for bb in self.filters[ LINK_ELEMENTS ] : #print "la"
print bb.get_info(), self.filters[ LINK_ELEMENTS ][ bb ].get_info()
print "Added Elements(%d)" % (len(self.filters[ ADDED_ELEMENTS ][ bb ]))
for i in self.filters[ ADDED_ELEMENTS ][ bb ]:
print "\t",
i.show()
print "Deleted Elements(%d)" % (len(self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]))
for i in self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]:
print "\t",
i.show()
print
def get_added_elements(self):
return self.filters[ ADDED_ELEMENTS ]
def get_deleted_elements(self):
return self.filters[ DELETED_ELEMENTS ]
| apache-2.0 |
wei-Z/Python-Machine-Learning | self_practice/Chapter 5 Kernel PCA.py | 1 | 11191 | # Chapter 5 Compressing Data via Dimensionality Reduction
# Using kernel principal component analysis for nonlinear mappings
'''
Using kernel PCA, we will learn how to transform data that is not linearly separable
onto a new, lower-dimensional subspace that is suitable for linear classifiers.'''
# Kernel functions and the kernel trick
# implementing a kernel principal component analysis in Python
'''
Now we are going to implement an RBF kernel PCA in Python following the three
steps that summarize the kernel PCA approach. Using the SciPy and NumPy helper
functions, we will see that implementing a kernel PCA is actually really simple.'''
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
import numpy as np
import matplotlib.pyplot as plt
def rbf_kernel_pca(X, gamma, n_components):
'''
RBF kernel PCA implementation.
Parameters
--------------------
X: {NumPy ndarray}, shape = [n_samples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components: int
Number of principal components to return
Returns
-------------------
X_pc: {Numpy ndarray}, shape = [n_samples, k_features]
Projected dataset
'''
# Calculate pairwise squared Euclidean distance
# in the MxN dimensional dataset
sq_dists = pdist(X, 'sqeuclidean' )
# Convert pairwise distances into a square matrix
mat_sq_dists = squareform(sq_dists)
# Compute the symmetric kernel matrix
K = exp(-gamma * mat_sq_dists)
# Center the kernel matrix.
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# numpy.eigh returns them in sorted order
eigvals, eigvecs = eigh(K)
# Collect the top k eigenvectors (projected samples)
X_pc = np.column_stack((eigvecs[:, -i] for i in range(1, n_components + 1)))
return X_pc
# Example 1 - separating half-moon shapes
'''
Now, let's apply our rbf_kernel_pca on some nonlinear example datasets.
We will start by creating a two-dimensional dataset of 100 sample points
representing two half-moon shapes:'''
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=100, random_state=123)
plt.scatter(X[y==0, 0], X[y==0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', marker='o', alpha=0.5)
plt.show()
'''
For the purposes of illustration, the half-moon of triangular symbols shall
represent one class, and the half-moon depicted by the circular symbols
represents the samples from another class:'''
'''
Clearly, these two half-moon shapes are not linearly separable and our goal
is to unfold the half-moons via kernel PCA so that the dataset can serve as a
suitable input for a linear classifier. But first, let's see what the dataset looks
like if we project it onto the principal components via standard PCA:'''
from sklearn.decomposition import PCA
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7,3))
ax[0].scatter(X_spca[y==0, 0], X_spca[y==0, 1],color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y==1, 0], X_spca[y==1, 1],color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y==0, 0], np.zeros((50,1))+0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y==1, 0], np.zeros((50,1)),color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
'''
Note that when we plotted the first principal component only (right subplot),
we shifted the triangular samples slightly upwards and the circular samples
slightly downwards to better visualize the class overlap.'''
'''
Now let's try out our kernel PCA function rbf_kernel_pca, which we implemented
in the previous subsection:'''
from matplotlib.ticker import FormatStrFormatter
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7,3))
ax[0].scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y==0, 0], np.zeros((50, 1)) + 0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y==1, 0], np.zeros((50, 1)) - 0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
plt.show()
'''
We can now see that the two classes (circles and triangles) are linearly well separated
so that it becomes a suitable training dataset for linear classifiers:'''
'''
Unfortunately, there is no universal value for the tuning parameter gamma that works
well for different datasets. Finding a gamma value that is appropriate for a given problem
requires experimentation. '''
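# A minimal sketch (not from the book) of how such experimentation might look:
# refit the RBF kernel PCA for a few candidate gamma values and compare how far
# apart the two half-moon classes land along the first principal component.
# The candidate gamma values below are arbitrary illustrative choices.
for candidate_gamma in [1, 5, 15, 50]:
    X_kpca_trial = rbf_kernel_pca(X, gamma=candidate_gamma, n_components=2)
    # crude separability check: distance between the class means along PC1
    separation = abs(X_kpca_trial[y == 0, 0].mean() - X_kpca_trial[y == 1, 0].mean())
    print('gamma=%d: class-mean separation along PC1 = %.3f' % (candidate_gamma, separation))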
# Example 2 - separating concentric circles
'''
In the previous subsection, we showed how to separate half-moon shapes via
kernel-PCA. Since we put so much effort into understanding the concept of kernel
PCA, let's take a look at another interesting example of a nonlinear problem:
concentric circles.'''
from sklearn.datasets import make_circles
X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2)
plt.scatter(X[y==0, 0], X[y==0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', marker='o', alpha=0.5)
plt.show()
'''Let's start with the standard PCA approach to compare it with the results of the RBF
kernel PCA:'''
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y==0, 0], X_spca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y==1, 0], X_spca[y==1, 1], color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y==0, 0], np.zeros((500, 1))+0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y==1, 0], np.zeros((500, 1))-0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
''' Again, we can see that standard PCA is not able to produce results suitable for
training a linear classifier.'''
''' Given an appropriate value for gamma, let's see if we are luckier using the RBF
kernel PCA implementation:'''
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y==0, 0], np.zeros((500, 1))+0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y==1, 0], np.zeros((500, 1))-0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
# Projecting new data points
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
import numpy as np
def rbf_kernel_pca(X, gamma, n_components):
'''
RBF kernel PCA implementation.
Parameters
-----------------
    X: {NumPy ndarray}, shape = [n_samples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components: int
number of principal components to return
Returns
-----------------
    X_pc: {NumPy ndarray}, shape = [n_samples, k_features]
Projected dataset
lambdas: list
Eigenvalues
'''
# Calculate pairwise squared Euclidean distances
# in the MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Convert pairwise distances into a square matrix.
mat_sq_dists = squareform(sq_dists)
    # Compute the symmetric kernel matrix
K = exp(-gamma * mat_sq_dists)
# Center the kernel matrix
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# numpy.eigh returns them in sorted order
eigvals, eigvecs = eigh(K)
# Collect the top k eigenvectors (projected samples)
alphas = np.column_stack((eigvecs[:, -i] for i in range(1, n_components+1)))
# Collect the corresponding eigenvalues
lambdas = [eigvals[-i] for i in range(1, n_components+1)]
return alphas, lambdas
'''
Now, let's create a new half-moon dataset and project it onto a one-dimensional
subspace using the updated RBF kernel PCA implementation:'''
X, y = make_moons(n_samples=100, random_state=123)
alphas, lambdas = rbf_kernel_pca(X, gamma=15, n_components=1)
'''
To make sure that we implemented the code for projecting new samples correctly, let's assume
that the 26th point from the half-moon dataset is a new data point x', and our task is
to project it onto this new subspace:'''
x_new = X[25]
x_new
x_proj = alphas[25] # original projection
x_proj
def project_x(x_new, X, gamma, alphas, lambdas):
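    # Compute the RBF kernel between the new point and every training sample,
    # then project onto the eigenvectors of the kernel matrix; dividing by the
    # eigenvalues (lambdas) normalizes the eigenvectors.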
pair_dist = np.array([np.sum((x_new-row)**2) for row in X])
k = np.exp(-gamma * pair_dist)
return k.dot(alphas / lambdas)
'''
By executing the following code, we are able to reproduce the original projection.
Using the project_x function, we will be able to project any new data samples as
well. The code is as follows:'''
x_reproj = project_x(x_new, X, gamma=15, alphas=alphas, lambdas=lambdas)
x_reproj
'''
Lastly, let's visualize the projection on the first principal component.'''
plt.scatter(alphas[y==0, 0], np.zeros((50)), color='red', marker='^', alpha=0.5)
plt.scatter(alphas[y==1, 0], np.zeros((50)), color='blue', marker='o', alpha=0.5)
plt.scatter(x_proj, 0, color='black', label='original projection of point X[25]', marker='^', s=100)
plt.scatter(x_reproj, 0, color='green', label='remapped point X[25]', marker='x', s=500)
plt.legend(scatterpoints=1)
plt.show()
'''
As we can see in the following scatterplot, we mapped the sample x' onto the first
principal component correctly:'''
# Kernel principal component analysis in scikit-learn
'''
For our convenience, scikit-learn implements a kernel PCA class in the
sklearn.decomposition submodule. The usage is similar to the standard
PCA class, and we can specify the kernel via the kernel parameter:'''
from sklearn.decomposition import KernelPCA
X, y = make_moons(n_samples=100, random_state=123)
scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15)
X_skernpca=scikit_kpca.fit_transform(X)
'''
To see if we get results that are consistent with our own kernel PCA
implementation, let's plot the transformed half-moon shape data onto the
first two principal components:'''
plt.scatter(X_skernpca[y==0, 0], X_skernpca[y==0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X_skernpca[y==1, 0], X_skernpca[y==1, 1], color='blue', marker='o', alpha=0.5)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.show()
| mit |
ColumbiaCMB/kid_readout | kid_readout/measurement/mmw_source_sweep.py | 1 | 11492 | from __future__ import division
import time
import numpy as np
import pandas as pd
import logging
from memoized_property import memoized_property
# The ZBD object loads a few data files from disk. If this import fails then the functions that use it below will still
# work, but only with default arguments.
try:
from equipment.vdi.zbd import ZBD
zbd = ZBD()
except ImportError:
zbd = None
from kid_readout.measurement import core, basic
logger = logging.getLogger(__file__)
class MMWSweepList(basic.SweepStreamList):
def __init__(self, sweep, stream_list, state, description=''):
super(MMWSweepList, self).__init__(sweep=sweep, stream_list=stream_list, state=state, description=description)
def single_sweep_stream_list(self, index):
return MMWResponse(self.sweep.sweep(index),
core.MeasurementList(sa.stream(index) for sa in self.stream_list),
number=index,
state=self.state, description=self.description)
def to_dataframe(self,add_origin=True):
rows = []
for number in range(self.sweep.num_channels):
sssl = self.single_sweep_stream_list(number)
this_df = sssl.to_dataframe()
rows.append(this_df)
return pd.concat(rows,ignore_index=True)
class MMWResponse(basic.SingleSweepStreamList):
def __init__(self, single_sweep, stream_list, state, number=0, description=''):
super(MMWResponse,self).__init__(single_sweep=single_sweep, stream_list=stream_list, state=state, number=number,
description=description)
@property
def lockin_rms_voltage(self):
try:
return np.array(self.state_vector('lockin','rms_voltage'),dtype='float')
except KeyError:
return np.nan
def zbd_power(self, linearize=False):
return zbd_voltage_to_power(self.zbd_voltage(linearize=linearize), mmw_frequency=self.mmw_frequency)
def zbd_voltage(self, linearize=False):
return lockin_rms_to_zbd_voltage(self.lockin_rms_voltage, linearize=linearize)
@property
def hittite_frequency(self):
return np.array(self.state_vector('hittite','frequency'), dtype='float')
@property
def mmw_frequency(self):
return 12.*self.hittite_frequency
@memoized_property
def sweep_stream_list(self):
return self.get_sweep_stream_list()
def get_sweep_stream_list(self, deglitch=False):
result = []
for stream in self.stream_list:
sss = basic.SingleSweepStream(sweep=self.single_sweep, stream=stream, state=stream.state,
description=stream.description)
result.append(sss)
return result
@memoized_property
def folded_x(self):
sweep_stream_list = self.sweep_stream_list
result = []
for sss in sweep_stream_list:
fx = sss.fold(sss.x_raw)
# TODO: this is a hack
xfft = np.fft.rfft(fx)
phase = np.angle(xfft[1])
roll_by = 0
try:
roll_by = int(np.round(phase*fx.shape[0]/(2*np.pi))) + fx.shape[0]//4
except ValueError:
logger.debug("NaN values encounterd while trying to fold data for stream %r. Data won't be aligned" %
sss.number)
result.append(np.roll(fx,roll_by))
return np.array(result)
@memoized_property
def folded_q(self):
sweep_stream_list = self.sweep_stream_list
result = []
for sss in sweep_stream_list:
fq = sss.fold(sss.q_raw)
result.append(fq)
return np.array(result)
@memoized_property
def folded_normalized_s21(self):
sweep_stream_list = self.sweep_stream_list
result = []
for sss in sweep_stream_list:
fs21 = sss.fold(sss.normalized_s21)
result.append(fs21)
return np.array(result)
@memoized_property
def folded_s21_raw(self):
sweep_stream_list = self.sweep_stream_list
result = []
for sss in sweep_stream_list:
fs21 = sss.fold(sss.stream.s21_raw)
result.append(fs21)
return np.array(result)
@memoized_property
def fractional_frequency_response(self):
return self.get_fractional_frequency_response()
def get_fractional_frequency_response(self):
folded = self.folded_x
period = folded.shape[-1]
template = np.ones((period,),dtype='float')
template[:period//2] = -1
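        # Correlate each folded timestream with the square-wave template in the
        # Fourier domain (a circular cross-correlation) and take the peak of the
        # result as the fractional frequency response.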
response = np.abs(np.fft.irfft(np.fft.rfft(template)*np.fft.rfft(folded,axis=-1),axis=-1)*2./period).max(-1)
return response
def to_dataframe(self, add_origin=True):
data = {'number': self.number, 'analysis_epoch':time.time(), 'start_epoch':self.start_epoch()}
try:
for thermometer, temperature in self.state['temperature'].items():
data['temperature_{}'.format(thermometer)] = temperature
except KeyError:
pass
try:
for key, value in self.stream_list[0].roach_state.items():
data['roach_{}'.format(key)] = value
except KeyError:
pass
flat_state = self.state.flatten(wrap_lists=True)
data.update(flat_state)
for param in self.single_sweep.resonator.current_result.params.values():
data['res_{}'.format(param.name)] = param.value
data['res_{}_error'.format(param.name)] = param.stderr
data['res_redchi'] = self.single_sweep.resonator.current_result.redchi
data['res_Q_i'] = self.single_sweep.resonator.Q_i
data['res_Q_e'] = self.single_sweep.resonator.Q_e
data['res_s21_data'] = [self.single_sweep.resonator.data]
data['res_frequency_data'] = [self.single_sweep.resonator.frequency]
data['res_s21_errors'] = [self.single_sweep.resonator.errors]
modelf = np.linspace(self.single_sweep.resonator.frequency.min(),self.single_sweep.resonator.frequency.max(),1000)
models21 = self.single_sweep.resonator.model.eval(params=self.single_sweep.resonator.current_params,f=modelf)
data['res_model_frequency'] = [modelf]
data['res_model_s21'] = [models21]
data['fractional_frequency_response'] = [self.fractional_frequency_response]
data['folded_s21_raw'] = [self.folded_s21_raw]
data['folded_x'] = [self.folded_x]
data['mmw_frequency'] = [self.mmw_frequency]
data['lockin_rms_voltage'] = [self.lockin_rms_voltage]
data['zbd_power_linearized'] = [self.zbd_power(linearize=True)]
dataframe = pd.DataFrame(data, index=[0])
if add_origin:
self.add_origin(dataframe)
return dataframe
class MMWSweepOnMod(core.Measurement):
_version = 0
def __init__(self, sweep, off_stream, on_stream, mod_stream, state=None, description=''):
self.sweep = sweep
self.on_stream = on_stream
self.mod_stream = mod_stream
if off_stream:
self.off_stream = off_stream
else:
self.off_stream = None
super(MMWSweepOnMod, self).__init__(state=state, description=description)
@property
def on_sweep_stream_array(self):
return basic.SweepStreamArray(sweep_array=self.sweep, stream_array=self.on_stream,state=self.state,
description=self.description)
@property
def off_sweep_stream_array(self):
if self.off_stream:
return basic.SweepStreamArray(sweep_array=self.sweep, stream_array=self.off_stream,state=self.state,
description=self.description)
else:
raise AttributeError("No off stream measurement defined")
@property
def mod_sweep_stream_array(self):
return basic.SweepStreamArray(sweep_array=self.sweep, stream_array=self.mod_stream,state=self.state,
description=self.description)
def sweep_stream_set(self,number):
sweep = self.sweep.sweep(number)
on_sweep_stream = self.on_stream.stream(number)
mod_sweep_stream = self.mod_stream.stream(number)
try:
off_sweep_stream = self.off_stream.stream(number)
except AttributeError:
off_sweep_stream = None
if off_sweep_stream:
return (basic.SingleSweepStream(sweep,off_sweep_stream,number=number,state=self.state,
description=self.description),
basic.SingleSweepStream(sweep,on_sweep_stream,number=number,state=self.state,
description=self.description),
basic.SingleSweepStream(sweep,mod_sweep_stream,number=number,state=self.state,
description=self.description),
)
else:
return (None,
basic.SingleSweepStream(sweep,on_sweep_stream,number=number,state=self.state,
description=self.description),
basic.SingleSweepStream(sweep,mod_sweep_stream,number=number,state=self.state,
description=self.description),
)
def to_dataframe(self, add_origin=True):
on_rows = []
mod_rows = []
off_rows = []
for n in range(self.sweep.num_channels):
off_ss, on_ss, mod_ss = self.sweep_stream_set(n)
on_rows.append(on_ss.to_dataframe())
mod_rows.append(mod_ss.to_dataframe())
if off_ss:
off_rows.append(off_ss.to_dataframe())
df_on = pd.concat(on_rows,ignore_index=True)
df_mod = pd.concat(mod_rows,ignore_index=True)
dfs = [df_on,df_mod]
if off_rows:
df_off = pd.concat(off_rows,ignore_index=True)
dfs.append(df_off)
else:
df_off = None
if add_origin:
if self._io is None:
self.sweep.add_origin(df_on,prefix='sweep_')
self.on_stream.add_origin(df_on,prefix='stream_')
self.sweep.add_origin(df_mod,prefix='sweep_')
self.mod_stream.add_origin(df_mod,prefix='stream_')
if off_rows:
self.sweep.add_origin(df_off,prefix='sweep_')
self.off_stream.add_origin(df_off,prefix='stream_')
else:
self.add_origin(df_on)
self.add_origin(df_mod)
if off_rows:
self.add_origin(df_off)
df_on['lockin_rms_voltage'] = df_mod['lockin_rms_voltage']
if df_off is not None:
df_off['lockin_rms_voltage'] = df_mod['lockin_rms_voltage']
return pd.concat(dfs,ignore_index=True)
def lockin_rms_to_zbd_voltage(lockin_rms_voltage, linearize=False):
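    # The pi/sqrt(2) factor presumably recovers the full ZBD voltage from the
    # lock-in RMS reading of the fundamental of a square-wave-chopped signal
    # (explanatory comment; see zbd_voltage_to_power below for the volts-to-watts step).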
zbd_voltage = (np.pi / np.sqrt(2)) * lockin_rms_voltage
if linearize:
zbd_voltage /= zbd.linearity(zbd_voltage)
return zbd_voltage
def zbd_voltage_to_power(zbd_voltage, mmw_frequency=None):
if mmw_frequency is None:
volts_per_watt = 2200 # 2200 V/W is the approximate responsivity
else:
volts_per_watt = zbd.responsivity(mmw_frequency)
return zbd_voltage / volts_per_watt
| bsd-2-clause |
ScottHull/fEquilibrium | dynamics/Movement.py | 1 | 6961 | import pandas as pd
import numpy as np
from math import pi, sqrt, exp
import time
import os
os.sys.path.append(os.path.dirname(os.path.abspath('.'))); from meta.Console import console
# Some methods extracted from:
# Mechanisms of metal-silicate equilibration in the terrestrial magma ocean
# D.C. Rubie a;, H.J. Melosh b, J.E. Reid a, C. Liebske a, K. Righter b
class gravity:
"""
Calculates the gravity a body is subjected to as a function of depth.
"""
def __init__(self):
pass
class move_particle:
def __init__(self, body_type, system_params):
self.body_type = body_type
self.system_params = system_params
def viscosity(self, material, pressure, temperature):
"""
A calculation of viscosity using the diffusion coefficient. Diffusion is an act of Gibbs Free Energy minimization,
where atoms diffuse down a concentration gradient to minimum energy configuration. Diffusion is related to
the viscosity of the material.
:param material: the name of the material, as listed in 'physical_parameters.csv' in this file's working directory
:return: viscosity, Pa*s = (N*s)/m^2=kg/(s*m)
"""
material_properties = pd.read_csv("dynamics/physical_parameters.csv", index_col='Material')
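        # If no tabulated viscosity exists for this material, estimate it from the
        # diffusion coefficient via the relation viscosity = k_B * T / (D * lambda);
        # otherwise return the tabulated value directly.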
if pd.isnull(material_properties['Viscosity'][material]):
gas_const = 8.312 # J/mol*k
boltzmann_const = 1.3806 * 10**-23 # (m^2*kg)/(s^2*degK)
D_not = material_properties['D_not'][material] # diffusion equation param, the diffusion coefficient
H_star = material_properties['H*'][material] # diffusion equation param, the activation enthalpy
V_star = material_properties['V*'][material] # diffusion equation param, the activation volume
lambda_ = material_properties['Lambda_O'][material] # viscosity param, the jump distance for diffusing ions
D = D_not * exp((-H_star + (pressure * V_star)) / (gas_const * temperature)) # the diffusion equation, D=D_not*exp(-(H*+PV*)/R*T)
viscosity = (boltzmann_const * temperature) / (D * lambda_) # reworked from D_(Si,O)=(boltzmann*T)/(D*lambda)
print("calculated viscosity: {} (pressure={}, temperature={})".format(viscosity, pressure, temperature))
return viscosity
else:
viscosity = material_properties['Viscosity'][material]
return viscosity
def friction_coefficient(self, density_body, density_matrix, diameter_body, matrix_viscosity):
"""
A dimensionless parameter to determine the mode of Stoke's settling.
:param density_body: density of the body, in kg/m^3
:param density_matrix: density of the matrix, in kg/m^3
:param diameter_body: diameter of the body, in m
:param matrix_viscosity: viscosity of the matrix, in Pa*s
:return: f, friction coefficient (dimensionless)
"""
grav_constant = 9.81 # m/s^2
f = (pi/6) * ((density_body - density_matrix) / density_matrix) * \
(density_matrix / matrix_viscosity)**2 * (grav_constant * diameter_body**3)
return f
# def stokes_settling(self, density_body, density_matrix, diameter_body, matrix_viscosity, drag_coeff=0.2):
# TODO: return 3-component array such that an x, y, and z velocity are received
def stokes_settling(self, object, object_radius, matrix_material, matrix_material_temp, matrix_material_pressure):
"""
        Depending on whether the friction coefficient f is below or above 10, the mode of Stokes settling
        differs as described below, as a result of laminar versus turbulent flow.
:param density_body: density of the body, in kg/m^3
:param density_matrix: density of the matrix, in kg/m^3
:param diameter_body: diameter of the body, in m
:param matrix_viscosity: viscosity of the matrix, in Pa*s
:param drag_coeff: asymptotic value of 0.2 at high frictional coefficients
:return: v, settling velocity
"""
material_properties = pd.read_csv("dynamics/physical_parameters.csv", index_col='Material')
density_body = material_properties['Density'][object]
density_matrix = material_properties['Density'][matrix_material]
drag_coeff = material_properties['Drag Coefficient'][object]
matrix_viscosity = self.viscosity(material=matrix_material, pressure=matrix_material_pressure,
temperature=matrix_material_temp)
diameter_body = object_radius * 2.0 # diameter=radius*2
grav_constant = 9.81 # m/s^2
f = self.friction_coefficient(density_body=density_body, density_matrix=density_matrix,
diameter_body=diameter_body, matrix_viscosity=matrix_viscosity)
if f < 10: # low frictional coefficient, when body is in laminar flow regime
v = ((density_body - density_matrix) * grav_constant * diameter_body**2) / (18 * matrix_viscosity) # calculates the velocity of the body
# console.pm_flush(
# "f: {}, velocity: {}, matrix_viscosity: {}, matrix_material: {}".format(f, v, matrix_viscosity,
# matrix_material))
return v
else:
v = sqrt(((4 / (3 * drag_coeff)) * (((density_body - density_matrix) / density_matrix) * (grav_constant * diameter_body))))
# console.pm_flush(
# "f: {}, velocity: {}, matrix_viscosity: {}, matrix_material: {}".format(f, v, matrix_viscosity,
# matrix_material))
return v
class droplet_size:
"""
Predicts Fe-molten-alloy droplet size via the dimensionless Weber number.
W = ((rho_m - rho_s)*d*v^2)/sigma.
    Settling velocity is determined via Stokes' law when the flow regime is laminar, or via an equation
    incorporating a drag coefficient when the flow around the falling droplet is turbulent.
"""
def __init__(self, body_type):
self.body_type = body_type
def weber_number(self, density_body, density_matrix, diameter_body, surface_energy, settling_velocity):
"""
A dimensionless number that is a ratio of stagnation pressure and internal pressure caused by surface tension.
This number determines when a falling droplet becomes stable as it escapes fluid instabilities such as Rayleigh-Taylor
or Kelvin-Helmholtz. Usually stable when Weber number falls to ~10.
:param density_body:
:param density_matrix:
:param diameter_body:
:param surface_energy:
:param settling_velocity:
:return: w, Weber number (dimensionless)
"""
        w = ((density_body - density_matrix) * diameter_body * settling_velocity ** 2) / surface_energy  # velocity squared, per the Weber number definition in the class docstring
return w
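# --- Illustrative usage sketch (not part of the original module) ---
# Assumes 'dynamics/physical_parameters.csv' is present and contains rows for the
# material names used below; 'Fe' and 'Silicate Liquid' are hypothetical placeholders,
# as are the numeric radius, temperature and pressure values.
if __name__ == "__main__":
    mover = move_particle(body_type='droplet', system_params={})
    settling_velocity = mover.stokes_settling(object='Fe',
                                              object_radius=0.005,
                                              matrix_material='Silicate Liquid',
                                              matrix_material_temp=2000.0,
                                              matrix_material_pressure=1.0e9)
    print("Stokes settling velocity: {} m/s".format(settling_velocity))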
| apache-2.0 |
depet/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 4 | 3498 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC
from sklearn.utils import check_random_state
from sklearn.metrics.scorer import SCORERS
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=3)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=3)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(X_r_sparse.toarray(), X_r)
# Test using a customized loss function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=3,
loss_func=zero_one_loss)
with warnings.catch_warnings(record=True):
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
# Test using a scorer
scorer = SCORERS['accuracy']
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=3,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/io/formats/console.py | 7 | 2741 | """
Internal module for console introspection
"""
import sys
import locale
from pandas.io.formats.terminal import get_terminal_size
# -----------------------------------------------------------------------------
# Global formatting options
_initial_defencoding = None
def detect_console_encoding():
"""
Try to find the most capable encoding supported by the console.
    Slightly modified from the way IPython handles the same issue.
"""
global _initial_defencoding
encoding = None
try:
encoding = sys.stdout.encoding or sys.stdin.encoding
except AttributeError:
pass
# try again for something better
if not encoding or 'ascii' in encoding.lower():
try:
encoding = locale.getpreferredencoding()
except Exception:
pass
# when all else fails. this will usually be "ascii"
if not encoding or 'ascii' in encoding.lower():
encoding = sys.getdefaultencoding()
# GH3360, save the reported defencoding at import time
# MPL backends may change it. Make available for debugging.
if not _initial_defencoding:
_initial_defencoding = sys.getdefaultencoding()
return encoding
def get_console_size():
"""Return console size as tuple = (width, height).
Returns (None,None) in non-interactive session.
"""
from pandas import get_option
from pandas.core import common as com
display_width = get_option('display.width')
# deprecated.
display_height = get_option('display.height', silent=True)
# Consider
# interactive shell terminal, can detect term size
# interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term
# size non-interactive script, should disregard term size
# in addition
# width,height have default values, but setting to 'None' signals
# should use Auto-Detection, But only in interactive shell-terminal.
# Simple. yeah.
if com.in_interactive_session():
if com.in_ipython_frontend():
# sane defaults for interactive non-shell terminal
# match default for width,height in config_init
from pandas.core.config import get_default_val
terminal_width = get_default_val('display.width')
terminal_height = get_default_val('display.height')
else:
# pure terminal
terminal_width, terminal_height = get_terminal_size()
else:
terminal_width, terminal_height = None, None
# Note if the User sets width/Height to None (auto-detection)
# and we're in a script (non-inter), this will return (None,None)
# caller needs to deal.
return (display_width or terminal_width, display_height or terminal_height)
| mit |
liyu1990/sklearn | sklearn/linear_model/ransac.py | 25 | 14262 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
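# Illustrative example (comment only, not part of the upstream implementation):
# with 100 inliers among 1000 samples, min_samples=2 and probability=0.99, the
# inlier ratio is 0.1, so _dynamic_max_trials(100, 1000, 2, 0.99) returns
# ceil(log(0.01) / log(1 - 0.1 ** 2)) = 459 trials.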
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
            # fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
| bsd-3-clause |
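A minimal usage sketch of the RANSAC regressor defined above, added for illustration: it assumes the class is importable as `sklearn.linear_model.RANSACRegressor` (its public path in released scikit-learn), and the synthetic data and printed attributes are made up.

import numpy as np
from sklearn.linear_model import LinearRegression, RANSACRegressor

rng = np.random.RandomState(0)
X = np.sort(rng.uniform(0, 10, size=(100, 1)), axis=0)
y = 3.0 * X.ravel() + rng.normal(scale=0.5, size=100)
y[::10] += 30.0  # inject gross outliers into every tenth sample

# min_samples defaults to X.shape[1] + 1 and residual_threshold to the MAD of y,
# matching the defaults implemented in fit() above.
ransac = RANSACRegressor(LinearRegression(), max_trials=100, random_state=0)
ransac.fit(X, y)

print("inliers kept:", ransac.inlier_mask_.sum(), "of", len(y))
print("estimated slope:", ransac.estimator_.coef_[0])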
apache/spark | python/pyspark/sql/pandas/functions.py | 22 | 28130 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import warnings
from inspect import getfullargspec
from pyspark.rdd import PythonEvalType
from pyspark.sql.pandas.typehints import infer_eval_type
from pyspark.sql.pandas.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
from pyspark.sql.types import DataType
from pyspark.sql.udf import _create_udf
class PandasUDFType(object):
"""Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.
"""
SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF
SCALAR_ITER = PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF
GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
def pandas_udf(f=None, returnType=None, functionType=None):
"""
Creates a pandas user defined function (a.k.a. vectorized user defined function).
Pandas UDFs are user defined functions that are executed by Spark using Arrow to transfer
data and Pandas to work with the data, which allows vectorized operations. A Pandas UDF
is defined using the `pandas_udf` as a decorator or to wrap the function, and no
additional configuration is required. A Pandas UDF behaves as a regular PySpark function
API in general.
.. versionadded:: 2.3.0
Parameters
----------
f : function, optional
user-defined function. A python function if used as a standalone function
returnType : :class:`pyspark.sql.types.DataType` or str, optional
the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
functionType : int, optional
an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
Default: SCALAR. This parameter exists for compatibility.
Using Python type hints is encouraged.
Examples
--------
In order to use this API, customarily the below are imported:
>>> import pandas as pd
>>> from pyspark.sql.functions import pandas_udf
From Spark 3.0 with Python 3.6+, `Python type hints <https://www.python.org/dev/peps/pep-0484>`_
detect the function types as below:
>>> @pandas_udf(IntegerType())
... def slen(s: pd.Series) -> pd.Series:
... return s.str.len()
Prior to Spark 3.0, the pandas UDF used `functionType` to decide the execution type as below:
>>> from pyspark.sql.functions import PandasUDFType
>>> from pyspark.sql.types import IntegerType
>>> @pandas_udf(IntegerType(), PandasUDFType.SCALAR)
... def slen(s):
... return s.str.len()
It is preferred to specify type hints for the pandas UDF instead of specifying pandas UDF
type via `functionType` which will be deprecated in the future releases.
Note that the type hint should use `pandas.Series` in all cases but there is one variant
    where `pandas.DataFrame` should be used for its input or output type hint instead when the input
or output column is of :class:`pyspark.sql.types.StructType`. The following example shows
a Pandas UDF which takes long column, string column and struct column, and outputs a struct
column. It requires the function to specify the type hints of `pandas.Series` and
`pandas.DataFrame` as below:
>>> @pandas_udf("col1 string, col2 long")
    ... def func(s1: pd.Series, s2: pd.Series, s3: pd.DataFrame) -> pd.DataFrame:
... s3['col2'] = s1 + s2.str.len()
... return s3
...
>>> # Create a Spark DataFrame that has three columns including a struct column.
... df = spark.createDataFrame(
... [[1, "a string", ("a nested string",)]],
... "long_col long, string_col string, struct_col struct<col1:string>")
>>> df.printSchema()
root
    |-- long_col: long (nullable = true)
    |-- string_col: string (nullable = true)
    |-- struct_col: struct (nullable = true)
| |-- col1: string (nullable = true)
>>> df.select(func("long_col", "string_col", "struct_col")).printSchema()
|-- func(long_col, string_col, struct_col): struct (nullable = true)
| |-- col1: string (nullable = true)
| |-- col2: long (nullable = true)
In the following sections, it describes the combinations of the supported type hints. For
simplicity, `pandas.DataFrame` variant is omitted.
* Series to Series
`pandas.Series`, ... -> `pandas.Series`
The function takes one or more `pandas.Series` and outputs one `pandas.Series`.
The output of the function should always be of the same length as the input.
>>> @pandas_udf("string")
... def to_upper(s: pd.Series) -> pd.Series:
... return s.str.upper()
...
>>> df = spark.createDataFrame([("John Doe",)], ("name",))
>>> df.select(to_upper("name")).show()
+--------------+
|to_upper(name)|
+--------------+
| JOHN DOE|
+--------------+
>>> @pandas_udf("first string, last string")
... def split_expand(s: pd.Series) -> pd.DataFrame:
... return s.str.split(expand=True)
...
>>> df = spark.createDataFrame([("John Doe",)], ("name",))
>>> df.select(split_expand("name")).show()
+------------------+
|split_expand(name)|
+------------------+
| [John, Doe]|
+------------------+
.. note:: The length of the input is not that of the whole input column, but is the
length of an internal batch used for each call to the function.
* Iterator of Series to Iterator of Series
`Iterator[pandas.Series]` -> `Iterator[pandas.Series]`
The function takes an iterator of `pandas.Series` and outputs an iterator of
`pandas.Series`. In this case, the created pandas UDF instance requires one input
column when this is called as a PySpark column. The length of the entire output from
    the function should be the same as the length of the entire input; therefore, it can
prefetch the data from the input iterator as long as the lengths are the same.
It is also useful when the UDF execution
requires initializing some states although internally it works identically as
Series to Series case. The pseudocode below illustrates the example.
.. highlight:: python
.. code-block:: python
@pandas_udf("long")
def calculate(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
# Do some expensive initialization with a state
state = very_expensive_initialization()
for x in iterator:
# Use that state for whole iterator.
yield calculate_with_state(x, state)
df.select(calculate("value")).show()
>>> from typing import Iterator
>>> @pandas_udf("long")
... def plus_one(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
... for s in iterator:
... yield s + 1
...
>>> df = spark.createDataFrame(pd.DataFrame([1, 2, 3], columns=["v"]))
>>> df.select(plus_one(df.v)).show()
+-----------+
|plus_one(v)|
+-----------+
| 2|
| 3|
| 4|
+-----------+
.. note:: The length of each series is the length of a batch internally used.
* Iterator of Multiple Series to Iterator of Series
`Iterator[Tuple[pandas.Series, ...]]` -> `Iterator[pandas.Series]`
The function takes an iterator of a tuple of multiple `pandas.Series` and outputs an
iterator of `pandas.Series`. In this case, the created pandas UDF instance requires
input columns as many as the series when this is called as a PySpark column.
Otherwise, it has the same characteristics and restrictions as Iterator of Series
to Iterator of Series case.
>>> from typing import Iterator, Tuple
>>> from pyspark.sql.functions import struct, col
>>> @pandas_udf("long")
... def multiply(iterator: Iterator[Tuple[pd.Series, pd.DataFrame]]) -> Iterator[pd.Series]:
... for s1, df in iterator:
... yield s1 * df.v
...
>>> df = spark.createDataFrame(pd.DataFrame([1, 2, 3], columns=["v"]))
>>> df.withColumn('output', multiply(col("v"), struct(col("v")))).show()
+---+------+
| v|output|
+---+------+
| 1| 1|
| 2| 4|
| 3| 9|
+---+------+
.. note:: The length of each series is the length of a batch internally used.
* Series to Scalar
`pandas.Series`, ... -> `Any`
The function takes `pandas.Series` and returns a scalar value. The `returnType`
should be a primitive data type, and the returned scalar can be either a python primitive
type, e.g., int or float or a numpy data type, e.g., numpy.int64 or numpy.float64.
`Any` should ideally be a specific scalar type accordingly.
>>> @pandas_udf("double")
... def mean_udf(v: pd.Series) -> float:
... return v.mean()
...
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v"))
>>> df.groupby("id").agg(mean_udf(df['v'])).show()
+---+-----------+
| id|mean_udf(v)|
+---+-----------+
| 1| 1.5|
| 2| 6.0|
+---+-----------+
This UDF can also be used as window functions as below:
>>> from pyspark.sql import Window
>>> @pandas_udf("double")
... def mean_udf(v: pd.Series) -> float:
... return v.mean()
...
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v"))
>>> w = Window.partitionBy('id').orderBy('v').rowsBetween(-1, 0)
>>> df.withColumn('mean_v', mean_udf("v").over(w)).show()
+---+----+------+
| id| v|mean_v|
+---+----+------+
| 1| 1.0| 1.0|
| 1| 2.0| 1.5|
| 2| 3.0| 3.0|
| 2| 5.0| 4.0|
| 2|10.0| 7.5|
+---+----+------+
.. note:: For performance reasons, the input series to window functions are not copied.
Therefore, mutating the input series is not allowed and will cause incorrect results.
For the same reason, users should also not rely on the index of the input series.
Notes
-----
The user-defined functions do not support conditional expressions or short circuiting
    in boolean expressions; they end up being executed internally in all cases. If the functions
can fail on special rows, the workaround is to incorporate the condition into the functions.
The user-defined functions do not take keyword arguments on the calling side.
The data type of returned `pandas.Series` from the user-defined functions should be
matched with defined `returnType` (see :meth:`types.to_arrow_type` and
:meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do
conversion on returned data. The conversion is not guaranteed to be correct and results
should be checked for accuracy by users.
Currently,
:class:`pyspark.sql.types.ArrayType` of :class:`pyspark.sql.types.TimestampType` and
nested :class:`pyspark.sql.types.StructType`
    are not supported as output types.
See Also
--------
pyspark.sql.GroupedData.agg
pyspark.sql.DataFrame.mapInPandas
pyspark.sql.GroupedData.applyInPandas
pyspark.sql.PandasCogroupedOps.applyInPandas
pyspark.sql.UDFRegistration.register
"""
# The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-28132's PR to see the codes in order to generate the table below.
#
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+---------------+--------------------------------+ # noqa
# |SQL Type \ Pandas Value(Type)|None(object(NoneType))| True(bool)| 1(int8)| 1(int16)| 1(int32)| 1(int64)| 1(uint8)| 1(uint16)| 1(uint32)| 1(uint64)| 1.0(float16)| 1.0(float32)| 1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))| 1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)| A(category)|1 days 00:00:00(timedelta64[ns])| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+---------------+--------------------------------+ # noqa
# | boolean| None| True| True| True| True| True| True| True| True| True| True| True| True| X| X| X| X| X| X| X| X| X| X| # noqa
# | tinyint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | smallint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | int| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 0| 18000000000000| X| 1| X| X| X| X| X| 86400000000000| # noqa
# | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| datetime.date(197...| X|datetime.date(197...| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X|datetime.datetime...| X| X| X| X| X| X| # noqa
# | string| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| 'a'| X| X| X| X| X| 'A'| X| # noqa
# | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa
# | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa
# | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | binary| None|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')| bytearray(b'\x01')| bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'')|bytearray(b'')|bytearray(b'')| bytearray(b'')| bytearray(b'')| bytearray(b'a')| X| X|bytearray(b'')| bytearray(b'')| bytearray(b'')|bytearray(b'A')| bytearray(b'')| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+---------------+--------------------------------+ # noqa #
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 3.7.3, Pandas 1.1.1 and PyArrow 1.0.1 are used.
# Note: Timezone is KST.
# Note: 'X' means it throws an exception during the conversion.
require_minimum_pandas_version()
require_minimum_pyarrow_version()
# decorator @pandas_udf(returnType, functionType)
is_decorator = f is None or isinstance(f, (str, DataType))
if is_decorator:
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
if functionType is not None:
# @pandas_udf(dataType, functionType=functionType)
# @pandas_udf(returnType=dataType, functionType=functionType)
eval_type = functionType
elif returnType is not None and isinstance(returnType, int):
# @pandas_udf(dataType, functionType)
eval_type = returnType
else:
# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
eval_type = None
else:
return_type = returnType
if functionType is not None:
eval_type = functionType
else:
eval_type = None
if return_type is None:
raise ValueError("Invalid return type: returnType can not be None")
if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
None]: # None means it should infer the type from type hints.
raise ValueError("Invalid function type: "
"functionType must be one the values from PandasUDFType")
if is_decorator:
return functools.partial(_create_pandas_udf, returnType=return_type, evalType=eval_type)
else:
return _create_pandas_udf(f=f, returnType=return_type, evalType=eval_type)
def _create_pandas_udf(f, returnType, evalType):
argspec = getfullargspec(f)
# pandas UDF by type hints.
from inspect import signature
if evalType in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]:
warnings.warn(
"In Python 3.6+ and Spark 3.0+, it is preferred to specify type hints for "
"pandas UDF instead of specifying pandas UDF type which will be deprecated "
"in the future releases. See SPARK-28264 for more details.", UserWarning)
elif evalType in [PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF]:
# In case of 'SQL_GROUPED_MAP_PANDAS_UDF', deprecation warning is being triggered
# at `apply` instead.
# In case of 'SQL_MAP_PANDAS_ITER_UDF' and 'SQL_COGROUPED_MAP_PANDAS_UDF', the
# evaluation type will always be set.
pass
elif len(argspec.annotations) > 0:
evalType = infer_eval_type(signature(f))
assert evalType is not None
if evalType is None:
# Set default is scalar UDF.
evalType = PythonEvalType.SQL_SCALAR_PANDAS_UDF
if (evalType == PythonEvalType.SQL_SCALAR_PANDAS_UDF or
evalType == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF) and \
len(argspec.args) == 0 and \
argspec.varargs is None:
raise ValueError(
"Invalid function: 0-arg pandas_udfs are not supported. "
"Instead, create a 1-arg pandas_udf and ignore the arg in your function."
)
if evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF \
and len(argspec.args) not in (1, 2):
raise ValueError(
"Invalid function: pandas_udf with function type GROUPED_MAP or "
"the function in groupby.applyInPandas "
"must take either one argument (data) or two arguments (key, data).")
if evalType == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF \
and len(argspec.args) not in (2, 3):
raise ValueError(
"Invalid function: the function in cogroup.applyInPandas "
"must take either two arguments (left, right) "
"or three arguments (key, left, right).")
return _create_udf(f, returnType, evalType)
| apache-2.0 |
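An illustrative, self-contained sketch (not part of the file above) of a type-hint-based pandas UDF used from both the DataFrame API and Spark SQL; the local SparkSession, column names, and the conversion function are assumptions, and running it requires Spark 3.0+, pandas, and pyarrow.

import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import pandas_udf

spark = SparkSession.builder.master("local[2]").appName("pandas-udf-demo").getOrCreate()

@pandas_udf("double")
def f_to_c(temp_f: pd.Series) -> pd.Series:
    # Evaluated once per Arrow batch, fully vectorized in pandas.
    return (temp_f - 32.0) * 5.0 / 9.0

df = spark.createDataFrame([(32.0,), (212.0,)], ["temp_f"])
df.select(f_to_c("temp_f").alias("temp_c")).show()

# The same UDF object can also be registered for use in SQL statements.
spark.udf.register("f_to_c", f_to_c)
df.createOrReplaceTempView("temps")
spark.sql("SELECT f_to_c(temp_f) AS temp_c FROM temps").show()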
wilsonkichoi/zipline | zipline/data/minute_bars.py | 2 | 33297 | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from os.path import join
from textwrap import dedent
from cachetools import LRUCache
import bcolz
from bcolz import ctable
from intervaltree import IntervalTree
import numpy as np
import pandas as pd
from zipline.data._minute_bar_internal import (
minute_value,
find_position_of_minute,
find_last_traded_position_internal
)
from zipline.gens.sim_engine import NANOS_IN_MINUTE
from zipline.utils.cli import maybe_show_progress
from zipline.utils.memoize import lazyval
US_EQUITIES_MINUTES_PER_DAY = 390
DEFAULT_EXPECTEDLEN = US_EQUITIES_MINUTES_PER_DAY * 252 * 15
OHLC_RATIO = 1000
class BcolzMinuteOverlappingData(Exception):
pass
class BcolzMinuteWriterColumnMismatch(Exception):
pass
def _calc_minute_index(market_opens, minutes_per_day):
minutes = np.zeros(len(market_opens) * minutes_per_day,
dtype='datetime64[ns]')
deltas = np.arange(0, minutes_per_day, dtype='timedelta64[m]')
for i, market_open in enumerate(market_opens):
start = market_open.asm8
minute_values = start + deltas
start_ix = minutes_per_day * i
end_ix = start_ix + minutes_per_day
minutes[start_ix:end_ix] = minute_values
return pd.to_datetime(minutes, utc=True, box=True)
def _sid_subdir_path(sid):
"""
    Format subdir path to limit the number of directories in any given
    subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters:
-----------
sid : int
Asset identifier.
Returns:
--------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, '06')
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid))
)
class BcolzMinuteBarMetadata(object):
"""
Parameters
----------
first_trading_day : datetime-like
UTC midnight of the first day available in the dataset.
minute_index : pd.DatetimeIndex
The minutes which act as an index into the corresponding values
written into each sid's ctable.
market_opens : pd.DatetimeIndex
The market opens for each day in the data set. (Not yet required.)
market_closes : pd.DatetimeIndex
The market closes for each day in the data set. (Not yet required.)
ohlc_ratio : int
The factor by which the pricing data is multiplied so that the
float data can be stored as an integer.
"""
METADATA_FILENAME = 'metadata.json'
@classmethod
def metadata_path(cls, rootdir):
return os.path.join(rootdir, cls.METADATA_FILENAME)
@classmethod
def read(cls, rootdir):
path = cls.metadata_path(rootdir)
with open(path) as fp:
raw_data = json.load(fp)
first_trading_day = pd.Timestamp(
raw_data['first_trading_day'], tz='UTC')
market_opens = pd.to_datetime(raw_data['market_opens'],
unit='m',
utc=True)
market_closes = pd.to_datetime(raw_data['market_closes'],
unit='m',
utc=True)
ohlc_ratio = raw_data['ohlc_ratio']
return cls(first_trading_day,
market_opens,
market_closes,
ohlc_ratio)
def __init__(self, first_trading_day,
market_opens,
market_closes,
ohlc_ratio):
self.first_trading_day = first_trading_day
self.market_opens = market_opens
self.market_closes = market_closes
self.ohlc_ratio = ohlc_ratio
def write(self, rootdir):
"""
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
minute_index : list of integers
nanosecond integer representation of the minutes, the enumeration
of which corresponds to the values in each bcolz carray.
ohlc_ratio : int
The factor by which the pricing data is multiplied so that the
float data can be stored as an integer.
"""
metadata = {
'first_trading_day': str(self.first_trading_day.date()),
'market_opens': self.market_opens.values.
astype('datetime64[m]').
astype(np.int64).tolist(),
'market_closes': self.market_closes.values.
astype('datetime64[m]').
astype(np.int64).tolist(),
'ohlc_ratio': self.ohlc_ratio,
}
with open(self.metadata_path(rootdir), 'w+') as fp:
json.dump(metadata, fp)
class BcolzMinuteBarWriter(object):
"""
Class capable of writing minute OHLCV data to disk into bcolz format.
Parameters
----------
first_trading_day : datetime
The first trading day in the data set.
rootdir : string
Path to the root directory into which to write the metadata and
bcolz subdirectories.
market_opens : pd.Series
The market opens used as a starting point for each periodic span of
minutes in the index.
The index of the series is expected to be a DatetimeIndex of the
UTC midnight of each trading day.
The values are datetime64-like UTC market opens for each day in the
index.
market_closes : pd.Series
The market closes that correspond with the market opens,
The index of the series is expected to be a DatetimeIndex of the
UTC midnight of each trading day.
The values are datetime64-like UTC market opens for each day in the
index.
The closes are written so that the reader can filter out non-market
minutes even though the tail end of early closes are written in
the data arrays to keep a regular shape.
minutes_per_day : int
The number of minutes per each period. Defaults to 390, the mode
of minutes in NYSE trading days.
ohlc_ratio : int, optional
The ratio by which to multiply the pricing data to convert the
floats from floats to an integer to fit within the np.uint32.
The default is 1000 to support pricing data which comes in to the
thousands place.
expectedlen : int, optional
The expected length of the dataset, used when creating the initial
bcolz ctable.
If the expectedlen is not used, the chunksize and corresponding
compression ratios are not ideal.
Defaults to supporting 15 years of NYSE equity market data.
see: http://bcolz.blosc.org/opt-tips.html#informing-about-the-length-of-your-carrays # noqa
Notes
-----
Writes a bcolz directory for each individual sid, all contained within
a root directory which also contains metadata about the entire dataset.
Each individual asset's data is stored as a bcolz table with a column for
each pricing field: (open, high, low, close, volume)
The open, high, low, and close columns are integers which are 1000 times
    the quoted price, so that the data can be represented and stored as an
np.uint32, supporting market prices quoted up to the thousands place.
volume is a np.uint32 with no mutation of the tens place.
The 'index' for each individual asset are a repeating period of minutes of
length `minutes_per_day` starting from each market open.
The file format does not account for half-days.
e.g.:
2016-01-19 14:31
2016-01-19 14:32
...
2016-01-19 20:59
2016-01-19 21:00
2016-01-20 14:31
2016-01-20 14:32
...
2016-01-20 20:59
2016-01-20 21:00
All assets are written with a common 'index', sharing a common first
trading day. Assets that do not begin trading until after the first trading
    day will have zeros for all pricing data up until data is traded.
'index' is in quotations, because bcolz does not provide an index. The
format allows index-like behavior by writing each minute's data into the
corresponding position of the enumeration of the aforementioned datetime
index.
The datetimes which correspond to each position are written in the metadata
as integer nanoseconds since the epoch into the `minute_index` key.
See Also
--------
zipline.data.minute_bars.BcolzMinuteBarReader
"""
COL_NAMES = ('open', 'high', 'low', 'close', 'volume')
def __init__(self,
first_trading_day,
rootdir,
market_opens,
market_closes,
minutes_per_day,
ohlc_ratio=OHLC_RATIO,
expectedlen=DEFAULT_EXPECTEDLEN):
self._rootdir = rootdir
self._first_trading_day = first_trading_day
self._market_opens = market_opens[
market_opens.index.slice_indexer(start=self._first_trading_day)]
self._market_closes = market_closes[
market_closes.index.slice_indexer(start=self._first_trading_day)]
self._trading_days = market_opens.index
self._minutes_per_day = minutes_per_day
self._expectedlen = expectedlen
self._ohlc_ratio = ohlc_ratio
self._minute_index = _calc_minute_index(
self._market_opens, self._minutes_per_day)
metadata = BcolzMinuteBarMetadata(
self._first_trading_day,
self._market_opens,
self._market_closes,
self._ohlc_ratio,
)
metadata.write(self._rootdir)
@property
def first_trading_day(self):
return self._first_trading_day
def sidpath(self, sid):
"""
Parameters:
-----------
sid : int
Asset identifier.
Returns:
--------
out : string
Full path to the bcolz rootdir for the given sid.
"""
sid_subdir = _sid_subdir_path(sid)
return join(self._rootdir, sid_subdir)
def last_date_in_output_for_sid(self, sid):
"""
Parameters:
-----------
sid : int
Asset identifier.
Returns:
--------
out : pd.Timestamp
The midnight of the last date written in to the output for the
given sid.
"""
sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid))
if not os.path.exists(sizes_path):
return pd.NaT
with open(sizes_path, mode='r') as f:
sizes = f.read()
data = json.loads(sizes)
num_days = data['shape'][0] / self._minutes_per_day
if num_days == 0:
# empty container
return pd.NaT
return self._trading_days[num_days - 1]
def _init_ctable(self, path):
"""
Create empty ctable for given path.
Parameters:
-----------
path : string
The path to rootdir of the new ctable.
"""
# Only create the containing subdir on creation.
# This is not to be confused with the `.bcolz` directory, but is the
# directory up one level from the `.bcolz` directories.
sid_containing_dirname = os.path.dirname(path)
if not os.path.exists(sid_containing_dirname):
# Other sids may have already created the containing directory.
os.makedirs(sid_containing_dirname)
initial_array = np.empty(0, np.uint32)
table = ctable(
rootdir=path,
columns=[
initial_array,
initial_array,
initial_array,
initial_array,
initial_array,
],
names=[
'open',
'high',
'low',
'close',
'volume'
],
expectedlen=self._expectedlen,
mode='w',
)
table.flush()
return table
def _ensure_ctable(self, sid):
"""Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid)
if not os.path.exists(sidpath):
return self._init_ctable(sidpath)
return bcolz.ctable(rootdir=sidpath, mode='a')
def _zerofill(self, table, numdays):
# Compute the number of minutes to be filled, accounting for the
# possibility of a partial day's worth of minutes existing for
# the previous day.
minute_offset = len(table) % self._minutes_per_day
num_to_prepend = numdays * self._minutes_per_day - minute_offset
prepend_array = np.zeros(num_to_prepend, np.uint32)
# Fill all OHLCV with zeros.
table.append([prepend_array] * 5)
table.flush()
def pad(self, sid, date):
"""
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
including the specified date) will be padded with `minute_per_day`
worth of zeros
Parameters:
-----------
sid : int
The asset identifier for the data being written.
date : datetime-like
            The date used to calculate how many slots to pad.
The padding is done through the date, i.e. after the padding is
done the `last_date_in_output_for_sid` will be equal to `date`
"""
table = self._ensure_ctable(sid)
last_date = self.last_date_in_output_for_sid(sid)
tds = self._trading_days
if date <= last_date or date < tds[0]:
# No need to pad.
return
if last_date == pd.NaT:
# If there is no data, determine how many days to add so that
# desired days are written to the correct slots.
days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
days_to_zerofill = tds[tds.slice_indexer(
start=last_date + tds.freq,
end=date)]
self._zerofill(table, len(days_to_zerofill))
new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
new_last_date, date)
def set_sid_attrs(self, sid, **kwargs):
"""Write all the supplied kwargs as attributes of the sid's file.
"""
table = self._ensure_ctable(sid)
for k, v in kwargs.items():
table.attrs[k] = v
def write(self, data, show_progress=False):
"""Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
"""
ctx = maybe_show_progress(
data,
show_progress=show_progress,
item_show_func=lambda e: e if e is None else str(e[0]),
label="Merging minute equity files:",
)
write_sid = self.write_sid
with ctx as it:
for e in it:
write_sid(*e)
def write_sid(self, sid, df):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable is not exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters:
-----------
sid : int
            The asset identifier for the data being written.
df : pd.DataFrame
DataFrame of market data with the following characteristics.
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
"""
cols = {
'open': df.open.values,
'high': df.high.values,
'low': df.low.values,
'close': df.close.values,
'volume': df.volume.values,
}
dts = df.index.values
# Call internal method, since DataFrame has already ensured matching
# index and value lengths.
self._write_cols(sid, dts, cols)
def write_cols(self, sid, dts, cols):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable is not exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters:
-----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES):
raise BcolzMinuteWriterColumnMismatch(
"Length of dts={0} should match cols: {1}".format(
len(dts),
" ".join("{0}={1}".format(name, len(cols[name]))
for name in self.COL_NAMES)))
self._write_cols(sid, dts, cols)
def _write_cols(self, sid, dts, cols):
"""
Internal method for `write_cols` and `write`.
Parameters:
-----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
table = self._ensure_ctable(sid)
tds = self._trading_days
input_first_day = pd.Timestamp(dts[0].astype('datetime64[D]'),
tz='UTC')
last_date = self.last_date_in_output_for_sid(sid)
day_before_input = input_first_day - tds.freq
self.pad(sid, day_before_input)
table = self._ensure_ctable(sid)
# Get the number of minutes already recorded in this sid's ctable
num_rec_mins = table.size
all_minutes = self._minute_index
# Get the latest minute we wish to write to the ctable
last_minute_to_write = dts[-1]
# In the event that we've already written some minutely data to the
        # ctable, guard against overwriting that data.
if num_rec_mins > 0:
last_recorded_minute = np.datetime64(all_minutes[num_rec_mins - 1])
if last_minute_to_write <= last_recorded_minute:
raise BcolzMinuteOverlappingData(dedent("""
Data with last_date={0} already includes input start={1} for
sid={2}""".strip()).format(last_date, input_first_day, sid))
latest_min_count = all_minutes.get_loc(last_minute_to_write)
# Get all the minutes we wish to write (all market minutes after the
# latest currently written, up to and including last_minute_to_write)
all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1]
minutes_count = all_minutes_in_window.size
open_col = np.zeros(minutes_count, dtype=np.uint32)
high_col = np.zeros(minutes_count, dtype=np.uint32)
low_col = np.zeros(minutes_count, dtype=np.uint32)
close_col = np.zeros(minutes_count, dtype=np.uint32)
vol_col = np.zeros(minutes_count, dtype=np.uint32)
dt_ixs = np.searchsorted(all_minutes_in_window.values,
dts.astype('datetime64[ns]'))
ohlc_ratio = self._ohlc_ratio
def convert_col(col):
"""Adapt float column into a uint32 column.
"""
return (np.nan_to_num(col) * ohlc_ratio).astype(np.uint32)
open_col[dt_ixs] = convert_col(cols['open'])
high_col[dt_ixs] = convert_col(cols['high'])
low_col[dt_ixs] = convert_col(cols['low'])
close_col[dt_ixs] = convert_col(cols['close'])
vol_col[dt_ixs] = cols['volume'].astype(np.uint32)
table.append([
open_col,
high_col,
low_col,
close_col,
vol_col
])
table.flush()
class BcolzMinuteBarReader(object):
"""
Reader for data written by BcolzMinuteBarWriter
Parameters:
-----------
rootdir : string
The root directory containing the metadata and asset bcolz
directories.
See Also
--------
zipline.data.minute_bars.BcolzMinuteBarWriter
"""
FIELDS = ('open', 'high', 'low', 'close', 'volume')
def __init__(self, rootdir, sid_cache_size=1000):
self._rootdir = rootdir
metadata = self._get_metadata()
self._first_trading_day = metadata.first_trading_day
self._market_opens = metadata.market_opens
self._market_open_values = metadata.market_opens.values.\
astype('datetime64[m]').astype(np.int64)
self._market_closes = metadata.market_closes
self._market_close_values = metadata.market_closes.values.\
astype('datetime64[m]').astype(np.int64)
self._ohlc_inverse = 1.0 / metadata.ohlc_ratio
self._carrays = {
field: LRUCache(maxsize=sid_cache_size)
for field in self.FIELDS
}
self._last_get_value_dt_position = None
self._last_get_value_dt_value = None
def _get_metadata(self):
return BcolzMinuteBarMetadata.read(self._rootdir)
@lazyval
def last_available_dt(self):
return self._market_closes[-1]
@property
def first_trading_day(self):
return self._first_trading_day
def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns:
--------
List of DatetimeIndex representing the minutes to exclude because
of early closes.
"""
market_opens = self._market_opens.values.astype('datetime64[m]')
market_closes = self._market_closes.values.astype('datetime64[m]')
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(
minutes_per_day != US_EQUITIES_MINUTES_PER_DAY - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close)
for market_open, early_close
in zip(early_opens, early_closes)]
return minutes
@lazyval
def _minute_exclusion_tree(self):
"""
Build an interval tree keyed by the start and end of each range
        of positions that should be dropped from windows. (These are the minutes
between an early close and the minute which would be the close based
on the regular period if there were no early close.)
The value of each node is the same start and end position stored as
a tuple.
The data is stored as such in support of a fast answer to the question,
does a given start and end position overlap any of the exclusion spans?
Returns
-------
IntervalTree containing nodes which represent the minutes to exclude
because of early closes.
"""
itree = IntervalTree()
for market_open, early_close in self._minutes_to_exclude():
start_pos = self._find_position_of_minute(early_close) + 1
end_pos = (
self._find_position_of_minute(market_open)
+
US_EQUITIES_MINUTES_PER_DAY
-
1
)
data = (start_pos, end_pos)
itree[start_pos:end_pos + 1] = data
return itree
def _exclusion_indices_for_range(self, start_idx, end_idx):
"""
Returns
-------
List of tuples of (start, stop) which represent the ranges of minutes
which should be excluded when a market minute window is requested.
"""
itree = self._minute_exclusion_tree
if itree.overlaps(start_idx, end_idx):
ranges = []
intervals = itree[start_idx:end_idx]
for interval in intervals:
ranges.append(interval.data)
return sorted(ranges)
else:
return None
def _get_carray_path(self, sid, field):
sid_subdir = _sid_subdir_path(sid)
# carrays are subdirectories of the sid's rootdir
return os.path.join(self._rootdir, sid_subdir, field)
def _open_minute_file(self, field, sid):
sid = int(sid)
try:
carray = self._carrays[field][sid]
except KeyError:
carray = self._carrays[field][sid] = \
bcolz.carray(rootdir=self._get_carray_path(sid, field),
mode='r')
return carray
def get_sid_attr(self, sid, name):
sid_subdir = _sid_subdir_path(sid)
sid_path = os.path.join(self._rootdir, sid_subdir)
attrs = bcolz.attrs.attrs(sid_path, 'r')
try:
return attrs[name]
except KeyError:
return None
def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters:
-----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns:
--------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
"""
if self._last_get_value_dt_value == dt.value:
minute_pos = self._last_get_value_dt_position
else:
minute_pos = self._find_position_of_minute(dt)
self._last_get_value_dt_value = dt.value
self._last_get_value_dt_position = minute_pos
try:
value = self._open_minute_file(field, sid)[minute_pos]
except IndexError:
value = 0
if value == 0:
if field == 'volume':
return 0
else:
return np.nan
if field != 'volume':
value *= self._ohlc_inverse
return value
def get_last_traded_dt(self, asset, dt):
minute_pos = self._find_last_traded_position(asset, dt)
if minute_pos == -1:
return pd.NaT
return self._pos_to_minute(minute_pos)
def _find_last_traded_position(self, asset, dt):
volumes = self._open_minute_file('volume', asset)
start_date_minutes = asset.start_date.value / NANOS_IN_MINUTE
dt_minutes = dt.value / NANOS_IN_MINUTE
if dt_minutes < start_date_minutes:
return -1
return find_last_traded_position_internal(
self._market_open_values,
self._market_close_values,
dt_minutes,
start_date_minutes,
volumes,
US_EQUITIES_MINUTES_PER_DAY
)
def _pos_to_minute(self, pos):
minute_epoch = minute_value(
self._market_open_values,
pos,
US_EQUITIES_MINUTES_PER_DAY
)
return pd.Timestamp(minute_epoch, tz='UTC', unit="m")
def _find_position_of_minute(self, minute_dt):
"""
Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non market minutes to the last close.
ex. this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day.
"""
return find_position_of_minute(
self._market_open_values,
self._market_close_values,
minute_dt.value / NANOS_IN_MINUTE,
US_EQUITIES_MINUTES_PER_DAY,
)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
start_idx = self._find_position_of_minute(start_dt)
end_idx = self._find_position_of_minute(end_dt)
num_minutes = (end_idx - start_idx + 1)
results = []
indices_to_exclude = self._exclusion_indices_for_range(
start_idx, end_idx)
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude:
length = excl_stop - excl_start + 1
num_minutes -= length
shape = num_minutes, len(sids)
for field in fields:
if field != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, sid in enumerate(sids):
carray = self._open_minute_file(field, sid)
values = carray[start_idx:end_idx + 1]
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude[::-1]:
excl_slice = np.s_[
excl_start - start_idx:excl_stop - start_idx + 1]
values = np.delete(values, excl_slice)
where = values != 0
# first slice down to len(where) because we might not have
# written data for all the minutes requested
out[:len(where), i][where] = values[where]
if field != 'volume':
out *= self._ohlc_inverse
results.append(out)
return results
| apache-2.0 |
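A rough end-to-end sketch of the writer/reader pair defined above. The two-day calendar, temporary root directory, sid, and flat OHLCV values are invented for illustration; a real ingestion would derive the opens and closes from a trading calendar.

import os
import numpy as np
import pandas as pd
from zipline.data.minute_bars import (
    BcolzMinuteBarReader, BcolzMinuteBarWriter, US_EQUITIES_MINUTES_PER_DAY)

rootdir = '/tmp/minute_bars_demo'  # placeholder output directory
if not os.path.exists(rootdir):
    os.makedirs(rootdir)

# Two consecutive business days; UTC midnights index the open/close Series.
days = pd.DatetimeIndex(['2016-01-19', '2016-01-20'], tz='UTC', freq='B')
market_opens = pd.Series(days + pd.Timedelta(hours=14, minutes=31), index=days)
market_closes = pd.Series(days + pd.Timedelta(hours=21), index=days)

writer = BcolzMinuteBarWriter(
    first_trading_day=days[0],
    rootdir=rootdir,
    market_opens=market_opens,
    market_closes=market_closes,
    minutes_per_day=US_EQUITIES_MINUTES_PER_DAY,
)

# One full 390-minute session of constant prices for sid 1.
minutes = pd.date_range(market_opens.iloc[0], periods=390, freq='T')
bars = pd.DataFrame({
    'open': np.full(390, 10.0), 'high': np.full(390, 10.5),
    'low': np.full(390, 9.5), 'close': np.full(390, 10.25),
    'volume': np.full(390, 100.0),
}, index=minutes)
writer.write_sid(1, bars)

reader = BcolzMinuteBarReader(rootdir)
print(reader.get_value(1, minutes[0], 'close'))  # 10.25 after the uint32 round-trip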
trankmichael/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/_build_utils/__init__.py | 21 | 1125 | """
Utilities useful during the build.
"""
# author: Andy Mueller, Gael Varoquaux
# license: BSD
from __future__ import division, print_function, absolute_import
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'sklearn'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
from numpy.distutils.system_info import get_info
def get_blas_info():
def atlas_not_found(blas_info_):
        def_macros = blas_info_.get('define_macros', [])
for x in def_macros:
if x[0] == "NO_ATLAS_INFO":
# if x[1] != 1 we should have lapack
# how do we do that now?
return True
if x[0] == "ATLAS_INFO":
if "None" in x[1]:
# this one turned up on FreeBSD
return True
return False
blas_info = get_info('blas_opt', 0)
if (not blas_info) or atlas_not_found(blas_info):
cblas_libs = ['cblas']
blas_info.pop('libraries', None)
else:
cblas_libs = blas_info.pop('libraries', [])
return cblas_libs, blas_info
| bsd-3-clause |
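A small sketch of how a package's setup script might consume `get_blas_info`, mirroring the pattern scikit-learn's own sub-package setup files use; the configuration name and extension sources below are placeholders.

from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info

def configuration(parent_package='', top_path=None):
    config = Configuration('mypkg', parent_package, top_path)  # placeholder package
    cblas_libs, blas_info = get_blas_info()
    config.add_extension(
        '_fast_ops',                  # placeholder extension name
        sources=['_fast_ops.c'],      # placeholder source file
        libraries=cblas_libs,
        include_dirs=blas_info.pop('include_dirs', []),
        extra_compile_args=blas_info.pop('extra_compile_args', []),
        **blas_info)
    return config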
Adai0808/BuildingMachineLearningSystemsWithPython | ch10/chapter.py | 20 | 4997 | import numpy as np
import mahotas as mh
image = mh.imread('scene00.jpg')
from matplotlib import pyplot as plt
plt.imshow(image)
plt.show()
image = mh.colors.rgb2grey(image, dtype=np.uint8)
plt.imshow(image) # Display the image
plt.gray()
thresh = mh.thresholding.otsu(image)
print('Otsu threshold is {}.'.format(thresh))
# Otsu threshold is 138.
plt.imshow(image > thresh)
im16 = mh.gaussian_filter(image,16)
im = mh.demos.load('lenna')
r,g,b = im.transpose(2,0,1)
r12 = mh.gaussian_filter(r, 12.)
g12 = mh.gaussian_filter(g, 12.)
b12 = mh.gaussian_filter(b, 12.)
im12 = mh.as_rgb(r12,g12,b12)
h, w = r.shape # height and width
Y, X = np.mgrid[:h,:w]
Y = Y-h/2. # center at h/2
Y = Y / Y.max() # normalize to -1 .. +1
X = X-w/2.
X = X / X.max()
C = np.exp(-2.*(X**2+ Y**2))
# Normalize again to 0..1
C = C - C.min()
C = C / C.ptp()
C = C[:,:,None] # This adds a dummy third dimension to C
ringed = mh.stretch(im*C + (1-C)*im12)
haralick_features = mh.features.haralick(image)
haralick_features_mean = np.mean(haralick_features, axis=0)
haralick_features_all = np.ravel(haralick_features)
from glob import glob
images = glob('../SimpleImageDataset/*.jpg')
features = []
labels = []
for im in images:
labels.append(im[:-len('00.jpg')])
im = mh.imread(im)
im = mh.colors.rgb2gray(im, dtype=np.uint8)
features.append(mh.features.haralick(im).ravel())
features = np.array(features)
labels = np.array(labels)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
clf = Pipeline([('preproc', StandardScaler()),
('classifier', LogisticRegression())])
from sklearn import cross_validation
cv = cross_validation.LeaveOneOut(len(images))
scores = cross_validation.cross_val_score(
clf, features, labels, cv=cv)
print('Accuracy: {:.1%}'.format(scores.mean()))
# Accuracy: 81.1%
def chist(im):
im = im // 64
r,g,b = im.transpose((2,0,1))
pixels = 1 * r + 4 * b + 16 * g
hist = np.bincount(pixels.ravel(), minlength=64)
hist = hist.astype(float)
hist = np.log1p(hist)
return hist
features = []
for im in images:
im = mh.imread(im)
features.append(chist(im))
features = []
for im in images:
imcolor = mh.imread(im)
im = mh.colors.rgb2gray(imcolor, dtype=np.uint8)
features.append(np.concatenate([
mh.features.haralick(im).ravel(),
chist(imcolor),
]))
scores = cross_validation.cross_val_score(
clf, features, labels, cv=cv)
print('Accuracy: {:.1%}'.format(scores.mean()))
# Accuracy: 95.6%
features = []
for im in images:
imcolor = mh.imread(im)
# Ignore everything in the 200 pixels close to the borders
imcolor = imcolor[200:-200, 200:-200]
im = mh.colors.rgb2gray(imcolor, dtype=np.uint8)
features.append(np.concatenate([
mh.features.haralick(im).ravel(),
chist(imcolor),
]))
sc = StandardScaler()
features = sc.fit_transform(features)
from scipy.spatial import distance
dists = distance.squareform(distance.pdist(features))
fig, axes = plt.subplots(2, 9)
for ci,i in enumerate(range(0,90,10)):
left = images[i]
dists_left = dists[i]
right = dists_left.argsort()
# right[0] is the same as left[i], so pick the next closest element
right = right[1]
right = images[right]
left = mh.imread(left)
right = mh.imread(right)
axes[0, ci].imshow(left)
axes[1, ci].imshow(right)
from sklearn.grid_search import GridSearchCV
C_range = 10.0 ** np.arange(-4, 3)
grid = GridSearchCV(LogisticRegression(), param_grid={'C' : C_range})
clf = Pipeline([('preproc', StandardScaler()),
('classifier', grid)])
cv = cross_validation.KFold(len(features), 5,
shuffle=True, random_state=123)
scores = cross_validation.cross_val_score(
clf, features, labels, cv=cv)
print('Accuracy: {:.1%}'.format(scores.mean()))
from mahotas.features import surf
image = mh.demos.load('lena')
image = mh.colors.rgb2gray(image, dtype=np.uint8)
descriptors = surf.surf(image, descriptor_only=True)
from mahotas.features import surf
descriptors = surf.dense(image, spacing=16)
alldescriptors = []
for im in images:
im = mh.imread(im, as_grey=True)
im = im.astype(np.uint8)
alldescriptors.append(surf.dense(image, spacing=16))
# get all descriptors into a single array
concatenated = np.concatenate(alldescriptors)
print('Number of descriptors: {}'.format(
len(concatenated)))
# use only every 64th vector
concatenated = concatenated[::64]
from sklearn.cluster import KMeans # FIXME CAPITALIZATION
k = 256
km = KMeans(k)
km.fit(concatenated)
features = []
for d in alldescriptors:
c = km.predict(d)
features.append(
np.array([np.sum(c == ci) for ci in range(k)])
)
# build single array and convert to float
features = np.array(features, dtype=float)
scores = cross_validation.cross_val_score(
clf, features, labels, cv=cv)
print('Accuracy: {:.1%}'.format(scores.mean()))
# Accuracy: 62.6%
| mit |
MarekSalat/pov-sheet-music-reader | src/test.py | 1 | 1881 | '''
Run python test.py [N] [output]
[N] test size (default = 100)
[output] output file (default = ../matrix.txt)
Expects test&training data in ../training/
'''
__author__ = 'Matej'
import numpy as np
import cv2
from matplotlib import pyplot as plt
from knn.knn_classification import Classification
import os.path
import random
import sys
knn = Classification()
ycount = len(knn.yvector)
# create training set
full_set_size = 1001
full_set = []
for i in range(full_set_size): full_set.append(i)
# create testing set - get count
if (len(sys.argv) > 1):
if (int(sys.argv[1]) < full_set_size):
test_set_size = int(sys.argv[1])
else:
test_set_size = full_set_size
else:
test_set_size = full_set_size/10
subset_rest = random.sample(full_set, test_set_size)
if (len(sys.argv) > 2):
filename = sys.argv[2]
else:
filename = "../matrix.txt"
print "Set size:",len(subset_rest)
print ""
good = 0
bad = 0
matrix = dict()
for x in knn.yvector:
vec = dict()
for y in knn.yvector:
vec[y] = 0
matrix[x] = vec
for j in knn.yvector:
print "Testing:", j, "..."
for i in subset_rest:
img = cv2.imread('../training/' + j + '/' + j + '(' + str(i) + ').png', cv2.CV_LOAD_IMAGE_GRAYSCALE)
        if img is not None:
what, dist = knn.classify(img)
if (j == what): good += 1
else: bad += 1
matrix[j][what] += 1
# print "Expected:", j, "Got:", what, "Dist:", dist
print ""
print "Good:", good
print "Bad:", bad
print "Accuracy:", good*100.0/(test_set_size*len(knn.yvector)), "%"
# write matrix to file
file = open(filename, "w")
for i in knn.yvector:
file.write("\t" + i)
file.write("\r\n")
for i in knn.yvector:
file.write(i)
for j in knn.yvector:
file.write("\t" + str(matrix[i][j]))
file.write("\r\n")
file.close()
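# Hedged addition (not in the original script): per-class accuracy derived from
# the confusion matrix collected above.
print "Per-class accuracy:"
for label in knn.yvector:
    total = sum(matrix[label].values())
    if total > 0:
        print label, ":", "%.1f %%" % (matrix[label][label] * 100.0 / total)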
| mit |
Astroua/TurbuStat | turbustat/statistics/delta_variance/delta_variance.py | 2 | 35092 | # Licensed under an MIT open source license - see LICENSE
from __future__ import (print_function, absolute_import, division,
unicode_literals)
import numpy as np
from astropy import units as u
from astropy.wcs import WCS
from copy import copy
import statsmodels.api as sm
from warnings import warn
from astropy.utils.console import ProgressBar
from ..base_statistic import BaseStatisticMixIn
from ...io import common_types, twod_types, input_data
from ..stats_utils import common_scale, padwithzeros
from ..fitting_utils import check_fit_limits, residual_bootstrap
from .kernels import core_kernel, annulus_kernel
from ..stats_warnings import TurbuStatMetricWarning
from ..lm_seg import Lm_Seg
from ..convolve_wrapper import convolution_wrapper
class DeltaVariance(BaseStatisticMixIn):
"""
The delta-variance technique as described in Ossenkopf et al. (2008).
Parameters
----------
img : %(dtypes)s
The image calculate the delta-variance of.
header : FITS header, optional
Image header. Required when img is a `~numpy.ndarray`.
weights : %(dtypes)s
Weights to be used.
diam_ratio : float, optional
The ratio between the kernel sizes.
lags : numpy.ndarray or list, optional
The pixel scales to compute the delta-variance at.
nlags : int, optional
Number of lags to use.
distance : `~astropy.units.Quantity`, optional
Physical distance to the region in the data.
Examples
--------
>>> from turbustat.statistics import DeltaVariance
>>> from astropy.io import fits
>>> moment0 = fits.open("2D.fits") # doctest: +SKIP
>>> delvar = DeltaVariance(moment0) # doctest: +SKIP
>>> delvar.run(verbose=True) # doctest: +SKIP
"""
__doc__ %= {"dtypes": " or ".join(common_types + twod_types)}
def __init__(self, img, header=None, weights=None, diam_ratio=1.5,
lags=None, nlags=25, distance=None):
super(DeltaVariance, self).__init__()
# Set the data and perform checks
self.input_data_header(img, header)
self.diam_ratio = diam_ratio
if weights is None:
# self.weights = np.ones(self.data.shape)
self.weights = np.isfinite(self.data).astype(float)
else:
self.weights = input_data(weights, no_header=True)
if distance is not None:
self.distance = distance
if lags is None:
min_size = 3.0
self.lags = \
np.logspace(np.log10(min_size),
np.log10(min(self.data.shape) / 2.), nlags) * u.pix
else:
# Check if the given lags are a Quantity
# Default to pixel scales if it isn't
if not hasattr(lags, "value"):
self.lags = lags * u.pix
else:
self.lags = self._to_pixel(lags)
self._convolved_arrays = []
self._convolved_weights = []
@property
def lags(self):
'''
Lag values.
'''
return self._lags
@lags.setter
def lags(self, values):
if not isinstance(values, u.Quantity):
raise TypeError("lags must be given as an astropy.units.Quantity.")
pix_lags = self._to_pixel(values)
if np.any(pix_lags.value < 1):
raise ValueError("At least one of the lags is smaller than one "
"pixel. Remove these lags from the array.")
# Catch floating point issues in comparing to half the image shape
half_comp = (np.floor(pix_lags.value) - min(self.data.shape) / 2.)
if np.any(half_comp > 1e-10):
raise ValueError("At least one of the lags is larger than half of"
" the image size. Remove these lags from the "
"array.")
self._lags = values
@property
def weights(self):
'''
Array of weights.
'''
return self._weights
@weights.setter
def weights(self, arr):
if arr.shape != self.data.shape:
raise ValueError("Given weight array does not match the shape of "
"the given image.")
self._weights = arr
def compute_deltavar(self, allow_huge=False, boundary='wrap',
min_weight_frac=0.01, nan_treatment='fill',
preserve_nan=False,
use_pyfftw=False, threads=1,
pyfftw_kwargs={},
show_progress=True,
keep_convolve_arrays=False):
'''
Perform the convolution and calculate the delta variance at all lags.
Parameters
----------
allow_huge : bool, optional
Passed to `~astropy.convolve.convolve_fft`. Allows operations on
images larger than 1 Gb.
boundary : {"wrap", "fill"}, optional
Use "wrap" for periodic boundaries, and "fill" for non-periodic.
min_weight_frac : float, optional
Set the fraction of the peak of the weight array to mask below.
Default is 0.01. This will remove most edge artifacts, but is
not guaranteed to! Increase this value if artifacts are
encountered (this typically results in large spikes in the
delta-variance curve).
nan_treatment : {'interpolate', 'fill'}, optional
Enable to interpolate over NaNs in the convolution. Default is
'fill'.
use_pyfftw : bool, optional
Enable to use pyfftw, if it is installed.
threads : int, optional
Number of threads to use in FFT when using pyfftw.
pyfftw_kwargs : Passed to
See `here <http://hgomersall.github.io/pyFFTW/pyfftw/builders/builders.html>`_
for a list of accepted kwargs.
show_progress : bool, optional
Show a progress bar while convolving the image at each lag.
keep_convolve_arrays : bool, optional
Keeps the convolved arrays at each lag. Disabled by default to
minimize memory usage.
'''
self._delta_var = np.empty((len(self.lags)))
self._delta_var_error = np.empty((len(self.lags)))
if show_progress:
bar = ProgressBar(len(self.lags))
for i, lag in enumerate(self.lags.value):
core = core_kernel(lag, self.data.shape[0], self.data.shape[1])
annulus = annulus_kernel(lag, self.diam_ratio, self.data.shape[0],
self.data.shape[1])
if boundary == "wrap":
# Don't pad for periodic boundaries
pad_weights = self.weights
pad_img = self.data * self.weights
elif boundary == "fill":
# Extend to avoid boundary effects from non-periodicity
pad_weights = np.pad(self.weights, int(lag), padwithzeros)
pad_img = np.pad(self.data, int(lag), padwithzeros) * \
pad_weights
else:
raise ValueError("boundary must be 'wrap' or 'fill'. "
"Given {}".format(boundary))
img_core = \
convolution_wrapper(pad_img, core, boundary=boundary,
fill_value=0. if nan_treatment=='fill' else np.NaN,
allow_huge=allow_huge,
nan_treatment=nan_treatment,
use_pyfftw=use_pyfftw,
threads=threads,
pyfftw_kwargs=pyfftw_kwargs)
img_annulus = \
convolution_wrapper(pad_img, annulus,
boundary=boundary,
fill_value=0. if nan_treatment=='fill' else np.NaN,
allow_huge=allow_huge,
nan_treatment=nan_treatment,
use_pyfftw=use_pyfftw,
threads=threads,
pyfftw_kwargs=pyfftw_kwargs)
weights_core = \
convolution_wrapper(pad_weights, core,
boundary=boundary,
fill_value=0. if nan_treatment=='fill' else np.NaN,
allow_huge=allow_huge,
nan_treatment=nan_treatment,
use_pyfftw=use_pyfftw,
threads=threads,
pyfftw_kwargs=pyfftw_kwargs)
weights_annulus = \
convolution_wrapper(pad_weights, annulus,
boundary=boundary,
fill_value=0. if nan_treatment=='fill' else np.NaN,
allow_huge=allow_huge,
nan_treatment=nan_treatment,
use_pyfftw=use_pyfftw,
threads=threads,
pyfftw_kwargs=pyfftw_kwargs)
cutoff_val = min_weight_frac * self.weights.max()
weights_core[np.where(weights_core <= cutoff_val)] = np.NaN
weights_annulus[np.where(weights_annulus <= cutoff_val)] = np.NaN
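            # Weighted delta-variance map (Ossenkopf et al. 2008): subtract the
            # annulus-smoothed image from the core-smoothed image, each
            # normalized by its own smoothed weight map.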
conv_arr = (img_core / weights_core) - \
(img_annulus / weights_annulus)
conv_weight = weights_core * weights_annulus
if preserve_nan:
conv_arr[np.isnan(pad_img)] = np.NaN
if keep_convolve_arrays:
self._convolved_arrays.append(conv_arr)
self._convolved_weights.append(weights_core * weights_annulus)
val, err = _delvar(conv_arr, conv_weight, lag)
if (val <= 0) or (err <= 0) or np.isnan(val) or np.isnan(err):
self._delta_var[i] = np.NaN
self._delta_var_error[i] = np.NaN
else:
self._delta_var[i] = val
self._delta_var_error[i] = err
if show_progress:
bar.update(i + 1)
@property
def convolve_arrays(self):
if len(self._convolved_arrays) == 0:
warn("Run `DeltaVariance.compute_deltavar` with "
"`keep_convolve_arrays=True`")
        return self._convolved_arrays
@property
def convolve_weights(self):
if len(self._convolved_weights) == 0:
warn("Run `DeltaVariance.compute_deltavar` with "
"`keep_convolve_arrays=True`")
        return self._convolved_weights
@property
def delta_var(self):
'''
Delta Variance values.
'''
return self._delta_var
@property
def delta_var_error(self):
'''
1-sigma errors on the Delta variance values.
'''
return self._delta_var_error
def fit_plaw(self, xlow=None, xhigh=None, brk=None, verbose=False,
bootstrap=False, bootstrap_kwargs={},
**fit_kwargs):
'''
Fit a power-law to the Delta-variance spectrum.
Parameters
----------
xlow : `~astropy.units.Quantity`, optional
Lower lag value to consider in the fit.
xhigh : `~astropy.units.Quantity`, optional
Upper lag value to consider in the fit.
brk : `~astropy.units.Quantity`, optional
Give an initial guess for a break point. This enables fitting
with a `turbustat.statistics.Lm_Seg`.
bootstrap : bool, optional
Bootstrap using the model residuals to estimate the standard
errors.
bootstrap_kwargs : dict, optional
Pass keyword arguments to `~turbustat.statistics.fitting_utils.residual_bootstrap`.
verbose : bool, optional
Show fit summary when enabled.
'''
x = np.log10(self.lags.value)
y = np.log10(self.delta_var)
if xlow is not None:
xlow = self._to_pixel(xlow)
lower_limit = x >= np.log10(xlow.value)
else:
lower_limit = \
np.ones_like(self.delta_var, dtype=bool)
xlow = self.lags.min() * 0.99
if xhigh is not None:
xhigh = self._to_pixel(xhigh)
upper_limit = x <= np.log10(xhigh.value)
else:
upper_limit = \
np.ones_like(self.delta_var, dtype=bool)
xhigh = self.lags.max() * 1.01
self._fit_range = [xlow, xhigh]
within_limits = np.logical_and(lower_limit, upper_limit)
y = y[within_limits]
x = x[within_limits]
weights = self.delta_var_error[within_limits] ** -2
min_fits_pts = 3
if brk is not None:
# Try fitting a segmented model
pix_brk = self._to_pixel(brk)
if pix_brk < xlow or pix_brk > xhigh:
raise ValueError("brk must be within xlow and xhigh.")
model = Lm_Seg(x, y, np.log10(pix_brk.value), weights=weights)
fit_kwargs['verbose'] = verbose
fit_kwargs['cov_type'] = 'HC3'
model.fit_model(**fit_kwargs)
self.fit = model.fit
if model.params.size == 5:
# Check to make sure this leaves enough to fit to.
if sum(x < model.brk) < min_fits_pts:
warn("Not enough points to fit to." +
" Ignoring break.")
self._brk = None
else:
good_pts = x.copy() < model.brk
x = x[good_pts]
y = y[good_pts]
self._brk = 10**model.brk * u.pix
self._slope = model.slopes
if bootstrap:
stderrs = residual_bootstrap(model.fit,
**bootstrap_kwargs)
self._slope_err = stderrs[1:-1]
self._brk_err = np.log(10) * self.brk.value * \
stderrs[-1] * u.pix
else:
self._slope_err = model.slope_errs
self._brk_err = np.log(10) * self.brk.value * \
model.brk_err * u.pix
self.fit = model.fit
else:
self._brk = None
# Break fit failed, revert to normal model
warn("Model with break failed, reverting to model\
without break.")
else:
self._brk = None
# Revert to model without break if none is given, or if the segmented
# model failed.
if self.brk is None:
x = sm.add_constant(x)
# model = sm.OLS(y, x, missing='drop')
model = sm.WLS(y, x, missing='drop', weights=weights)
self.fit = model.fit(cov_type='HC3')
self._slope = self.fit.params[1]
if bootstrap:
stderrs = residual_bootstrap(self.fit,
**bootstrap_kwargs)
self._slope_err = stderrs[1]
else:
self._slope_err = self.fit.bse[1]
self._bootstrap_flag = bootstrap
if verbose:
print(self.fit.summary())
if self._bootstrap_flag:
print("Bootstrapping used to find stderrs! "
"Errors may not equal those shown above.")
self._model = model
@property
def brk(self):
'''
Fitted break point.
'''
return self._brk
@property
def brk_err(self):
'''
1-sigma on the break point in the segmented linear model.
'''
return self._brk_err
@property
def slope(self):
'''
Fitted slope.
'''
return self._slope
@property
def slope_err(self):
'''
Standard error on the fitted slope.
'''
return self._slope_err
@property
def fit_range(self):
'''
Range of lags used in the fit.
'''
return self._fit_range
def fitted_model(self, xvals):
'''
Computes the fitted power-law in log-log space using the
given x values.
Parameters
----------
xvals : `~numpy.ndarray`
Values of log(lags) to compute the model at (base 10 log).
Returns
-------
model_values : `~numpy.ndarray`
Values of the model at the given values.
'''
if isinstance(self._model, Lm_Seg):
return self._model.model(xvals)
else:
return self.fit.params[0] + self.fit.params[1] * xvals
def plot_fit(self, save_name=None, xunit=u.pix, symbol='o', color='r',
fit_color='k', label=None,
show_residual=True):
'''
Plot the delta-variance curve and the fit.
Parameters
----------
save_name : str,optional
Save the figure when a file name is given.
xunit : u.Unit, optional
The unit to show the x-axis in.
symbol : str, optional
Shape to plot the data points with.
color : {str, RGB tuple}, optional
Color to show the delta-variance curve in.
fit_color : {str, RGB tuple}, optional
Color of the fitted line. Defaults to `color` when no input is
given.
label : str, optional
Label to later be used in a legend.
show_residual : bool, optional
Plot the fit residuals.
'''
if fit_color is None:
fit_color = color
import matplotlib.pyplot as plt
fig = plt.gcf()
axes = plt.gcf().get_axes()
if len(axes) == 0:
if show_residual:
ax = plt.subplot2grid((4, 1), (0, 0), colspan=1, rowspan=3)
ax_r = plt.subplot2grid((4, 1), (3, 0), colspan=1,
rowspan=1,
sharex=ax)
else:
ax = plt.subplot(111)
elif len(axes) == 1:
ax = axes[0]
else:
ax = axes[0]
ax_r = axes[1]
ax.set_xscale("log")
ax.set_yscale("log")
lags = self._spatial_unit_conversion(self.lags, xunit).value
# Check for NaNs
fin_vals = np.logical_or(np.isfinite(self.delta_var),
np.isfinite(self.delta_var_error))
ax.errorbar(lags[fin_vals], self.delta_var[fin_vals],
yerr=self.delta_var_error[fin_vals],
fmt="{}-".format(symbol), color=color,
label=label, zorder=-1)
xvals = np.linspace(self._fit_range[0].value,
self._fit_range[1].value,
100) * self.lags.unit
xvals_conv = self._spatial_unit_conversion(xvals, xunit).value
ax.plot(xvals_conv, 10**self.fitted_model(np.log10(xvals.value)),
'--', color=fit_color, linewidth=2)
xlow = \
self._spatial_unit_conversion(self._fit_range[0], xunit).value
xhigh = \
self._spatial_unit_conversion(self._fit_range[1], xunit).value
ax.axvline(xlow, color=color, alpha=0.5, linestyle='-.')
ax.axvline(xhigh, color=color, alpha=0.5, linestyle='-.')
# ax.legend(loc='best')
ax.grid(True)
if show_residual:
resids = self.delta_var - 10**self.fitted_model(np.log10(lags))
ax_r.errorbar(lags[fin_vals], resids[fin_vals],
yerr=self.delta_var_error[fin_vals],
fmt="{}-".format(symbol), color=color,
zorder=-1)
ax_r.set_ylabel("Residuals")
ax_r.set_xlabel("Lag ({})".format(xunit))
ax_r.axhline(0., color=fit_color, linestyle='--')
ax_r.axvline(xlow, color=color, alpha=0.5, linestyle='-.')
ax_r.axvline(xhigh, color=color, alpha=0.5, linestyle='-.')
ax_r.grid()
plt.setp(ax.get_xticklabels(), visible=False)
else:
ax.set_xlabel("Lag ({})".format(xunit))
ax.set_ylabel(r"$\sigma^{2}_{\Delta}$")
plt.tight_layout()
fig.subplots_adjust(hspace=0.1)
if save_name is not None:
plt.savefig(save_name)
plt.close()
else:
plt.show()
def run(self, show_progress=True, verbose=False, xunit=u.pix,
nan_treatment='fill', preserve_nan=False,
allow_huge=False, boundary='wrap',
use_pyfftw=False, threads=1, pyfftw_kwargs={},
xlow=None, xhigh=None,
brk=None, fit_kwargs={},
save_name=None):
'''
Compute the delta-variance.
Parameters
----------
show_progress : bool, optional
Show a progress bar during the creation of the covariance matrix.
verbose : bool, optional
Plot delta-variance transform.
xunit : u.Unit, optional
The unit to show the x-axis in.
allow_huge : bool, optional
See `~DeltaVariance.do_convolutions`.
nan_treatment : bool, optional
Enable to interpolate over NaNs in the convolution. Default is
True.
boundary : {"wrap", "fill"}, optional
Use "wrap" for periodic boundaries, and "cut" for non-periodic.
use_pyfftw : bool, optional
Enable to use pyfftw, if it is installed.
threads : int, optional
Number of threads to use in FFT when using pyfftw.
pyfftw_kwargs : Passed to
See `here <http://hgomersall.github.io/pyFFTW/pyfftw/builders/builders.html>`_
for a list of accepted kwargs.
xlow : `~astropy.units.Quantity`, optional
Lower lag value to consider in the fit.
xhigh : `~astropy.units.Quantity`, optional
Upper lag value to consider in the fit.
brk : `~astropy.units.Quantity`, optional
Give an initial break point guess. Enables fitting a segmented
linear model.
fit_kwargs : dict, optional
Passed to `~turbustat.statistics.lm_seg.Lm_Seg.fit_model` when
using a broken linear fit.
save_name : str,optional
Save the figure when a file name is given.
'''
self.compute_deltavar(allow_huge=allow_huge, boundary=boundary,
nan_treatment=nan_treatment,
preserve_nan=preserve_nan,
use_pyfftw=use_pyfftw,
threads=threads,
pyfftw_kwargs=pyfftw_kwargs,
show_progress=show_progress)
self.fit_plaw(xlow=xlow, xhigh=xhigh, brk=brk, verbose=verbose,
**fit_kwargs)
if verbose:
self.plot_fit(save_name=save_name, xunit=xunit)
return self
class DeltaVariance_Distance(object):
"""
Compares 2 datasets using delta-variance. The distance between them is
given by the Euclidean distance between the curves weighted by the
bootstrapped errors.
.. note:: When passing a computed `~DeltaVariance` class for `dataset1`
or `dataset2`, it may be necessary to recompute the
delta-variance if `use_common_lags=True` and the existing lags
do not match the common lags.
Parameters
----------
dataset1 : %(dtypes)s or `~DeltaVariance` class
Contains the data and header for one dataset. Or pass a
`~DeltaVariance` class that may be pre-computed.
dataset2 : %(dtypes)s or `~DeltaVariance` class
See `dataset1` above.
weights1 : %(dtypes)s
Weights for dataset1.
weights2 : %(dtypes)s
See above.
diam_ratio : float, optional
The ratio between the kernel sizes.
lags : numpy.ndarray or list, optional
The pixel scales to compute the delta-variance at.
lags2 : numpy.ndarray or list, optional
The pixel scales for the delta-variance of `dataset2`. Ignored if
`use_common_lags=True`.
use_common_lags : bool, optional
Use a set of common lags that have the same angular sizes for both
datasets. This is required for `DeltaVariance_Distance.curve_distance`
metric.
delvar_kwargs : dict, optional
Pass kwargs to `~DeltaVariance.run`.
delvar2_kwargs : dict, optional
Pass kwargs to `~DeltaVariance.run` for `dataset2`. When `None` is
given, the kwargs in `delvar_kwargs` are used for both datasets.
"""
__doc__ %= {"dtypes": " or ".join(common_types + twod_types)}
    def __init__(self, dataset1, dataset2, weights1=None, weights2=None,
                 diam_ratio=1.5, lags=None, lags2=None, use_common_lags=True,
                 delvar_kwargs={}, delvar2_kwargs=None):
super(DeltaVariance_Distance, self).__init__()
if isinstance(dataset1, DeltaVariance):
_given_data1 = False
self.delvar1 = dataset1
else:
_given_data1 = True
dataset1 = copy(input_data(dataset1, no_header=False))
        if isinstance(dataset2, DeltaVariance):
_given_data2 = False
self.delvar2 = dataset2
else:
_given_data2 = True
dataset2 = copy(input_data(dataset2, no_header=False))
self._common_lags = use_common_lags
# Create a default set of lags, in pixels
if use_common_lags:
if lags is None:
min_size = 3.0
nlags = 25
if _given_data1:
shape1 = dataset1[0].shape
else:
shape1 = self.delvar1.data.shape
if _given_data2:
shape2 = dataset2[0].shape
else:
shape2 = self.delvar2.data.shape
if min(shape1) > min(shape2):
lags = \
np.logspace(np.log10(min_size),
np.log10(min(shape2) / 2.),
nlags) * u.pix
else:
lags = \
np.logspace(np.log10(min_size),
np.log10(min(shape1) / 2.),
nlags) * u.pix
# Now adjust the lags such they have a common scaling when the
# datasets are not on a common grid.
if _given_data1:
wcs1 = WCS(dataset1[1])
else:
wcs1 = self.delvar1._wcs
if _given_data2:
wcs2 = WCS(dataset2[1])
else:
wcs2 = self.delvar2._wcs
scale = common_scale(wcs1, wcs2)
if scale == 1.0:
lags1 = lags
lags2 = lags
elif scale > 1.0:
lags1 = scale * lags
lags2 = lags
else:
lags1 = lags
lags2 = lags / float(scale)
else:
if lags2 is None and lags is not None:
lags2 = lags
if lags is not None:
lags1 = lags
else:
lags1 = None
# if fiducial_model is not None:
# self.delvar1 = fiducial_model
if _given_data1:
self.delvar1 = DeltaVariance(dataset1,
weights=weights1,
diam_ratio=diam_ratio, lags=lags1)
self.delvar1.run(**delvar_kwargs)
else:
# Check if we need to re-run the statistic if the lags are wrong.
if lags1 is not None:
if not (self.delvar1.lags == lags1).all():
self.delvar1.run(**delvar_kwargs)
if not hasattr(self.delvar1, "_slope"):
warn("DeltaVariance given as dataset1 does not have a fitted"
" slope. Re-running delta variance.")
if lags1 is not None:
self.delvar1._lags = lags1
self.delvar1.run(**delvar_kwargs)
if delvar2_kwargs is None:
delvar2_kwargs = delvar_kwargs
if _given_data2:
self.delvar2 = DeltaVariance(dataset2,
weights=weights2,
diam_ratio=diam_ratio, lags=lags2)
self.delvar2.run(**delvar2_kwargs)
else:
if lags2 is not None:
if not (self.delvar2.lags == lags2).all():
self.delvar2.run(**delvar2_kwargs)
if not hasattr(self.delvar2, "_slope"):
warn("DeltaVariance given as dataset2 does not have a fitted"
" slope. Re-running delta variance.")
if lags2 is not None:
self.delvar2._lags = lags2
                self.delvar2.run(**delvar2_kwargs)
@property
def curve_distance(self):
'''
The L2 norm between the delta-variance curves.
'''
return self._curve_distance
@property
def slope_distance(self):
'''
The t-statistic of the difference in the delta-variance slopes.
'''
return self._slope_distance
def distance_metric(self, verbose=False, xunit=u.pix,
save_name=None, plot_kwargs1={},
plot_kwargs2={}):
'''
Applies the Euclidean distance to the delta-variance curves.
Parameters
----------
verbose : bool, optional
Enables plotting.
xunit : `~astropy.units.Unit`, optional
Unit of the x-axis in the plot in pixel, angular, or
physical units.
save_name : str, optional
Name of the save file. Enables saving the figure.
plot_kwargs1 : dict, optional
Pass kwargs to `~turbustat.statistics.DeltaVariance.plot_fit` for
`dataset1`.
plot_kwargs2 : dict, optional
Pass kwargs to `~turbustat.statistics.DeltaVariance.plot_fit` for
`dataset2`.
'''
# curve distance is only defined if the delta-variance is measured at
# the same lags
if self._common_lags:
# Check for NaNs and negatives
nans1 = np.logical_or(np.isnan(self.delvar1.delta_var),
self.delvar1.delta_var <= 0.0)
nans2 = np.logical_or(np.isnan(self.delvar2.delta_var),
self.delvar2.delta_var <= 0.0)
all_nans = np.logical_or(nans1, nans2)
# Cut the curves at the specified xlow and xhigh points
fit_range1 = self.delvar1.fit_range
fit_range2 = self.delvar2.fit_range
# The curve metric only makes sense if the same range is used for
# both
check_range = fit_range1[0] == fit_range2[0] and \
fit_range1[1] == fit_range2[1]
if check_range:
# Lags are always in pixels. As are the limits
cuts1 = np.logical_and(self.delvar1.lags >= fit_range1[0],
self.delvar1.lags <= fit_range1[1])
cuts2 = np.logical_and(self.delvar2.lags >= fit_range2[0],
self.delvar2.lags <= fit_range2[1])
valids1 = np.logical_and(cuts1, ~all_nans)
valids2 = np.logical_and(cuts2, ~all_nans)
deltavar1_sum = np.sum(self.delvar1.delta_var[valids1])
deltavar1 = \
np.log10(self.delvar1.delta_var[valids1] / deltavar1_sum)
deltavar2_sum = np.sum(self.delvar2.delta_var[valids2])
deltavar2 = \
np.log10(self.delvar2.delta_var[valids2] / deltavar2_sum)
# Distance between two normalized curves
self._curve_distance = np.linalg.norm(deltavar1 - deltavar2)
else:
warn("The curve distance is only defined when the fit "
"range and lags for both datasets are equal. "
"Setting curve_distance to NaN.", TurbuStatMetricWarning)
self._curve_distance = np.NaN
else:
self._curve_distance = np.NaN
# Distance between the fitted slopes (combined t-statistic)
self._slope_distance = \
np.abs(self.delvar1.slope - self.delvar2.slope) / \
np.sqrt(self.delvar1.slope_err**2 + self.delvar2.slope_err**2)
if verbose:
import matplotlib.pyplot as plt
print(self.delvar1.fit.summary())
print(self.delvar2.fit.summary())
defaults1 = {'color': 'b', 'symbol': 'D', 'label': '1'}
defaults2 = {'color': 'g', 'symbol': 'o', 'label': '2'}
for key in defaults1:
if key not in plot_kwargs1:
plot_kwargs1[key] = defaults1[key]
for key in defaults2:
if key not in plot_kwargs2:
plot_kwargs2[key] = defaults2[key]
if 'xunit' in plot_kwargs1:
del plot_kwargs1['xunit']
if 'xunit' in plot_kwargs2:
del plot_kwargs2['xunit']
self.delvar1.plot_fit(xunit=xunit, **plot_kwargs1)
self.delvar2.plot_fit(xunit=xunit, **plot_kwargs2)
axes = plt.gcf().get_axes()
axes[0].legend(loc='best', frameon=True)
if save_name is not None:
plt.savefig(save_name)
plt.close()
else:
plt.show()
return self
def _delvar(array, weight, lag):
'''
Computes the delta variance of the given array.
'''
arr_cent = array.copy() - np.nanmean(array, axis=None)
val = np.nansum(arr_cent ** 2. * weight) /\
np.nansum(weight)
# The error needs to be normalized by the number of independent
# pixels in the array.
# Take width to be 1/2 FWHM. Note that lag is defined as 2*sigma.
# So 2ln(2) sigma^2 = ln(2)/2 * lag^2
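    # (FWHM = 2*sqrt(2*ln(2))*sigma and sigma = lag/2, so the kernel area is
    # pi*(FWHM/2)**2 = 2*pi*ln(2)*sigma**2 = 0.5*pi*ln(2)*lag**2.)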
kern_area = np.ceil(0.5 * np.pi * np.log(2) * lag**2).astype(int)
nindep = np.sqrt(np.isfinite(arr_cent).sum() // kern_area)
val_err = np.sqrt((np.nansum(arr_cent ** 4. * weight) /
np.nansum(weight)) - val**2) / nindep
return val, val_err
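# Minimal usage sketch (illustrative addition mirroring the class docstrings;
# the FITS file names below are placeholders, not files shipped with the
# package).
if __name__ == "__main__":
    from astropy.io import fits
    img1 = fits.open("2D_1.fits")  # placeholder file name
    img2 = fits.open("2D_2.fits")  # placeholder file name
    delvar_dist = DeltaVariance_Distance(img1, img2)
    delvar_dist.distance_metric(verbose=False)
    print("Curve distance: {}".format(delvar_dist.curve_distance))
    print("Slope distance: {}".format(delvar_dist.slope_distance))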
| mit |
satriaphd/bgc-learn | core/utils.py | 1 | 8964 | import sys
import os
import subprocess
import json
import straight.plugin
from tempfile import TemporaryFile
from os import path
from core import log
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from Bio import SearchIO
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from helperlibs.wrappers.io import TemporaryDirectory
from sklearn.externals import joblib
from time import gmtime, strftime
def get_version():
return "0.0.0alpha"
def load_plugins(module):
""
plugins = list(straight.plugin.load("modules.%s" % module))
plugins.sort(cmp=lambda x, y: cmp(x.priority, y.priority))
return plugins
available_algorithms = {
"classification": [{"name": plugin.name, "description": plugin.description} for plugin in load_plugins("classification")],
"regression": [{"name": plugin.name, "description": plugin.description} for plugin in load_plugins("regression")]
}
available_features = [{"name": plugin.name, "description": plugin.description} for plugin in load_plugins("feature_extraction")]
def get_algo_type(algo):
""
result = ""
for plugin in available_algorithms["classification"]:
if algo == plugin["name"]:
result = "classification"
break
for plugin in available_algorithms["regression"]:
if algo == plugin["name"]:
result = "regression"
break
return result
def check_unsupported_features(feature_list):
""
mapped_features = []
for feature in feature_list:
for plugin in available_features:
if feature == plugin["name"]:
mapped_features.append(feature)
unmapped_features = list(set(feature_list) - set(mapped_features))
return unmapped_features
def save_bgcjson(bgc, output_folder):
""
save_folder = path.join(output_folder, "bgcjson")
if not path.exists(save_folder):
if not path.isdir(save_folder):
os.makedirs(save_folder)
else:
log.error("Failed to save bgcjson, folder '%s' is unwritable" % save_folder)
sys.exit(1)
with open(path.join(save_folder, "%s.bgcjson" % bgc["id"]), "w") as bgcjson:
bgcjson.write(json.dumps(bgc, indent=4, separators=(',', ': ')))
def save_result_model(classifier, metadata, output_folder):
""
result_folder = path.join(output_folder, "results")
if not path.exists(result_folder):
if not path.isdir(result_folder):
os.makedirs(result_folder)
else:
log.error("Failed to create result folder")
sys.exit(1)
folder_name = "model-%s-%s" % (metadata["algorithm"]["name"], strftime("%Y%m%d-%H%M%S", gmtime()))
model_folder = path.join(result_folder, folder_name)
if not path.exists(model_folder):
os.makedirs(model_folder)
joblib.dump(classifier, path.join(model_folder, "classifier.pkl"))
with open(path.join(model_folder, "metadata.txt"), "w") as metafile:
metafile.write(json.dumps(metadata, indent=4, separators=(',', ': ')))
else:
log.error("Cannot write into '%s', folder exist." % (model_folder))
sys.exit(1)
return folder_name
def save_result_testing(folder_name, rows, columns, X, y, predicted, output_folder):
""
result_folder = path.join(output_folder, "results")
if not path.exists(result_folder):
if not path.isdir(result_folder):
os.makedirs(result_folder)
else:
log.error("Failed to create result folder")
sys.exit(1)
save_folder = path.join(result_folder, folder_name)
if not path.exists(save_folder):
os.makedirs(save_folder)
with open(path.join(save_folder, "result.txt"), "w") as resfile:
log.info("%s scores:" % folder_name)
for key, value in predicted[0].iteritems():
log.info("%s=%s" % (key, str(value)))
resfile.write("%s=%s\n" % (key, str(value)))
resfile.write("Predictions:\n")
resfile.write("row_name\t%s\texpected\tpredicted\n" % ("\t".join(columns)))
for i, fname in enumerate(rows):
resfile.write("%s\t%s\t%s\t%s\n" % (fname, "\t".join(["%.8f" % val for val in X[i]]), str(y[i]), str(predicted[1][i])))
else:
log.error("Cannot write into '%s', folder exist." % (model_folder))
sys.exit(1)
return folder_name
def create_feature_folder(input_files, output_folder, overwrite=True):
""
save_folder = path.join(output_folder, "features")
save_file = path.join(save_folder, "index.txt")
if not path.exists(save_folder):
if not path.isdir(save_folder):
os.makedirs(save_folder)
else:
log.error("Failed to create feature folder, folder '%s' is unwritable" % save_folder)
sys.exit(1)
if (not path.exists(save_file)) or overwrite:
with open(save_file, "w") as f:
for i, val in enumerate(input_files):
f.write("%d:%s\n" % (i, get_bgc_name(val)))
return save_folder
def get_bgc_name(file_path):
""
return path.splitext(path.basename(file_path))[0]
# ported from https://github.com/antismash/antismash
# pylint: disable=redefined-builtin
def execute(commands, input=None):
"Execute commands in a system-independent manner"
if input is not None:
stdin_redir = subprocess.PIPE
else:
stdin_redir = None
try:
proc = subprocess.Popen(commands, stdin=stdin_redir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate(input=input)
retcode = proc.returncode
return out, err, retcode
except OSError, e:
        log.error("%r %r returned %r" % (commands, input[:40] if input is not None else None, e))
raise
# pylint: enable=redefined-builtin
def run_hmmscan(query_sequence, opts=None):
""
# TODO: check if hmmscan available
# check if Pfam-A.hmm exists
pfam_folder = path.abspath(path.join(path.realpath(__file__), path.pardir, path.pardir, "resources", "pfamdb"))
if not path.exists(pfam_folder):
os.makedirs(pfam_folder)
else:
if not path.isdir(pfam_folder):
log.error("Failed to do hmmscan, folder '%s' is unwritable" % pfam_folder)
sys.exit(1)
if not path.exists(path.join(pfam_folder, "Pfam-A.hmm.h3m")):
log.error("Pfam-A database not downloaded, please run download_resources.py first")
sys.exit(1)
results = []
with TemporaryDirectory() as temp_dir:
domtblout = path.abspath(path.join(temp_dir, "domtblout"))
textout = path.abspath(path.join(temp_dir, "textout"))
command = ["hmmscan", "--cut_tc", "--domtblout", domtblout, "-o", textout]
temp_fasta = path.abspath(path.join(temp_dir, "temp_fasta"))
with open(temp_fasta, "w") as tf:
tf.write(query_sequence)
if opts is not None:
command.extend(opts)
command.extend([path.join(pfam_folder, "Pfam-A.hmm"), temp_fasta])
try:
out, err, retcode = execute(command)
except OSError:
return []
if retcode != 0:
log.error('Hmmscan returned %d: %r while scanning %r' % (retcode, err, query_sequence))
sys.exit(1)
results = {
"data": list(SearchIO.parse(textout, 'hmmer3-text')),
"accessions": {},
"lengths": {},
}
for scan in list(SearchIO.parse(domtblout, 'hmmscan3-domtab')):
for hit in scan.hits:
if hit.id not in results["accessions"]:
results["accessions"][hit.id] = hit.accession
if hit.id not in results["lengths"]:
results["lengths"][hit.id] = hit.seq_len
return results
def align_hsp_to_model(hsp, model_length): # this works only for hmmscan model (cds as query)
pad_left = ""
pad_right = ""
for i in xrange(0, hsp.hit_start):
pad_left += "."
for i in xrange(hsp.hit_end, model_length):
pad_right += "."
if hsp.hit_strand != hsp.query_strand:
pad_left, pad_right = pad_right, pad_left
return "%s%s%s" % (pad_left, str(hsp.hit.seq).replace(".", ""), pad_right)
# Print iterations progress
def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=50):
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '*' * filled_length + '-' * (bar_length - filled_length)
    sys.stdout.write('\r%s %s%s (%i/%i) %s' % (prefix, percents, '%', iteration, total, suffix))
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
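# Illustrative demo of print_progress (hypothetical addition, not part of the
# original module; only runs when this file is executed directly):
if __name__ == "__main__":
    import time
    for step in range(1, 101):
        print_progress(step, 100, prefix='Processing', suffix='complete')
        time.sleep(0.01)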
| gpl-3.0 |
jmargeta/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 13 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.todense(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
maxlikely/scikit-learn | examples/grid_search_digits.py | 1 | 2628 | """
=====================================================================
Parameter estimation using grid search with a nested cross-validation
=====================================================================
The classifier is optimized by "nested" cross-validation using the
:class:`sklearn.grid_search.GridSearchCV` object on a development set
that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, scoring=score)
clf.fit(X_train, y_train, cv=5)
print("Best parameters set found on development set:")
print()
print(clf.best_estimator_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() / 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
SunilMahendrakar/pagmo | PyGMO/problem/_gtop.py | 3 | 32255 | from PyGMO.problem._problem_space import cassini_1, gtoc_1, gtoc_2, cassini_2, rosetta, messenger_full, tandem, laplace, sagas, mga_1dsm_alpha, mga_1dsm_tof, mga_incipit, mga_incipit_cstrs, mga_part, _gtoc_2_objective
# Redefining the constructors of all problems to obtain good documentation
# and allowing kwargs
def _cassini_1_ctor(self, objectives=1):
"""
Constructs a Cassini 1 Problem (Box-Constrained Continuous Single-Objective)
NOTE: This problem (MGA) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
Its single objective version has a global minimum at 4.9307 [km/s],
and it is a deceptive problem with a larger minimum at 5.303 [km/s]
USAGE: problem.cassini_1(objectives = 1)
* objectives: number of objectives. 1=DV, 2=DV,DT
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
arg_list.append(objectives)
self._orig_init(*arg_list)
cassini_1._orig_init = cassini_1.__init__
cassini_1.__init__ = _cassini_1_ctor
def _gtoc_1_ctor(self):
"""
Constructs a GTOC 1 Problem (Box-Constrained Continuous Single-Objective)
NOTE: This problem (MGA) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
Best known global minima is at -1,581,950
USAGE: problem.gtoc_1()
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
self._orig_init(*arg_list)
gtoc_1._orig_init = gtoc_1.__init__
gtoc_1.__init__ = _gtoc_1_ctor
def _cassini_2_ctor(self):
"""
Constructs a Cassini 2 Problem (Box-Constrained Continuous Single-Objective)
NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
It models the same interplanetary trajectory as the cassini_1 problem, but
in a more accurate fashion, allowing deep space manouvres
Best known global minimum is at 8.383 [km/s]
USAGE: problem.cassini_2()
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
self._orig_init(*arg_list)
cassini_2._orig_init = cassini_2.__init__
cassini_2.__init__ = _cassini_2_ctor
def _rosetta_ctor(self):
"""
Constructs a Rosetta Problem (Box-Constrained Continuous Single-Objective)
NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
Best known global minimum is at 1.343 [km/s]
USAGE: problem.rosetta()
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
self._orig_init(*arg_list)
rosetta._orig_init = rosetta.__init__
rosetta.__init__ = _rosetta_ctor
def _messenger_full_ctor(self):
"""
Constructs a Mesenger Full Problem (Box-Constrained Continuous Single-Objective)
NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
Best known global minimum is at 2.113
USAGE: problem.messenger_full()
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
self._orig_init(*arg_list)
messenger_full._orig_init = messenger_full.__init__
messenger_full.__init__ = _messenger_full_ctor
def _tandem_ctor(self, prob_id=7, max_tof=-1):
"""
Constructs a TandEM Problem (Box-Constrained Continuous Single-Objective)
NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]. The objective function is -log(m_final).
USAGE: problem.tandem(prob_id = 7, max_tof = -1)
* prob_id: Selects the problem variant (one of 1..25). All problems differ from the fly-by sequence
    * max_tof: Activates a constraint on the maximum time of flight allowed (in years)
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
arg_list.append(prob_id)
arg_list.append(max_tof)
self._orig_init(*arg_list)
tandem._orig_init = tandem.__init__
tandem.__init__ = _tandem_ctor
def _laplace_ctor(self, seq=[3, 2, 3, 3, 5]):
"""
Constructs a EJSM-Laplace Problem (Box-Constrained Continuous Single-Objective)
NOTE: This problem (MGA-1DSM) is similar to TandEM, but targets Jupiter and the user
can specify explicitly the planetary fly-by sequence
USAGE: problem.laplace(seq = [3,2,3,3,5])
* seq: The planetary sequence. This is a list of ints that represent the planets to visit
1 - Mercury, 2 - Venus, 3 - Earth, 4 - Mars, 5 - Jupiter, 6 - Saturn. It must start from 3 (Earth)
and end with 5 (Jupiter)
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
arg_list.append(seq)
self._orig_init(*arg_list)
laplace._orig_init = laplace.__init__
laplace.__init__ = _laplace_ctor
def _sagas_ctor(self):
"""
Constructs a SAGAS Problem (Box-Constrained Continuous Single-Objective)
NOTE: This problem (MGA-1DSM) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
USAGE: problem.sagas()
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
self._orig_init(*arg_list)
sagas._orig_init = sagas.__init__
sagas.__init__ = _sagas_ctor
gtoc_2.obj = _gtoc_2_objective
def _gtoc_2_ctor(self, ast1=815, ast2=300, ast3=110, ast4=47, n_seg=10, objective=gtoc_2.obj.MASS_TIME):
"""
Constructs a GTOC 2 Problem (Constrained Continuous Single-Objective)
NOTE: This problem is a quite faithful transcription of the problem used during the GTOC2 competition
It Transcribe the whole OCP resulting from the low-thrust dynamics into an NLP. As such it is very
difficult to find feasible solutions. Note that by default the asteroid sequence is the winning one
from Turin University.
USAGE: problem.gtoc_2(ast1 = 815, ast2 = 300, ast3 = 110, ast4 = 47, n_seg = 10, objective = gtoc_2.obj.MASS_TIME)
* ast1 id of the first asteroid to visit (Group 1: 0 - 95)
* ast2 id of the second asteroid to visit (Group 2: 96 - 271)
* ast3 id of the third asteroid to visit (Group 3: 272 - 571)
* ast4 id of the fourth asteroid to visit (Group 4: 572 - 909)
* n_seg number of segments to be used per leg
* obj objective function in the enum {MASS,TIME,MASS_TIME}
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
arg_list.append(ast1)
arg_list.append(ast2)
arg_list.append(ast3)
arg_list.append(ast4)
arg_list.append(n_seg)
arg_list.append(objective)
self._orig_init(*arg_list)
gtoc_2._orig_init = gtoc_2.__init__
gtoc_2.__init__ = _gtoc_2_ctor
from PyKEP.core._core import epoch
from PyKEP.planet import jpl_lp, gtoc6
def _mga_1dsm_alpha_ctor(
self, seq=[jpl_lp('earth'), jpl_lp('venus'), jpl_lp('earth')],
t0=[epoch(0), epoch(1000)], tof=[365.25, 5.0 * 365.25], vinf=[0.5,
2.5], multi_objective=False, add_vinf_dep=False, add_vinf_arr=True):
"""
Constructs an mga_1dsm problem (alpha-encoding)
USAGE: problem.mga_1dsm(seq = [jpl_lp('earth'),jpl_lp('venus'),jpl_lp('earth')], t0 = [epoch(0),epoch(1000)], tof = [365.25,5.0 * 365.25], vinf = [0.5, 2.5], multi_objective = False, add_vinf_dep = False, add_vinf_arr = True)
* seq: list of PyKEP planets defining the encounter sequence (including the starting planet)
* t0: list of two epochs defining the launch window
* tof: list of two floats defining the minimum and maximum allowed mission length (days)
* vinf: list of two floats defining the minimum and maximum allowed initial hyperbolic velocity at launch (km/sec)
* multi_objective: when True constructs a multiobjective problem (dv, T)
* add_vinf_dep: when True the computed Dv includes the initial hyperbolic velocity (at launch)
* add_vinf_arr: when True the computed Dv includes the final hyperbolic velocity (at arrival)
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
arg_list.append(seq)
arg_list.append(t0[0])
arg_list.append(t0[1])
arg_list.append(tof[0])
arg_list.append(tof[1])
arg_list.append(vinf[0])
arg_list.append(vinf[1])
arg_list.append(multi_objective)
arg_list.append(add_vinf_dep)
arg_list.append(add_vinf_arr)
self._orig_init(*arg_list)
mga_1dsm_alpha._orig_init = mga_1dsm_alpha.__init__
mga_1dsm_alpha.__init__ = _mga_1dsm_alpha_ctor
def _mga_1dsm_tof_ctor(
self, seq=[
jpl_lp('earth'), jpl_lp('venus'), jpl_lp('earth')], t0=[
epoch(0), epoch(1000)], tof=[
[
50, 900], [
50, 900]], vinf=[
0.5, 2.5], multi_objective=False, add_vinf_dep=False, add_vinf_arr=True):
"""
Constructs an mga_1dsm problem (tof-encoding)
USAGE: problem.mga_1dsm(seq = [jpl_lp('earth'),jpl_lp('venus'),jpl_lp('earth')], t0 = [epoch(0),epoch(1000)], tof = [ [50, 900], [50, 900] ], vinf = [0.5, 2.5], multi_objective = False, add_vinf_dep = False, add_vinf_arr = True)
* seq: list of PyKEP planets defining the encounter sequence (including the starting planet)
* t0: list of two epochs defining the launch window
* tof: list of intervals defining the times of flight (days)
* vinf: list of two floats defining the minimum and maximum allowed initial hyperbolic velocity at launch (km/sec)
* multi_objective: when True constructs a multiobjective problem (dv, T)
* add_vinf_dep: when True the computed Dv includes the initial hyperbolic velocity (at launch)
* add_vinf_arr: when True the computed Dv includes the final hyperbolic velocity (at arrival)
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
arg_list.append(seq)
arg_list.append(t0[0])
arg_list.append(t0[1])
arg_list.append(tof)
arg_list.append(vinf[0])
arg_list.append(vinf[1])
arg_list.append(multi_objective)
arg_list.append(add_vinf_dep)
arg_list.append(add_vinf_arr)
self._orig_init(*arg_list)
mga_1dsm_tof._orig_init = mga_1dsm_tof.__init__
mga_1dsm_tof.__init__ = _mga_1dsm_tof_ctor
def _mga_incipit_ctor(
self, seq=[
gtoc6('io'), gtoc6('io'), gtoc6('europa')], t0=[
epoch(7305.0), epoch(11323.0)], tof=[
[
100, 200], [
3, 200], [
4, 100]]):
"""
    USAGE: mga_incipit(seq = [gtoc6('io'),gtoc6('io'),gtoc6('europa')], t0 = [epoch(7305.0),epoch(11323.0)], tof = [[100,200],[3,200],[4,100]])
* seq: list of jupiter moons defining the trajectory incipit
* t0: list of two epochs defining the launch window
* tof: list of n lists containing the lower and upper bounds for the legs flight times (days)
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
arg_list.append(seq)
arg_list.append(t0[0])
arg_list.append(t0[1])
arg_list.append(tof)
self._orig_init(*arg_list)
mga_incipit._orig_init = mga_incipit.__init__
mga_incipit.__init__ = _mga_incipit_ctor
def _mga_incipit_cstrs_ctor(
self, seq=[
gtoc6('io'), gtoc6('io'), gtoc6('europa')], t0=[
epoch(7305.0), epoch(11323.0)], tof=[
[
100, 200], [
3, 200], [
4, 100]], Tmax=300.00, Dmin=2.0):
"""
    USAGE: mga_incipit_cstrs(seq = [gtoc6('io'),gtoc6('io'),gtoc6('europa')], t0 = [epoch(7305.0),epoch(11323.0)], tof = [[100,200],[3,200],[4,100]], Tmax = 300.00, Dmin = 2.0)
* seq: list of jupiter moons defining the trajectory incipit
* t0: list of two epochs defining the launch window
* tof: list of n lists containing the lower and upper bounds for the legs flight times (days)
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
arg_list.append(seq)
arg_list.append(t0[0])
arg_list.append(t0[1])
arg_list.append(tof)
arg_list.append(Tmax)
arg_list.append(Dmin)
self._orig_init(*arg_list)
mga_incipit_cstrs._orig_init = mga_incipit_cstrs.__init__
mga_incipit_cstrs.__init__ = _mga_incipit_cstrs_ctor
def _mga_part_ctor(
self, seq=[
gtoc6('europa'), gtoc6('europa'), gtoc6('europa')], tof=[
[
5, 50], [
5, 50]], t0=epoch(11000), v_inf_in=[
1500.0, 350.0, 145.0]):
"""
    USAGE: mga_part(seq = [gtoc6('europa'),gtoc6('europa'),gtoc6('europa')], tof = [[5,50],[5,50]], t0 = epoch(11000), v_inf_in = [1500.0,350.0,145.0])
* seq: list of jupiter moons defining the trajectory incipit
* tof: list of n lists containing the lower and upper bounds for the legs flight times (days)
* t0: starting epoch
* v_inf_in: Incoming spacecraft relative velocity
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
arg_list.append(seq)
arg_list.append(tof)
arg_list.append(t0)
arg_list.append(v_inf_in)
self._orig_init(*arg_list)
mga_part._orig_init = mga_part.__init__
mga_part.__init__ = _mga_part_ctor
# Plot of the trajectory for an mga_1dsm problem
def _mga_1dsm_alpha_plot(self, x):
"""
Plots the trajectory represented by the decision vector x
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC
from math import pi, acos, cos, sin
from scipy.linalg import norm
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(0, 0, 0, color='y')
seq = self.get_sequence()
n = (len(seq) - 1)
# 1 - we 'decode' the chromosome recording the various times of flight
# (days) in the list T
T = list([0] * (n))
alpha_sum = 0
for i in range(n):
T[i] = x[1] * x[6 + 4 * i]
alpha_sum += x[6 + 4 * i]
for i in range(n):
T[i] /= alpha_sum
# 2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (n + 1))
r_P = list([None] * (n + 1))
v_P = list([None] * (n + 1))
DV = list([None] * (n + 1))
for i, planet in enumerate(seq):
t_P[i] = epoch(x[0] + sum(T[0:i]))
r_P[i], v_P[i] = planet.eph(t_P[i])
plot_planet(ax, planet, t0=t_P[i], color=(
0.8, 0.6, 0.8), legend=True, units = AU)
# 3 - We start with the first leg
theta = 2 * pi * x[2]
phi = acos(2 * x[3] - 1) - pi / 2
Vinfx = x[4] * cos(phi) * cos(theta)
Vinfy = x[4] * cos(phi) * sin(theta)
Vinfz = x[4] * sin(phi)
v0 = [a + b for a, b in zip(v_P[0], [Vinfx, Vinfy, Vinfz])]
r, v = propagate_lagrangian(
r_P[0], v0, x[5] * T[0] * DAY2SEC, seq[0].mu_central_body)
plot_kepler(
ax,
r_P[0],
v0,
x[5] *
T[0] *
DAY2SEC,
seq[0].mu_central_body,
N=100,
color='b',
legend=False,
units=AU)
# Lambert arc to reach seq[1]
dt = (1 - x[5]) * T[0] * DAY2SEC
l = lambert_problem(r, r_P[1], dt, seq[0].mu_central_body)
plot_lambert(ax, l, sol=0, color='r', legend=False, units=AU)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
    # First DSM occurring at time nu1*T1
DV[0] = norm([a - b for a, b in zip(v_beg_l, v)])
# 4 - And we proceed with each successive leg
for i in range(1, n):
# Fly-by
v_out = fb_prop(v_end_l,
v_P[i],
x[8 + (i - 1) * 4] * seq[i].radius,
x[7 + (i - 1) * 4],
seq[i].mu_self)
# s/c propagation before the DSM
r, v = propagate_lagrangian(
r_P[i], v_out, x[9 + (i - 1) * 4] * T[i] * DAY2SEC, seq[0].
mu_central_body)
plot_kepler(ax,
r_P[i],
v_out,
x[9 + (i - 1) * 4] * T[i] * DAY2SEC,
seq[0].mu_central_body,
N=100,
color='b',
legend=False,
units=AU)
# Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
dt = (1 - x[9 + (i - 1) * 4]) * T[i] * DAY2SEC
l = lambert_problem(r, r_P[i + 1], dt, seq[0].mu_central_body)
plot_lambert(ax, l, sol=0, color='r', legend=False, units=AU)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
# DSM occurring at time nu2*T2
DV[i] = norm([a - b for a, b in zip(v_beg_l, v)])
return ax
mga_1dsm_alpha.plot = _mga_1dsm_alpha_plot
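# Illustrative use of the plot method (sketch; the optimiser choice below is an
# assumption -- any feasible decision vector x can be plotted):
# from PyGMO import island, algorithm
# prob = mga_1dsm_alpha()
# isl = island(algorithm.jde(gen=100), prob, 20)
# isl.evolve(10); isl.join()
# prob.plot(isl.population.champion.x)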
# Plot of the trajectory for an mga_1dsm problem
def _mga_1dsm_tof_plot_old(self, x):
"""
Plots the trajectory represented by the decision vector x
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC
from math import pi, acos, cos, sin
from scipy.linalg import norm
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(0, 0, 0, color='y')
seq = self.get_sequence()
n = (len(seq) - 1)
# 1 - we 'decode' the chromosome recording the various times of flight
# (days) in the list T
T = x[5::4]
# 2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (n + 1))
r_P = list([None] * (n + 1))
v_P = list([None] * (n + 1))
DV = list([None] * (n + 1))
for i, planet in enumerate(seq):
t_P[i] = epoch(x[0] + sum(T[0:i]))
r_P[i], v_P[i] = planet.eph(t_P[i])
plot_planet(ax, planet, t0=t_P[i], color=(
0.8, 0.6, 0.8), legend=True, units = AU)
# 3 - We start with the first leg
theta = 2 * pi * x[1]
phi = acos(2 * x[2] - 1) - pi / 2
Vinfx = x[3] * cos(phi) * cos(theta)
Vinfy = x[3] * cos(phi) * sin(theta)
Vinfz = x[3] * sin(phi)
v0 = [a + b for a, b in zip(v_P[0], [Vinfx, Vinfy, Vinfz])]
r, v = propagate_lagrangian(
r_P[0], v0, x[4] * T[0] * DAY2SEC, seq[0].mu_central_body)
plot_kepler(
ax,
r_P[0],
v0,
x[4] *
T[0] *
DAY2SEC,
seq[0].mu_central_body,
N=100,
color='b',
legend=False,
units=AU)
# Lambert arc to reach seq[1]
dt = (1 - x[4]) * T[0] * DAY2SEC
l = lambert_problem(r, r_P[1], dt, seq[0].mu_central_body)
plot_lambert(ax, l, sol=0, color='r', legend=False, units=AU)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
    # First DSM occurring at time nu1*T1
DV[0] = norm([a - b for a, b in zip(v_beg_l, v)])
# 4 - And we proceed with each successive leg
for i in range(1, n):
# Fly-by
v_out = fb_prop(v_end_l,
v_P[i],
x[7 + (i - 1) * 4] * seq[i].radius,
x[6 + (i - 1) * 4],
seq[i].mu_self)
# s/c propagation before the DSM
r, v = propagate_lagrangian(
            r_P[i], v_out, x[8 + (i - 1) * 4] * T[i] * DAY2SEC,
            seq[0].mu_central_body)
plot_kepler(ax,
r_P[i],
v_out,
x[8 + (i - 1) * 4] * T[i] * DAY2SEC,
seq[0].mu_central_body,
N=100,
color='b',
legend=False,
units=AU)
# Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
dt = (1 - x[8 + (i - 1) * 4]) * T[i] * DAY2SEC
l = lambert_problem(r, r_P[i + 1], dt, seq[0].mu_central_body)
plot_lambert(ax, l, sol=0, color='r', legend=False, units=AU)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
# DSM occurring at time nu2*T2
DV[i] = norm([a - b for a, b in zip(v_beg_l, v)])
return ax
mga_1dsm_tof.plot_old = _mga_1dsm_tof_plot_old
# Plot of the trajectory of an mga_incipit problem
def _mga_incipit_plot_old(self, x, plot_leg_0=False):
"""
Plots the trajectory represented by the decision vector x
Example::
prob.plot(x)
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC
from math import pi, acos, cos, sin
from scipy.linalg import norm
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d', aspect='equal')
ax.scatter(0, 0, 0, color='y')
JR = 71492000.0
legs = len(x) / 4
seq = self.get_sequence()
common_mu = seq[0].mu_central_body
# 1 - we 'decode' the chromosome recording the various times of flight
# (days) in the list T
T = x[3::4]
# 2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * legs)
r_P = list([None] * legs)
v_P = list([None] * legs)
DV = list([None] * legs)
for i, planet in enumerate(seq):
t_P[i] = epoch(x[0] + sum(T[:i + 1]))
r_P[i], v_P[i] = planet.eph(t_P[i])
plot_planet(ax, planet, t0=t_P[i], color=(
0.8, 0.6, 0.8), legend=True, units = JR)
# 3 - We start with the first leg: a lambert arc
theta = 2 * pi * x[1]
phi = acos(2 * x[2] - 1) - pi / 2
# phi close to zero corresponds to an injection near the moons' orbital plane
r = [cos(phi) * sin(theta), cos(phi) * cos(theta), sin(phi)]
r = [JR * 1000 * d for d in r]
l = lambert_problem(r, r_P[0], T[0] * DAY2SEC, common_mu, False, False)
if (plot_leg_0):
plot_lambert(ax, l, sol=0, color='k', legend=False, units=JR, N=500)
# Lambert arc to reach seq[1]
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
# 4 - And we proceed with each successive leg
for i in range(1, legs):
# Fly-by
v_out = fb_prop(v_end_l,
v_P[i - 1],
x[1 + 4 * i] * seq[i - 1].radius,
x[4 * i],
seq[i - 1].mu_self)
# s/c propagation before the DSM
r, v = propagate_lagrangian(
r_P[i - 1], v_out, x[4 * i + 2] * T[i] * DAY2SEC, common_mu)
plot_kepler(ax,
r_P[i - 1],
v_out,
x[4 * i + 2] * T[i] * DAY2SEC,
common_mu,
N=500,
color='b',
legend=False,
units=JR)
# Lambert arc to reach the next body in the sequence during (1-nu2)*T2 (second segment)
dt = (1 - x[4 * i + 2]) * T[i] * DAY2SEC
l = lambert_problem(r, r_P[i], dt, common_mu, False, False)
plot_lambert(ax, l, sol=0, color='r', legend=False, units=JR, N=500)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
plt.show()
return ax
mga_incipit.plot_old = _mga_incipit_plot_old
# Plot of the trajectory of an mga_part problem
def _mga_part_plot_old(self, x):
"""
Plots the trajectory represented by the decision vector x
Example::
prob.plot(x)
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC
from math import pi, acos, cos, sin
from scipy.linalg import norm
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d', aspect='equal')
ax.scatter(0, 0, 0, color='y')
JR = 71492000.0
legs = len(x) / 4
seq = self.get_sequence()
common_mu = seq[0].mu_central_body
start_mjd2000 = self.t0.mjd2000
# 1 - we 'decode' the chromosome recording the various times of flight
# (days) in the list T
T = x[3::4]
# 2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (legs + 1))
r_P = list([None] * (legs + 1))
v_P = list([None] * (legs + 1))
for i, planet in enumerate(seq):
t_P[i] = epoch(start_mjd2000 + sum(T[:i]))
r_P[i], v_P[i] = planet.eph(t_P[i])
plot_planet(ax, planet, t0=t_P[i], color=(
0.8, 0.6, 0.8), legend=True, units = JR)
v_end_l = [a + b for a, b in zip(v_P[0], self.vinf_in)]
# 4 - And we iterate on the legs
for i in range(0, legs):
# Fly-by
v_out = fb_prop(v_end_l,
v_P[i],
x[1 + 4 * i] * seq[i].radius,
x[4 * i],
seq[i].mu_self)
# s/c propagation before the DSM
r, v = propagate_lagrangian(
r_P[i], v_out, x[4 * i + 2] * T[i] * DAY2SEC, common_mu)
plot_kepler(ax, r_P[i], v_out, x[4 * i + 2] * T[i] * DAY2SEC,
common_mu, N=500, color='b', legend=False, units=JR)
# Lambert arc to reach the next body in the sequence during (1-nu2)*T2 (second segment)
dt = (1 - x[4 * i + 2]) * T[i] * DAY2SEC
l = lambert_problem(r, r_P[i + 1], dt, common_mu, False, False)
plot_lambert(ax, l, sol=0, color='r', legend=False, units=JR, N=500)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
plt.show()
return ax
mga_part.plot_old = _mga_part_plot_old
# Plot of concatenated fly-by legs
def _part_plot(x, units, axis, seq, start_mjd2000, vinf_in):
"""
Plots the trajectory represented by a decision vector x = [beta,rp,eta,T] * N
associated to a sequence seq, a start_mjd2000 and an incoming vinf_in
"""
from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC
from math import pi, acos, cos, sin
from scipy.linalg import norm
legs = len(x) / 4
common_mu = seq[0].mu_central_body
# 1 - we 'decode' the chromosome recording the various times of flight
# (days) in the list T
T = x[3::4]
# 2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (legs + 1))
r_P = list([None] * (legs + 1))
v_P = list([None] * (legs + 1))
for i, planet in enumerate(seq):
t_P[i] = epoch(start_mjd2000 + sum(T[:i]))
r_P[i], v_P[i] = planet.eph(t_P[i])
plot_planet(planet, t0=t_P[i], color=(
0.8, 0.6, 0.8), legend=True, units = units, ax=axis)
v_end_l = [a + b for a, b in zip(v_P[0], vinf_in)]
# 4 - And we iterate on the legs
for i in range(0, legs):
# Fly-by
v_out = fb_prop(v_end_l,
v_P[i],
x[1 + 4 * i] * seq[i].radius,
x[4 * i],
seq[i].mu_self)
# s/c propagation before the DSM
r, v = propagate_lagrangian(
r_P[i], v_out, x[4 * i + 2] * T[i] * DAY2SEC, common_mu)
plot_kepler(r_P[i], v_out, x[4 * i + 2] * T[i] * DAY2SEC,
common_mu, N=500, color='b', legend=False, units=units, ax=axis)
# Lambert arc to reach the next body in the sequence during (1-nu2)*T2 (second segment)
dt = (1 - x[4 * i + 2]) * T[i] * DAY2SEC
l = lambert_problem(r, r_P[i + 1], dt, common_mu, False, False)
plot_lambert(
l, sol=0, color='r', legend=False, units=units, N=500, ax=axis)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
# Plot of the trajectory of an mga_part problem
def _mga_part_plot(self, x):
"""
Plots the trajectory represented by the decision vector x
Example::
prob.plot(x)
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
axis = fig.gca(projection='3d', aspect='equal')
# Plots the central body (planet or star)
axis.scatter(0, 0, 0, color='y')
JR = 71492000.0
seq = self.get_sequence()
start_mjd2000 = self.t0.mjd2000
_part_plot(x, JR, axis, seq, start_mjd2000, self.vinf_in)
return axis
mga_part.plot = _mga_part_plot
def _mga_incipit_plot(self, x, plot_leg_0=False):
"""
Plots the trajectory represented by the decision vector x
Example::
prob.plot(x)
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC
from math import pi, acos, cos, sin
from scipy.linalg import norm
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d', aspect='equal')
ax.scatter(0, 0, 0, color='y')
JR = 71492000.0
seq = self.get_sequence()
common_mu = seq[0].mu_central_body
r_P, v_P = seq[0].eph(epoch(x[0] + x[3]))
# 3 - We start with the first leg: a lambert arc
theta = 2 * pi * x[1]
phi = acos(2 * x[2] - 1) - pi / 2
# phi close to zero corresponds to an injection near the moons' orbital plane
r = [cos(phi) * sin(theta), cos(phi) * cos(theta), sin(phi)]
r = [JR * 1000 * d for d in r]
l = lambert_problem(r, r_P, x[3] * DAY2SEC, common_mu, False, False)
if (plot_leg_0):
plot_lambert(ax, l, sol=0, color='k', legend=False, units=JR, N=500)
# Lambert arc to reach seq[1]
v_end_l = l.get_v2()[0]
vinf_in = [a - b for a, b in zip(v_end_l, v_P)]
_part_plot(x[4:], JR, ax, seq, x[0] + x[3], vinf_in)
return ax
mga_incipit.plot = _mga_incipit_plot
# Plot of the trajectory for an mga_1dsm problem
def _mga_1dsm_tof_plot(self, x):
"""
Plots the trajectory represented by the decision vector x
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
from PyKEP import epoch, propagate_lagrangian, lambert_problem, fb_prop, AU, MU_SUN, DAY2SEC
from math import pi, acos, cos, sin
from scipy.linalg import norm
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
axis = fig.gca(projection='3d')
axis.scatter(0, 0, 0, color='y')
seq = self.get_sequence()
# 2 - We plot the first leg
r_P0, v_P0 = seq[0].eph(epoch(x[0]))
plot_planet(seq[0], t0=epoch(x[0]), color=(
0.8, 0.6, 0.8), legend=True, units = AU, ax=axis)
r_P1, v_P1 = seq[1].eph(epoch(x[0] + x[5]))
theta = 2 * pi * x[1]
phi = acos(2 * x[2] - 1) - pi / 2
Vinfx = x[3] * cos(phi) * cos(theta)
Vinfy = x[3] * cos(phi) * sin(theta)
Vinfz = x[3] * sin(phi)
v0 = [a + b for a, b in zip(v_P0, [Vinfx, Vinfy, Vinfz])]
r, v = propagate_lagrangian(
r_P0, v0, x[4] * x[5] * DAY2SEC, seq[0].mu_central_body)
plot_kepler(
r_P0,
v0,
x[4] * x[5] * DAY2SEC,
seq[0].mu_central_body,
N=100,
color='b',
legend=False,
units=AU,
ax=axis)
# Lambert arc to reach seq[1]
dt = (1 - x[4]) * x[5] * DAY2SEC
l = lambert_problem(r, r_P1, dt, seq[0].mu_central_body)
plot_lambert(l, sol=0, color='r', legend=False, units=AU, ax=axis)
v_end_l = l.get_v2()[0]
vinf_in = [a - b for a, b in zip(v_end_l, v_P1)]
_part_plot(x[6:], AU, axis, seq[1:], x[0] + x[5], vinf_in)
return axis
mga_1dsm_tof.plot = _mga_1dsm_tof_plot
del gtoc6, jpl_lp
| gpl-3.0 |
glennq/scikit-learn | benchmarks/bench_isolation_forest.py | 46 | 3782 | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
np.random.seed(1)
datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
fig_roc, ax_roc = plt.subplots(1, 1, figsize=(8, 5))
for dat in datasets:
# loading and vectorization
print('loading data')
if dat in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
X = dataset.data
y = dataset.target
if dat == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dat == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
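# The kddcup99 'SF' and 'SA' subsets carry categorical string columns; the
# LabelBinarizer calls below one-hot encode those columns so the data can be
# fed to IsolationForest as a purely numerical matrix.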
if dat == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dat == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dat == 'http' or dat == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = X.shape
n_samples_train = n_samples // 2
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
print('IsolationForest processing...')
model = IsolationForest(n_jobs=-1)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = - model.decision_function(X_test)  # the lower, the more normal
predict_time = time() - tstart
# Show score histograms
fig, ax = plt.subplots(3, sharex=True, sharey=True)
bins = np.linspace(-0.5, 0.5, 200)
ax[0].hist(scoring, bins, color='black')
ax[0].set_title('decision function for %s dataset' % dat)
ax[0].legend(loc="lower right")
ax[1].hist(scoring[y_test == 0], bins, color='b',
label='normal data')
ax[1].legend(loc="lower right")
ax[2].hist(scoring[y_test == 1], bins, color='r',
label='outliers')
ax[2].legend(loc="lower right")
# Show ROC Curves
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
label = ('%s (area: %0.3f, train-time: %0.2fs, '
'test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))
ax_roc.plot(fpr, tpr, lw=1, label=label)
ax_roc.set_xlim([-0.05, 1.05])
ax_roc.set_ylim([-0.05, 1.05])
ax_roc.set_xlabel('False Positive Rate')
ax_roc.set_ylabel('True Positive Rate')
ax_roc.set_title('Receiver operating characteristic (ROC) curves')
ax_roc.legend(loc="lower right")
fig_roc.tight_layout()
plt.show()
| bsd-3-clause |
gymar/n-puzzle-solver | gui.py | 3 | 4344 | from tkinter import *
from tkinter import ttk
from solver import *
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
class Application(Tk):
def __init__(self, master):
Tk.__init__(self, master)
self.master = master
self.field()
def field(self):
self.geometry("1200x700")
self.resizable(width=False, height=False)
self.b1 = Button(self, text = 'Graph', height = 1, width = 5, command = self.graphs)
self.b1.place(relx = .95, rely = .85, anchor = "c")
self.b2 = Button(self, text = 'Solve', height = 1, width = 5, command = self.insert)
self.b2.place(relx = .30, rely = .70, anchor = "c")
self.text = Listbox(self, height = 25, width = 75)
self.text.place (relx = .50, rely = .30, anchor = "c")
self.optionVar = StringVar(self)
self.optionVar.set('3x3')
self.option = OptionMenu(self, self.optionVar, '3x3', '4x4', '5x5')
self.option.place(relx = .30, rely = .80, anchor = "c")
self.b3 = Button(self, text = 'Shuffle', height = 1, width = 5, command = self.shuffle)
self.b3.place(relx = .30, rely = 1.0, anchor = "c")
def insert(self):
self.text.insert(END, "SOLUTION:")
goal = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 0]]
start = Node([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 0, 14, 15]], 4)
for a in a_star(start, goal, 4):
for b in a.q:
self.text.insert(END, b)
self.text.insert(END, "")
def shuffle(self):
q = Node([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 0, 14, 15]], 4)
shuffle(q)
def submit(self):
self.b1.config(state='normal')
self.b2.config(state='normal')
self.top.destroy()
def graphs(self):
self.top2 = Toplevel()
self.top2.title("graphs")
self.top2.geometry("300x150+30+30")
self.top2.transient(self)
self.b1.config(state='disabled')
self.b2.config(state='disabled')
self.neural = Label(self.top2, text = "Algorithm neural expander xd")
self.neural.pack()
self.graphButton = Button(self.top2, text="Show!", command = lambda: graph())
self.graphButton.pack()
#test code for graph drawing
def Gen_RandLine(length, dims=2):
lineData = np.empty((dims, length))
lineData[:, 0] = np.random.rand(dims)
for index in range(1, length):
# scaling the random numbers by 0.1 so
# movement is small compared to position.
# subtraction by 0.5 is to change the range to [-0.5, 0.5]
# to allow a line to move backwards.
step = ((np.random.rand(dims) - 0.5) * 0.1)
lineData[:, index] = lineData[:, index - 1] + step
return lineData
def update_lines(num, dataLines, lines):
for line, data in zip(lines, dataLines):
# NOTE: there is no .set_data() for 3 dim data...
line.set_data(data[0:2, :num])
line.set_3d_properties(data[2, :num])
return lines
def graph():
# Attaching 3D axis to the figure
fig = plt.figure()
ax = p3.Axes3D(fig)
# Fifty lines of random 3-D lines
data = [Gen_RandLine(25, 3) for index in range(50)]
# Creating fifty line objects.
# NOTE: Can't pass empty arrays into 3d version of plot()
lines = [ax.plot(dat[0, 0:1], dat[1, 0:1], dat[2, 0:1])[0] for dat in data]
# Setting the axes properties
ax.set_xlim3d([0.0, 1.0])
ax.set_xlabel('X')
ax.set_ylim3d([0.0, 1.0])
ax.set_ylabel('Y')
ax.set_zlim3d([0.0, 1.0])
ax.set_zlabel('Z')
ax.set_title('3D Test')
# Creating the Animation object
line_ani = animation.FuncAnimation(fig, update_lines, 25, fargs=(data, lines),
interval=50, blit=False)
plt.show()
if __name__ == "__main__":
gui = Application(None)
gui.title("Solver")
gui.mainloop()
| mit |
gotomypc/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
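Examples
--------
A minimal, illustrative fit on a tiny toy dataset (the data values are
arbitrary and serve only to sketch the API):
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import LinearSVC
>>> clf = LinearSVC(random_state=0)
>>> clf.fit(X, y) #doctest: +ELLIPSIS
LinearSVC(...)
>>> print(clf.predict([[-0.8, -1]]))
[1]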
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
Specifies the loss function. 'epsilon_insensitive' is the
epsilon-insensitive loss (standard SVR) while
'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
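Examples
--------
An illustrative usage sketch on random data (the numbers carry no
particular meaning):
>>> from sklearn.svm import LinearSVR
>>> import numpy as np
>>> np.random.seed(0)
>>> X = np.random.randn(10, 5)
>>> y = np.random.randn(10)
>>> reg = LinearSVR(C=1.0, epsilon=0.0)
>>> reg.fit(X, y) #doctest: +ELLIPSIS
LinearSVR(...)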
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
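Examples
--------
An illustrative sketch of fitting on unlabeled data and flagging outliers
(the data values are arbitrary):
>>> import numpy as np
>>> from sklearn.svm import OneClassSVM
>>> X = np.array([[0.0, 0.0], [0.1, -0.1], [-0.2, 0.1], [10.0, 10.0]])
>>> clf = OneClassSVM(nu=0.2, gamma=0.5)
>>> clf.fit(X) #doctest: +ELLIPSIS
OneClassSVM(...)
>>> labels = clf.predict(X) # +1 for inliers, -1 for outliers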
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
sheabrown/faraday_complexity | final/tmp/weighted_test.py | 1 | 5800 | from __future__ import print_function
from keras.models import Model
from keras.layers import Activation, Dense, Dropout, Flatten, Input
from keras.layers import concatenate
from keras.layers import Conv1D, MaxPooling1D
from keras.utils import plot_model
#from time import perf_counter
from loadData import *
import sys
from keras.models import model_from_yaml
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from keras.models import load_model
#f= open('yaml_model','r')
'''f=open('../regularized/model_V1.h5','r')
yaml_strings = f.readlines()
model_string = ''
for y in yaml_strings:
model_string += y
f.close()'''
#model = model_from_yaml(model_string)
model = load_model('../regularized/model_V1.h5')
#plot_model(model, to_file='reg_modelv1.png')
#model.load_weights('inception_weights', by_name=False)
#model.load_weights('../regularized/weights_V1.h5', by_name=False)
dir = '../data/train/'
X_data = np.load(dir+'X_data.npy')
Y_data = np.load(dir+'label.npy')
#------ creation params --------
chi_data = np.load(dir+'chi.npy')
depth_data = np.load(dir+'depth.npy')
flux_data = np.load(dir+'flux.npy')
q_data = np.load(dir+'Q_data.npy')
s_data = np.load(dir+'S_data.npy')
sig_data = np.load(dir+'sig.npy')
u_data = np.load(dir+'U_data.npy')
#for i in range(len(cuts)):
# ------ make any cuts necessary to the data ----------
#cut = cuts[i]
#some are tuples, some are floats -- need to format it
#set aside the complex sources with a certain delta in whatever value
cut_array = sig_data
cut_vals = np.linspace(0,np.max(cut_array)*.9,15)
matrix_vals = []
for c in cut_vals:
print (c)
#do the cut
float_check = type(0.1); tuple_check = type((0,1))
postcut = [];kept=[] #X_new = [];Y_new = []
for i in range(len(cut_array)):
val = cut_array[i]
'''if type(val) == tuple_check:
if abs(val[0]-val[1]) >= c:
postcut.append(abs(val[0]-val[1]))
kept.append(i)
#X_new.append(X_data[i])
#Y_new.append(Y_data[i])'''
if val >= c:
postcut.append(val)
kept.append(i)
X_new=np.array([X_data[k] for k in kept])
Y_new=np.array([Y_data[k] for k in kept])
probs = model.predict(X_new)[:,1]
prob = 0.8
predictions = np.where(probs > prob, 1, 0)
#predictions = np_utils.to_categorical(predictions, 2)
cm = confusion_matrix(Y_new, predictions)
print(cm)
matrix_vals.append(cm)
correct_indices = np.where(predictions==Y_new)[0]
incorrect_indices = np.where(predictions!=Y_new)[0]
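# Reminder on sklearn's confusion_matrix layout used below: rows are true
# labels and columns are predictions, so cm[0,0]=true negatives,
# cm[0,1]=false positives, cm[1,0]=false negatives, cm[1,1]=true positives.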
fig = plt.figure(1)
plt.scatter(cut_vals,[float(matrix_vals[i][0,0])/np.sum(matrix_vals[i])*100. for i in range(len(matrix_vals))],label='True Negative',c='g')
plt.scatter(cut_vals,[float(matrix_vals[i][0,1])/np.sum(matrix_vals[i])*100. for i in range(len(matrix_vals))],label='False Positive',c='k')
plt.scatter(cut_vals,[float(matrix_vals[i][1,1])/np.sum(matrix_vals[i])*100. for i in range(len(matrix_vals))],label='True Positive',c='b')
plt.scatter(cut_vals,[float(matrix_vals[i][1,0])/np.sum(matrix_vals[i])*100. for i in range(len(matrix_vals))],label='False Negative',c='r')
plt.xlabel(r'$\sigma $')
plt.ylabel('Percent of Sample')
plt.title(r'Percent Correct over $\sigma$')
plt.legend(loc=(0.3,0.8))
plt.savefig('./cutout_plots/deltasigma.png', bbox_inches='tight')
plt.show()
tsimple = []
fsimple = []
tcomplex = []
fcomplex = []
for kk in correct_indices:
if predictions[kk] == 0:
tsimple.append(kk)
elif predictions[kk] ==1:
tcomplex.append(kk)
for kk in incorrect_indices:
if predictions[kk] == 0:
fsimple.append(kk)
elif predictions[kk] ==1:
fcomplex.append(kk)
print (len(tcomplex))
num_complex = len(np.where(Y_new == 1)[0])
print(num_complex)
num_simple = float(np.sum(cm)-num_complex)
'''ax.scatter(cut, cm[0,0]/num_simple*100., c='b')
ax.scatter(cut, np.sum(predictions)/num_complex*100., c='g')
#print('No data at this cut (%f), continuing...'%(cut))
ax.set_xlabel(r'$\chi_{0}$')#(r'$\Delta\phi$')
ax.set_ylabel('Percent Correct')
ax.set_title(r'$\chi_{0}$ vs Correctness')
plt.savefig('chi_comp.png')
plt.show()'''
'''fig,ax = plt.subplots(4,3, figsize=(12,12))
#simple-simple
ax[0,0].plot(X_data[tsimple[0]])
ax[0,1].plot(X_data[tsimple[1]])
ax[0,2].plot(X_data[tsimple[2]])
ax[0,0].set_title('Simple. Gave: %i, Expected %i'%(predictions[tsimple[0]], Y_data[tsimple[0]]))
ax[0,1].set_title('Simple. Gave: %i, Expected %i'%(predictions[tsimple[1]], Y_data[tsimple[1]]))
ax[0,2].set_title('Simple. Gave: %i, Expected %i'%(predictions[tsimple[2]], Y_data[tsimple[2]]))
#falsely simple
ax[1,0].plot(X_data[fsimple[0]])
ax[1,1].plot(X_data[fsimple[1]])
ax[1,2].plot(X_data[fsimple[2]])
ax[1,0].set_title('False Simple. Gave: %i, Expected %i'%(predictions[fsimple[0]], Y_data[fsimple[0]]))
ax[1,1].set_title('False Simple. Gave: %i, Expected %i'%(predictions[fsimple[1]], Y_data[fsimple[1]]))
ax[1,2].set_title('False Simple.. Gave: %i, Expected %i'%(predictions[fsimple[1]], Y_data[fsimple[2]]))
#complex complex
ax[2,0].plot(X_data[tcomplex[0]])
ax[2,1].plot(X_data[tcomplex[1]])
ax[2,2].plot(X_data[tcomplex[2]])
ax[2,0].set_title('Complex. Gave: %i, Expected %i'%(predictions[tcomplex[0]], Y_data[tcomplex[0]]))
ax[2,1].set_title('Complex. Gave: %i, Expected %i'%(predictions[tcomplex[1]], Y_data[tcomplex[1]]))
ax[2,2].set_title('Complex. Gave: %i, Expected %i'%(predictions[tcomplex[2]], Y_data[tcomplex[2]]))
#falsely complex
ax[3,0].plot(X_data[fcomplex[0]])
ax[3,1].plot(X_data[fcomplex[1]])
ax[3,2].plot(X_data[fcomplex[2]])
ax[3,0].set_title('False Complex. Gave: %i, Expected %i'%(predictions[fcomplex[0]], Y_data[fcomplex[0]]))
ax[3,1].set_title('False Complex. Gave: %i, Expected %i'%(predictions[fcomplex[1]], Y_data[fcomplex[1]]))
ax[3,2].set_title('False Complex. Gave: %i, Expected %i'%(predictions[fcomplex[2]], Y_data[fcomplex[2]]))
plt.savefig('fit_res.png', bbox_inches='tight')
#plt.show()'''
| mit |
antgonza/qiime | tests/test_make_distance_boxplots.py | 15 | 13962 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "[email protected]"
"""Test suite for the make_distance_boxplots.py module."""
from unittest import TestCase, main
from matplotlib.figure import Figure
from numpy.testing import assert_almost_equal
from qiime.make_distance_boxplots import (_cast_y_axis_extrema,
_color_field_states,
make_distance_boxplots,
_sort_distributions)
class MakeDistanceBoxplotsTests(TestCase):
"""Tests for the make_distance_boxplots.py module."""
def setUp(self):
"""Define some sample data that will be used by the tests."""
self.map_f = map_lines.split('\n')
self.dm_f = dm_lines.split('\n')
self.too_many_colors_map_f = too_many_colors_map_lines.split('\n')
def test_cast_y_axis_extrema(self):
"""Test correctly assigns colors to a field based on another field."""
obs = _cast_y_axis_extrema(1.0)
assert_almost_equal(obs, 1.0)
obs = _cast_y_axis_extrema(1)
assert_almost_equal(obs, 1.0)
obs = _cast_y_axis_extrema('1.0')
assert_almost_equal(obs, 1.0)
obs = _cast_y_axis_extrema('1')
assert_almost_equal(obs, 1.0)
obs = _cast_y_axis_extrema('auto')
self.assertIsNone(obs)
def test_cast_y_axis_extrema_invalid_input(self):
"""Test correctly raises an error on bad input."""
self.assertRaises(ValueError, _cast_y_axis_extrema, 'foo')
def test_color_field_states(self):
"""Test correctly assigns colors to a field based on another field."""
# All sample IDs and field states.
exp = ([(1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (1.0, 0.0, 0.0)],
{'y': (0.0, 0.0, 1.0), 'x': (1.0, 0.0, 0.0)})
obs = _color_field_states(self.map_f, ['1', '2', '3', '4', '5', '6'],
'Foo', ['a', 'b', 'c'], 'Bar')
self.assertEqual(exp[0], obs[0])
assert_almost_equal(obs[1]['x'], exp[1]['x'])
assert_almost_equal(obs[1]['y'], exp[1]['y'])
# Subset of sample IDs and field states.
exp = ([(1.0, 0.0, 0.0)], {'x': (1.0, 0.0, 0.0)})
obs = _color_field_states(self.map_f, ['1', '2'], 'Foo', ['a'], 'Bar')
self.assertEqual(exp[0], obs[0])
assert_almost_equal(obs[1]['x'], exp[1]['x'])
# Color field by itself (useless but still allowed).
exp = ([(1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.9490196078431372,
0.45098039215686275, 0.01568627450980392)], {'a':
(1.0, 0.0, 0.0),
'c': (0.9490196078431372, 0.45098039215686275,
0.01568627450980392), 'b': (0.0, 0.0, 1.0)})
obs = _color_field_states(self.map_f, ['1', '2', '3', '4', '5', '6'],
'Foo', ['a', 'b', 'c'], 'Foo')
self.assertEqual(exp[0], obs[0])
assert_almost_equal(obs[1]['a'], exp[1]['a'])
assert_almost_equal(obs[1]['b'], exp[1]['b'])
assert_almost_equal(obs[1]['c'], exp[1]['c'])
def test_color_field_states_invalid_input(self):
"""Test correctly raises error on invalid input."""
# Field to color not in mapping file.
self.assertRaises(ValueError, _color_field_states, self.map_f,
['1', '2', '3', '4', '5'], 'Fooz', ['a', 'b'], 'Bar')
# Field to color by not in mapping file.
self.assertRaises(ValueError, _color_field_states, self.map_f,
['1', '2', '3', '4', '5'], 'Foo', ['a', 'b'], 'Barz')
# Field states are not found in field (due to subset of sample IDs).
self.assertRaises(ValueError, _color_field_states, self.map_f,
['1', '2', '3', '4', '5'], 'Foo', ['a', 'c'], 'Bar')
# Field states are not found in field (not in column at all).
self.assertRaises(ValueError, _color_field_states, self.map_f,
['1', '2', '3', '4', '5', '6'], 'Foo', ['a', 'c', 'z'], 'Bar')
# Not enough colors.
samp_ids = [str(i) for i in range(1, 31)]
self.assertRaises(ValueError, _color_field_states,
self.too_many_colors_map_f, samp_ids, 'Description', samp_ids,
'Description')
# No one-to-one mapping.
self.assertRaises(ValueError, _color_field_states, self.map_f,
['1', '2', '3', '4', '5', '6'], 'Foo', ['a', 'c', 'b'], 'Baz')
def test_make_distance_boxplots(self):
"""Test correctly generates plot, raw data, and labels."""
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Foo', 'Bar'])
self.assertEqual(len(obs), 2)
self.assertEqual(obs[0][0], 'Foo')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 7)
self.assertEqual(len(obs[0][3]), 7)
self.assertEqual(obs[0][4], [None, None, None, None, None, None, None])
self.assertEqual(obs[1][0], 'Bar')
self.assertTrue(isinstance(obs[1][1], Figure))
self.assertEqual(len(obs[1][2]), 5)
self.assertEqual(len(obs[1][3]), 5)
self.assertEqual(obs[1][4], [None, None, None, None, None])
def test_make_distance_boxplots_suppress_plots(self):
"""Test correctly suppresses different plot types."""
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
suppress_all_within=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 4)
self.assertEqual(len(obs[0][3]), 4)
self.assertEqual(obs[0][4], [None, None, None, None])
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
suppress_all_within=True,
suppress_all_between=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 3)
self.assertEqual(len(obs[0][3]), 3)
self.assertEqual(obs[0][4], [None, None, None])
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
suppress_all_within=True,
suppress_all_between=True,
suppress_individual_within=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 1)
self.assertEqual(len(obs[0][3]), 1)
self.assertEqual(obs[0][4], [None])
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
suppress_all_within=True,
suppress_all_between=True,
suppress_individual_between=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 2)
self.assertEqual(len(obs[0][3]), 2)
self.assertEqual(obs[0][4], [None, None])
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
suppress_individual_within=True,
suppress_individual_between=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 2)
self.assertEqual(len(obs[0][3]), 2)
self.assertEqual(obs[0][4], [None, None])
def test_make_distance_boxplots_box_color(self):
"""Test correctly colors boxes in a variety of ways."""
# Single box color for all.
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
box_color='r')
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 5)
self.assertEqual(len(obs[0][3]), 5)
self.assertEqual(obs[0][4], ['r', 'r', 'r', 'r', 'r'])
# Single box color, with some plots suppressed.
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
box_color='r',
suppress_individual_within=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 3)
self.assertEqual(len(obs[0][3]), 3)
self.assertEqual(obs[0][4], ['r', 'r', 'r'])
# Color individual within boxes.
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Foo'],
color_individual_within_by_field='Bar')
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Foo')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 7)
self.assertEqual(len(obs[0][3]), 7)
self.assertEqual(len(obs[0][4]), 7)
self.assertIsNone(obs[0][4][0])
self.assertIsNone(obs[0][4][1])
self.assertEqual(obs[0][4][2], (1.0, 0.0, 0.0))
self.assertEqual(obs[0][4][3], (0.0, 0.0, 1.0))
self.assertIsNone(obs[0][4][4])
self.assertIsNone(obs[0][4][5])
self.assertIsNone(obs[0][4][6])
# Color individual within boxes, make sure box_color is ignored.
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Foo'],
box_color='pink', color_individual_within_by_field='Bar')
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Foo')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 7)
self.assertEqual(len(obs[0][3]), 7)
self.assertEqual(len(obs[0][4]), 7)
self.assertIsNone(obs[0][4][0])
self.assertIsNone(obs[0][4][1])
self.assertEqual(obs[0][4][2], (1.0, 0.0, 0.0))
self.assertEqual(obs[0][4][3], (0.0, 0.0, 1.0))
self.assertIsNone(obs[0][4][4])
self.assertIsNone(obs[0][4][5])
self.assertIsNone(obs[0][4][6])
def test_make_distance_boxplots_invalid_input(self):
"""Test correctly raises an error on invalid input."""
# No fields provided.
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, None)
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, [])
# Nonexistent field.
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, ['Foo', 'foobarbaz'])
# Invalid width/height.
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, ['Foo', 'Bar'], width=-1, height=5)
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, ['Foo', 'Bar'], width=1, height=0)
# Suppress everything.
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, ['Foo', 'Bar'], suppress_all_within=True,
suppress_all_between=True, suppress_individual_within=True,
suppress_individual_between=True)
def test_sort_distributions_median(self):
"""Test correctly sorts distributions by median."""
exp = [([0, 0, 0, 1], [2, 1, 1], [1], [1, 2, 3]),
('bar', 'baz', 'zab', 'foo'), ('b', 'r', 'b', 'w')]
obs = _sort_distributions(
[[1, 2, 3], [2, 1, 1], [0, 0, 0, 1], [1]],
['foo', 'baz', 'bar', 'zab'], ['w', 'r', 'b', 'b'], 'median')
self.assertEqual(obs, exp)
def test_sort_distributions_alphabetical(self):
"""Test correctly sorts distributions alphabetically."""
exp = [([2, 1, 1], [1, 2, 3], [0, 0, 0, 1], [1]),
('baz', 'foo', 'foo', 'zab'), ('r', 'w', 'b', 'b')]
obs = _sort_distributions(
[[1, 2, 3], [2, 1, 1], [0, 0, 0, 1], [1]],
['foo', 'baz', 'foo', 'zab'], ['w', 'r', 'b', 'b'], 'alphabetical')
self.assertEqual(obs, exp)
def test_sort_distributions_invalid_input(self):
"""Correctly raises error on invalid input."""
# Unfortunately, this code doesn't support the brosort algorithm... :(
with self.assertRaises(ValueError):
_ = _sort_distributions([[1, 2, 3], [3, 2, 1]], ['foo', 'bar'],
['r', 'b'], 'brosort')
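# Test fixtures: a small QIIME mapping file, a 6x6 distance matrix, and a
# mapping file with more samples than there are distinct colors available.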
map_lines = """#SampleID\tFoo\tBar\tBaz\tDescription
1\ta\tx\tm\t1
2\tb\ty\tn\t2
3\ta\tx\tm\t3
4\ta\tx\tn\t4
5\tb\ty\tn\t5
6\tc\tx\tm\t6"""
dm_lines = """\t1\t2\t3\t4\t5\t6
1\t0\t1\t2\t4\t7\t11
2\t1\t0\t3\t5\t8\t12
3\t2\t3\t0\t6\t9\t13
4\t4\t5\t6\t0\t10\t14
5\t7\t8\t9\t10\t0\t15
6\t11\t12\t13\t14\t15\t0"""
too_many_colors_map_lines = """#SampleID\tDescription
1\t1
2\t2
3\t3
4\t4
5\t5
6\t6
7\t7
8\t8
9\t9
10\t10
11\t11
12\t12
13\t13
14\t14
15\t15
16\t16
17\t17
18\t18
19\t19
20\t20
21\t21
22\t22
23\t23
24\t24
25\t25
26\t26
27\t27
28\t28
29\t29
30\t30"""
if __name__ == "__main__":
main()
| gpl-2.0 |
juselius/gimic | src/pygimic/streamplot.py | 1 | 15026 | """
Streamline plotting like Mathematica.
Copyright (c) 2011 Tom Flannaghan.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
version = '4'
import numpy
import pylab
import matplotlib
import matplotlib.patches as mpp
def streamplot(x, y, u, v, density=1, linewidth=1,
color='k', cmap=None, norm=None, vmax=None, vmin=None,
arrowsize=1, INTEGRATOR='RK4'):
'''Draws streamlines of a vector flow.
* x and y are 1d arrays defining an *evenly spaced* grid.
* u and v are 2d arrays (shape [y,x]) giving velocities.
* density controls the closeness of the streamlines. For different
densities in each direction, use a tuple or list [densityx, densityy].
* linewidth is either a number (uniform lines) or a 2d array
(variable linewidth).
* color is either a color code (of any kind) or a 2d array. This is
then transformed into color by the cmap, norm, vmin and vmax args.
A value of None gives the default for each.
INTEGRATOR is experimental. Currently, RK4 should be used.
'''
## Sanity checks.
assert len(x.shape)==1
assert len(y.shape)==1
assert u.shape == (len(y), len(x))
assert v.shape == (len(y), len(x))
if type(linewidth) == numpy.ndarray:
assert linewidth.shape == (len(y), len(x))
if type(color) == numpy.ndarray:
assert color.shape == (len(y), len(x))
## Set up some constants - size of the grid used.
NGX = len(x)
NGY = len(y)
## Constants used to convert between grid index coords and user coords.
DX = x[1]-x[0]
DY = y[1]-y[0]
XOFF = x[0]
YOFF = y[0]
## Now rescale velocity onto axes-coordinates
u = u / (x[-1]-x[0])
v = v / (y[-1]-y[0])
speed = numpy.sqrt(u*u+v*v)
## s (path length) will now be in axes-coordinates, but we must
## rescale u for integrations.
u *= NGX
v *= NGY
## Now u and v in grid-coordinates.
## Blank array: This is the heart of the algorithm. It begins life
## zeroed, but is set to one when a streamline passes through each
## box. Then streamlines are only allowed to pass through zeroed
## boxes. The lower resolution of this grid determines the
## approximate spacing between trajectories.
if type(density) == float or type(density) == int:
assert density > 0
NBX = int(30*density)
NBY = int(30*density)
else:
assert len(density) > 0
NBX = int(30*density[0])
NBY = int(30*density[1])
blank = numpy.zeros((NBY,NBX))
## Constants for conversion between grid-index space and
## blank-index space
bx_spacing = NGX/float(NBX-1)
by_spacing = NGY/float(NBY-1)
def blank_pos(xi, yi):
## Takes grid space coords and returns nearest space in
## the blank array.
return int((xi / bx_spacing) + 0.5), \
int((yi / by_spacing) + 0.5)
def value_at(a, xi, yi):
## Linear interpolation - nice and quick because we are
## working in grid-index coordinates.
if type(xi) == numpy.ndarray:
x = xi.astype(numpy.int)
y = yi.astype(numpy.int)
else:
x = numpy.int(xi)
y = numpy.int(yi)
a00 = a[y,x]
a01 = a[y,x+1]
a10 = a[y+1,x]
a11 = a[y+1,x+1]
xt = xi - x
yt = yi - y
a0 = a00*(1-xt) + a01*xt
a1 = a10*(1-xt) + a11*xt
return a0*(1-yt) + a1*yt
def rk4_integrate(x0, y0):
## This function does RK4 forward and back trajectories from
## the initial conditions, with the odd 'blank array'
## termination conditions. TODO tidy the integration loops.
def f(xi, yi):
dt_ds = 1./value_at(speed, xi, yi)
ui = value_at(u, xi, yi)
vi = value_at(v, xi, yi)
return ui*dt_ds, vi*dt_ds
def g(xi, yi):
dt_ds = 1./value_at(speed, xi, yi)
ui = value_at(u, xi, yi)
vi = value_at(v, xi, yi)
return -ui*dt_ds, -vi*dt_ds
check = lambda xi, yi: xi>=0 and xi<NGX-1 and yi>=0 and yi<NGY-1
bx_changes = []
by_changes = []
## Integrator function
def rk4(x0, y0, f):
ds = 0.01 #min(1./NGX, 1./NGY, 0.01)
stotal = 0
xi = x0
yi = y0
xb, yb = blank_pos(xi, yi)
xf_traj = []
yf_traj = []
while check(xi, yi):
# Time step. First save the point.
xf_traj.append(xi)
yf_traj.append(yi)
# Next, advance one using RK4
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + .5*ds*k1x, yi + .5*ds*k1y)
k3x, k3y = f(xi + .5*ds*k2x, yi + .5*ds*k2y)
k4x, k4y = f(xi + ds*k3x, yi + ds*k3y)
except IndexError:
# Out of the domain on one of the intermediate steps
break
xi += ds*(k1x+2*k2x+2*k3x+k4x) / 6.
yi += ds*(k1y+2*k2y+2*k3y+k4y) / 6.
# Final position might be out of the domain
if not check(xi, yi): break
stotal += ds
# Next, if s gets to thres, check blank.
new_xb, new_yb = blank_pos(xi, yi)
if new_xb != xb or new_yb != yb:
# New square, so check and colour. Quit if required.
if blank[new_yb,new_xb] == 0:
blank[new_yb,new_xb] = 1
bx_changes.append(new_xb)
by_changes.append(new_yb)
xb = new_xb
yb = new_yb
else:
break
if stotal > 2:
break
return stotal, xf_traj, yf_traj
## Alternative Integrator function
        ## RK45 does not really help in its current state. The
## resulting trajectories are accurate but low-resolution in
## regions of high curvature and thus fairly ugly. Maybe a
## curvature based cap on the maximum ds permitted is the way
## forward.
def rk45(x0, y0, f):
maxerror = 0.001
maxds = 0.03
ds = 0.03
stotal = 0
xi = x0
yi = y0
xb, yb = blank_pos(xi, yi)
xf_traj = []
yf_traj = []
while check(xi, yi):
# Time step. First save the point.
xf_traj.append(xi)
yf_traj.append(yi)
# Next, advance one using RK45
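                # The coefficients below form the standard Runge-Kutta-Fehlberg
                # 4(5) Butcher tableau.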
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + .25*ds*k1x,
yi + .25*ds*k1y)
k3x, k3y = f(xi + 3./32*ds*k1x + 9./32*ds*k2x,
yi + 3./32*ds*k1y + 9./32*ds*k2y)
k4x, k4y = f(xi + 1932./2197*ds*k1x - 7200./2197*ds*k2x + 7296./2197*ds*k3x,
yi + 1932./2197*ds*k1y - 7200./2197*ds*k2y + 7296./2197*ds*k3y)
k5x, k5y = f(xi + 439./216*ds*k1x - 8*ds*k2x + 3680./513*ds*k3x - 845./4104*ds*k4x,
yi + 439./216*ds*k1y - 8*ds*k2y + 3680./513*ds*k3y - 845./4104*ds*k4y)
k6x, k6y = f(xi - 8./27*ds*k1x + 2*ds*k2x - 3544./2565*ds*k3x + 1859./4104*ds*k4x - 11./40*ds*k5x,
yi - 8./27*ds*k1y + 2*ds*k2y - 3544./2565*ds*k3y + 1859./4104*ds*k4y - 11./40*ds*k5y)
except IndexError:
# Out of the domain on one of the intermediate steps
break
dx4 = ds*(25./216*k1x + 1408./2565*k3x + 2197./4104*k4x - 1./5*k5x)
dy4 = ds*(25./216*k1y + 1408./2565*k3y + 2197./4104*k4y - 1./5*k5y)
dx5 = ds*(16./135*k1x + 6656./12825*k3x + 28561./56430*k4x - 9./50*k5x + 2./55*k6x)
dy5 = ds*(16./135*k1y + 6656./12825*k3y + 28561./56430*k4y - 9./50*k5y + 2./55*k6y)
## Error is normalized to the axes coordinates (it's a distance)
error = numpy.sqrt(((dx5-dx4)/NGX)**2 + ((dy5-dy4)/NGY)**2)
if error < maxerror:
# Step is within tolerance so continue
xi += dx5
yi += dy5
# Final position might be out of the domain
if not check(xi, yi): break
stotal += ds
# Next, if s gets to thres, check blank.
new_xb, new_yb = blank_pos(xi, yi)
if new_xb != xb or new_yb != yb:
# New square, so check and colour. Quit if required.
if blank[new_yb,new_xb] == 0:
blank[new_yb,new_xb] = 1
bx_changes.append(new_xb)
by_changes.append(new_yb)
xb = new_xb
yb = new_yb
else:
break
if stotal > 2:
break
# Modify ds for the next iteration.
if len(xf_traj) > 2:
                    ## hacky curvature dependence:
v1 = numpy.array((xf_traj[-1]-xf_traj[-2], yf_traj[-1]-yf_traj[-2]))
v2 = numpy.array((xf_traj[-2]-xf_traj[-3], yf_traj[-2]-yf_traj[-3]))
costheta = (v1/numpy.sqrt((v1**2).sum()) * v2/numpy.sqrt((v2**2).sum())).sum()
if costheta < .8:
ds = .01
continue
ds = min(maxds, 0.85*ds*(maxerror/error)**.2)
return stotal, xf_traj, yf_traj
## Forward and backward trajectories
if INTEGRATOR == 'RK4':
integrator = rk4
elif INTEGRATOR == 'RK45':
integrator = rk45
sf, xf_traj, yf_traj = integrator(x0, y0, f)
sb, xb_traj, yb_traj = integrator(x0, y0, g)
stotal = sf + sb
x_traj = xb_traj[::-1] + xf_traj[1:]
y_traj = yb_traj[::-1] + yf_traj[1:]
## Tests to check length of traj. Remember, s in units of axes.
if len(x_traj) < 1: return None
if stotal > .2:
initxb, inityb = blank_pos(x0, y0)
blank[inityb, initxb] = 1
return x_traj, y_traj
else:
for xb, yb in zip(bx_changes, by_changes):
blank[yb, xb] = 0
return None
## A quick function for integrating trajectories if blank==0.
trajectories = []
def traj(xb, yb):
if xb < 0 or xb >= NBX or yb < 0 or yb >= NBY:
return
if blank[yb, xb] == 0:
t = rk4_integrate(xb*bx_spacing, yb*by_spacing)
if t != None:
trajectories.append(t)
## Now we build up the trajectory set. I've found it best to look
## for blank==0 along the edges first, and work inwards.
for indent in range((max(NBX,NBY))/2):
for xi in range(max(NBX,NBY)-2*indent):
traj(xi+indent, indent)
traj(xi+indent, NBY-1-indent)
traj(indent, xi+indent)
traj(NBX-1-indent, xi+indent)
## PLOTTING HERE.
#pylab.pcolormesh(numpy.linspace(x.min(), x.max(), NBX+1),
# numpy.linspace(y.min(), y.max(), NBY+1), blank)
# Load up the defaults - needed to get the color right.
if type(color) == numpy.ndarray:
if vmin == None: vmin = color.min()
if vmax == None: vmax = color.max()
    if norm == None: norm = matplotlib.colors.Normalize
if cmap == None: cmap = matplotlib.cm.get_cmap(
matplotlib.rcParams['image.cmap'])
for t in trajectories:
# Finally apply the rescale to adjust back to user-coords from
# grid-index coordinates.
tx = numpy.array(t[0])*DX+XOFF
ty = numpy.array(t[1])*DY+YOFF
tgx = numpy.array(t[0])
tgy = numpy.array(t[1])
points = numpy.array([tx, ty]).T.reshape(-1,1,2)
segments = numpy.concatenate([points[:-1], points[1:]], axis=1)
args = {}
if type(linewidth) == numpy.ndarray:
args['linewidth'] = value_at(linewidth, tgx, tgy)[:-1]
arrowlinewidth = args['linewidth'][len(tgx)/2]
else:
args['linewidth'] = linewidth
arrowlinewidth = linewidth
if type(color) == numpy.ndarray:
args['color'] = cmap(norm(vmin=vmin,vmax=vmax)
(value_at(color, tgx, tgy)[:-1]))
arrowcolor = args['color'][len(tgx)/2]
else:
args['color'] = color
arrowcolor = color
lc = matplotlib.collections.LineCollection\
(segments, **args)
pylab.gca().add_collection(lc)
## Add arrows half way along each trajectory.
n = len(tx)/2
p = mpp.FancyArrowPatch((tx[n],ty[n]), (tx[n+1],ty[n+1]),
arrowstyle='->', lw=arrowlinewidth,
mutation_scale=20*arrowsize, color=arrowcolor)
pylab.gca().add_patch(p)
pylab.xlim(x.min(), x.max())
pylab.ylim(y.min(), y.max())
return
def test():
pylab.figure(1)
x = numpy.linspace(-3,3,100)
y = numpy.linspace(-3,3,100)
u = -1-x**2+y[:,numpy.newaxis]
v = 1+x-y[:,numpy.newaxis]**2
speed = numpy.sqrt(u*u + v*v)
pylab.subplot(121)
streamplot(x, y, u, v, density=1, INTEGRATOR='RK4', color='b')
pylab.subplot(122)
streamplot(x, y, u, v, density=(1,1), INTEGRATOR='RK4', color=u,
linewidth=5*speed/speed.max())
pylab.show()
if __name__ == '__main__':
test()
| gpl-2.0 |
Kongsea/tensorflow | tensorflow/tools/dist_test/python/census_widendeep.py | 48 | 11896 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
categorical_column_names: Names of the categorical columns
continuous_column_names: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
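  # Bucketizing age turns a continuous feature into a categorical one, so it
  # can be used directly in the linear model and crossed with sparse columns.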
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
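  # Wide columns: sparse base columns plus hand-crafted feature crosses; these
  # feed the linear ("wide") part of the wide-and-deep model.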
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
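  # Deep columns: dense embeddings of the sparse categorical columns plus the
  # raw continuous columns; these feed the DNN ("deep") part of the model.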
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
else:
      urllib.request.urlretrieve(train_data_url, train_file_path)
      train_file = open(train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
else:
      urllib.request.urlretrieve(test_data_url, test_file_path)
      test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
df: data feed
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
# Merges the two dictionaries into one.
    feature_cols = dict(
        list(continuous_cols.items()) + list(categorical_cols.items()))
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the census data")
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
axbaretto/beam | sdks/python/apache_beam/dataframe/expressions.py | 1 | 15120 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import random
import threading
from typing import Any
from typing import Callable
from typing import Iterable
from typing import Optional
from typing import TypeVar
from apache_beam.dataframe import partitionings
class Session(object):
"""A session represents a mapping of expressions to concrete values.
The bindings typically include required placeholders, but may be any
intermediate expression as well.
"""
def __init__(self, bindings=None):
self._bindings = dict(bindings or {})
def evaluate(self, expr): # type: (Expression) -> Any
if expr not in self._bindings:
self._bindings[expr] = expr.evaluate_at(self)
return self._bindings[expr]
def lookup(self, expr): # type: (Expression) -> Any
return self._bindings[expr]
class PartitioningSession(Session):
"""An extension of Session that enforces actual partitioning of inputs.
Each expression is evaluated multiple times for various supported
partitionings determined by its `requires_partition_by` specification. For
each tested partitioning, the input is partitioned and the expression is
evaluated on each partition separately, as if this were actually executed in
a parallel manner.
For each input partitioning, the results are verified to be partitioned
appropriately according to the expression's `preserves_partition_by`
specification.
For testing only.
"""
def evaluate(self, expr):
import pandas as pd
import collections
def is_scalar(expr):
return not isinstance(expr.proxy(), pd.core.generic.NDFrame)
if expr not in self._bindings:
if is_scalar(expr) or not expr.args():
result = super(PartitioningSession, self).evaluate(expr)
else:
scaler_args = [arg for arg in expr.args() if is_scalar(arg)]
def evaluate_with(input_partitioning):
parts = collections.defaultdict(
lambda: Session({arg: self.evaluate(arg)
for arg in scaler_args}))
for arg in expr.args():
if not is_scalar(arg):
input = self.evaluate(arg)
for key, part in input_partitioning.test_partition_fn(input):
parts[key]._bindings[arg] = part
if not parts:
parts[None] # Create at least one entry.
results = []
for session in parts.values():
if any(len(session.lookup(arg)) for arg in expr.args()
if not is_scalar(arg)):
results.append(session.evaluate(expr))
expected_output_partitioning = output_partitioning(
expr, input_partitioning)
if not expected_output_partitioning.check(results):
raise AssertionError(
f"""Expression does not preserve partitioning!
Expression: {expr}
Requires: {expr.requires_partition_by()}
Preserves: {expr.preserves_partition_by()}
Input partitioning: {input_partitioning}
Expected output partitioning: {expected_output_partitioning}
""")
if results:
return pd.concat(results)
else:
# Choose any single session.
return next(iter(parts.values())).evaluate(expr)
# Store random state so it can be re-used for each execution, in case
# the expression is part of a test that relies on the random seed.
random_state = random.getstate()
result = None
# Run with all supported partitionings s.t. the smallest subpartitioning
# is used last. This way the final result is computed with the most
# challenging partitioning. Avoids heisenbugs where sometimes the result
# is computed trivially with Singleton partitioning and passes.
for input_partitioning in sorted(set([expr.requires_partition_by(),
partitionings.Arbitrary(),
partitionings.Index(),
partitionings.Singleton()])):
if not expr.requires_partition_by().is_subpartitioning_of(
input_partitioning):
continue
random.setstate(random_state)
result = evaluate_with(input_partitioning)
assert result is not None
self._bindings[expr] = result
return self._bindings[expr]
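# Note: PartitioningSession is used exactly like Session, e.g.
# PartitioningSession({placeholder: df}).evaluate(expr); the difference is that
# it re-runs the evaluation under each candidate input partitioning and checks
# that the declared preserves_partition_by contract actually holds.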
# The return type of an Expression
T = TypeVar('T')
def output_partitioning(expr, input_partitioning):
""" Return the expected output partitioning for `expr` when it's input is
partitioned by `input_partitioning`.
For internal use only; No backward compatibility guarantees """
assert expr.requires_partition_by().is_subpartitioning_of(input_partitioning)
if expr.preserves_partition_by().is_subpartitioning_of(input_partitioning):
return min(input_partitioning, expr.preserves_partition_by())
else:
return partitionings.Arbitrary()
class Expression(object):
"""An expression is an operation bound to a set of arguments.
An expression represents a deferred tree of operations, which can be
evaluated at a specific bindings of root expressions to values.
requires_partition_by indicates the upper bound of a set of partitionings that
are acceptable inputs to this expression. The expression should be able to
produce the correct result when given input(s) partitioned by its
  requires_partition_by attribute, or by any partitioning that is _not_
a subpartitioning of it.
preserves_partition_by indicates the upper bound of a set of partitionings
that can be preserved by this expression. When the input(s) to this expression
are partitioned by preserves_partition_by, or by any partitioning that is
_not_ a subpartitioning of it, this expression should produce output(s)
partitioned by the same partitioning.
However, if the partitioning of an expression's input is a subpartitioning of
the partitioning that it preserves, the output is presumed to have no
particular partitioning (i.e. Arbitrary()).
For example, let's look at an "element-wise operation", that has no
partitioning requirement, and preserves any partitioning given to it::
requires_partition_by = Arbitrary() -----------------------------+
|
+-----------+-------------+---------- ... ----+---------|
| | | | |
Singleton() < Index([i]) < Index([i, j]) < ... < Index() < Arbitrary()
| | | | |
+-----------+-------------+---------- ... ----+---------|
|
preserves_partition_by = Arbitrary() ----------------------------+
As a more interesting example, consider this expression, which requires Index
partitioning, and preserves just Singleton partitioning::
requires_partition_by = Index() -----------------------+
|
+-----------+-------------+---------- ... ----|
| | | |
Singleton() < Index([i]) < Index([i, j]) < ... < Index() < Arbitrary()
|
|
preserves_partition_by = Singleton()
Note that any non-Arbitrary partitioning is an acceptable input for this
expression. However, unless the inputs are Singleton-partitioned, the
expression makes no guarantees about the partitioning of the output.
"""
def __init__(
self,
name, # type: str
proxy, # type: T
_id=None # type: Optional[str]
):
self._name = name
self._proxy = proxy
# Store for preservation through pickling.
self._id = _id or '%s_%s_%s' % (name, type(proxy).__name__, id(self))
def proxy(self): # type: () -> T
return self._proxy
def __hash__(self):
return hash(self._id)
def __eq__(self, other):
return self._id == other._id
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self._id)
def placeholders(self):
"""Returns all the placeholders that self depends on."""
raise NotImplementedError(type(self))
def evaluate_at(self, session): # type: (Session) -> T
"""Returns the result of self with the bindings given in session."""
raise NotImplementedError(type(self))
def requires_partition_by(self): # type: () -> partitionings.Partitioning
"""Returns the partitioning, if any, require to evaluate this expression.
Returns partitioning.Arbitrary() to require no partitioning is required.
"""
raise NotImplementedError(type(self))
def preserves_partition_by(self): # type: () -> partitionings.Partitioning
"""Returns the partitioning, if any, preserved by this expression.
    This gives an upper bound on the partitioning of its output. The actual
partitioning of the output may be less strict (e.g. if the input was
less partitioned).
"""
raise NotImplementedError(type(self))
class PlaceholderExpression(Expression):
"""An expression whose value must be explicitly bound in the session."""
def __init__(
self, # type: PlaceholderExpression
proxy, # type: T
reference=None, # type: Any
):
"""Initialize a placeholder expression.
Args:
proxy: A proxy object with the type expected to be bound to this
expression. Used for type checking at pipeline construction time.
"""
super(PlaceholderExpression, self).__init__('placeholder', proxy)
self._reference = reference
def placeholders(self):
return frozenset([self])
def args(self):
return ()
def evaluate_at(self, session):
return session.lookup(self)
def requires_partition_by(self):
return partitionings.Arbitrary()
def preserves_partition_by(self):
return partitionings.Index()
class ConstantExpression(Expression):
"""An expression whose value is known at pipeline construction time."""
def __init__(
self, # type: ConstantExpression
value, # type: T
proxy=None # type: Optional[T]
):
"""Initialize a constant expression.
Args:
value: The constant value to be produced by this expression.
proxy: (Optional) a proxy object with same type as `value` to use for
rapid type checking at pipeline construction time. If not provided,
`value` will be used directly.
"""
if proxy is None:
proxy = value
super(ConstantExpression, self).__init__('constant', proxy)
self._value = value
def placeholders(self):
return frozenset()
def args(self):
return ()
def evaluate_at(self, session):
return self._value
def requires_partition_by(self):
return partitionings.Arbitrary()
def preserves_partition_by(self):
return partitionings.Arbitrary()
class ComputedExpression(Expression):
"""An expression whose value must be computed at pipeline execution time."""
def __init__(
self, # type: ComputedExpression
name, # type: str
func, # type: Callable[...,T]
args, # type: Iterable[Expression]
proxy=None, # type: Optional[T]
_id=None, # type: Optional[str]
requires_partition_by=partitionings.Index(), # type: partitionings.Partitioning
preserves_partition_by=partitionings.Singleton(), # type: partitionings.Partitioning
):
"""Initialize a computed expression.
Args:
name: The name of this expression.
func: The function that will be used to compute the value of this
expression. Should accept arguments of the types returned when
evaluating the `args` expressions.
args: The list of expressions that will be used to produce inputs to
`func`.
proxy: (Optional) a proxy object with same type as the objects that this
ComputedExpression will produce at execution time. If not provided, a
proxy will be generated using `func` and the proxies of `args`.
_id: (Optional) a string to uniquely identify this expression.
requires_partition_by: The required (common) partitioning of the args.
preserves_partition_by: The level of partitioning preserved.
"""
if (not _get_allow_non_parallel() and
isinstance(requires_partition_by, partitionings.Singleton)):
reason = requires_partition_by.reason or (
f"Encountered non-parallelizable form of {name!r}.")
raise NonParallelOperation(
f"{reason}\n"
"Consider using an allow_non_parallel_operations block if you're "
"sure you want to do this. See "
"https://s.apache.org/dataframe-non-parallel-operations for more "
"information.")
args = tuple(args)
if proxy is None:
proxy = func(*(arg.proxy() for arg in args))
super(ComputedExpression, self).__init__(name, proxy, _id)
self._func = func
self._args = args
self._requires_partition_by = requires_partition_by
self._preserves_partition_by = preserves_partition_by
def placeholders(self):
return frozenset.union(
frozenset(), *[arg.placeholders() for arg in self.args()])
def args(self):
return self._args
def evaluate_at(self, session):
return self._func(*(session.evaluate(arg) for arg in self._args))
def requires_partition_by(self):
return self._requires_partition_by
def preserves_partition_by(self):
return self._preserves_partition_by
def elementwise_expression(name, func, args):
return ComputedExpression(
name,
func,
args,
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
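# Added illustration (a minimal sketch, not part of the original module): it
# shows the deferred-evaluation contract described in the Expression docstring
# above. The pandas Series and the helper name below are assumptions made
# purely for this example.
def _example_elementwise_expression():
  import pandas as pd
  s = pd.Series([1, 2, 3])
  # A placeholder stands in for a value that is bound later in a Session.
  base = PlaceholderExpression(s)
  # An elementwise expression requires no particular partitioning of its input
  # and preserves whatever partitioning the input had (both Arbitrary()).
  doubled = elementwise_expression('double', lambda x: x * 2, [base])
  assert isinstance(doubled.requires_partition_by(), partitionings.Arbitrary)
  assert isinstance(doubled.preserves_partition_by(), partitionings.Arbitrary)
  # Evaluating against a session with the placeholder bound runs the function.
  return Session({base: s}).evaluate(doubled)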
_ALLOW_NON_PARALLEL = threading.local()
_ALLOW_NON_PARALLEL.value = False
def _get_allow_non_parallel():
return _ALLOW_NON_PARALLEL.value
@contextlib.contextmanager
def allow_non_parallel_operations(allow=True):
if allow is None:
yield
else:
old_value, _ALLOW_NON_PARALLEL.value = _ALLOW_NON_PARALLEL.value, allow
yield
_ALLOW_NON_PARALLEL.value = old_value
class NonParallelOperation(Exception):
def __init__(self, msg):
super(NonParallelOperation, self).__init__(self, msg)
self.msg = msg
| apache-2.0 |
peastman/deepchem | contrib/dragonn/tutorial_utils.py | 6 | 10169 | from __future__ import division
import random
random.seed(1)
import inspect
from collections import namedtuple, defaultdict, OrderedDict
import numpy as np
np.random.seed(1)
from sklearn.model_selection import train_test_split
#from simdna import simulations
import simulations
from simdna.synthetic import StringEmbeddable
from utils import get_motif_scores, one_hot_encode
from models import SequenceDNN
from dragonn.plot import add_letters_to_axis, plot_motif
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def SequenceDNN_learning_curve(dnn):
if dnn.valid_metrics is not None:
train_losses, valid_losses = [
np.array([epoch_metrics['Loss']
for epoch_metrics in metrics])
for metrics in (dnn.train_metrics, dnn.valid_metrics)
]
min_loss_indx = min(enumerate(valid_losses), key=lambda x: x[1])[0]
f = plt.figure(figsize=(10, 4))
ax = f.add_subplot(1, 1, 1)
ax.plot(range(len(train_losses)), train_losses, 'b', label='Training', lw=4)
ax.plot(
range(len(train_losses)), valid_losses, 'r', label='Validation', lw=4)
ax.plot([min_loss_indx, min_loss_indx], [0, 1.0], 'k--', label='Early Stop')
ax.legend(loc="upper right")
ax.set_ylabel("Loss")
ax.set_ylim((0.0, 1.0))
ax.set_xlabel("Epoch")
plt.show()
else:
print("learning curve can only be obtained after training!")
def test_SequenceDNN(dnn, simulation_data):
print("Test performance:")
print(dnn.test(simulation_data.X_test, simulation_data.y_test))
def plot_motifs(simulation_data):
for motif_name in simulation_data.motif_names:
plot_motif(motif_name, figsize=(10, 4), ylab=motif_name)
def plot_sequence_filters(dnn):
fig = plt.figure(figsize=(15, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
conv_filters = dnn.get_sequence_filters()
num_plots_per_axis = int(len(conv_filters)**0.5) + 1
for i, conv_filter in enumerate(conv_filters):
ax = fig.add_subplot(num_plots_per_axis, num_plots_per_axis, i + 1)
add_letters_to_axis(ax, conv_filter.T)
ax.axis("off")
ax.set_title("Filter %s" % (str(i + 1)))
def plot_SequenceDNN_layer_outputs(dnn, simulation_data):
  # define layer output functions
import theano
get_conv_output = theano.function(
[dnn.model.layers[0].input],
dnn.model.layers[0].get_output(train=False),
allow_input_downcast=True)
get_conv_relu_output = theano.function(
[dnn.model.layers[0].input],
dnn.model.layers[1].get_output(train=False),
allow_input_downcast=True)
get_maxpool_output = theano.function(
[dnn.model.layers[0].input],
dnn.model.layers[-4].get_output(train=False),
allow_input_downcast=True)
# get layer outputs for a positive simulation example
pos_indx = np.where(simulation_data.y_valid == 1)[0][0]
pos_X = simulation_data.X_valid[pos_indx:(pos_indx + 1)]
conv_outputs = get_conv_output(pos_X).squeeze()
conv_relu_outputs = get_conv_relu_output(pos_X).squeeze()
maxpool_outputs = get_maxpool_output(pos_X).squeeze()
# plot layer outputs
fig = plt.figure(figsize=(15, 12))
ax1 = fig.add_subplot(3, 1, 3)
heatmap = ax1.imshow(
conv_outputs, aspect='auto', interpolation='None', cmap='seismic')
fig.colorbar(heatmap)
ax1.set_ylabel("Convolutional Filters")
ax1.set_xlabel("Position")
ax1.get_yaxis().set_ticks([])
ax1.get_xaxis().set_ticks([])
ax1.set_title("SequenceDNN outputs from convolutional layer.\t\
Locations of motif sites are highlighted in grey.")
ax2 = fig.add_subplot(3, 1, 2)
heatmap = ax2.imshow(
conv_relu_outputs, aspect='auto', interpolation='None', cmap='seismic')
fig.colorbar(heatmap)
ax2.set_ylabel("Convolutional Filters")
ax2.get_yaxis().set_ticks([])
ax2.get_xaxis().set_ticks([])
ax2.set_title("Convolutional outputs after ReLU transformation.\t\
Locations of motif sites are highlighted in grey.")
ax3 = fig.add_subplot(3, 1, 1)
heatmap = ax3.imshow(
maxpool_outputs, aspect='auto', interpolation='None', cmap='seismic')
fig.colorbar(heatmap)
ax3.set_title("DNN outputs after max pooling")
ax3.set_ylabel("Convolutional Filters")
ax3.get_yaxis().set_ticks([])
ax3.get_xaxis().set_ticks([])
# highlight motif sites
motif_scores = get_motif_scores(pos_X, simulation_data.motif_names)
motif_sites = [np.argmax(motif_scores[0, i, :]) for i in [0, 1]]
for motif_site in motif_sites:
conv_output_start = motif_site - max(dnn.conv_width - 10, 0)
conv_output_stop = motif_site + max(dnn.conv_width - 10, 0)
ax1.axvspan(conv_output_start, conv_output_stop, color='grey', alpha=0.5)
ax2.axvspan(conv_output_start, conv_output_stop, color='grey', alpha=0.5)
def interpret_SequenceDNN_filters(dnn, simulation_data):
print("Plotting simulation motifs...")
plot_motifs(simulation_data)
plt.show()
print("Visualizing convolutional sequence filters in SequenceDNN...")
plot_sequence_filters(dnn)
plt.show()
def interpret_data_with_SequenceDNN(dnn, simulation_data):
# get a positive and a negative example from the simulation data
pos_indx = np.flatnonzero(simulation_data.y_valid == 1)[2]
neg_indx = np.flatnonzero(simulation_data.y_valid == 0)[2]
pos_X = simulation_data.X_valid[pos_indx:pos_indx + 1]
neg_X = simulation_data.X_valid[neg_indx:neg_indx + 1]
# get motif scores, ISM scores, and DeepLIFT scores
scores_dict = defaultdict(OrderedDict)
scores_dict['Positive']['Motif Scores'] = get_motif_scores(
pos_X, simulation_data.motif_names)
scores_dict['Positive']['ISM Scores'] = dnn.in_silico_mutagenesis(pos_X).max(
axis=-2)
scores_dict['Positive']['DeepLIFT Scores'] = dnn.deeplift(pos_X).max(axis=-2)
scores_dict['Negative']['Motif Scores'] = get_motif_scores(
neg_X, simulation_data.motif_names)
scores_dict['Negative']['ISM Scores'] = dnn.in_silico_mutagenesis(neg_X).max(
axis=-2)
scores_dict['Negative']['DeepLIFT Scores'] = dnn.deeplift(neg_X).max(axis=-2)
# get motif site locations
motif_sites = {
key: [
embedded_motif.startPos + len(embedded_motif.what.string) // 2
for embedded_motif in (next(
embedded_motif
for embedded_motif in simulation_data.valid_embeddings[index]
if isinstance(embedded_motif.what, StringEmbeddable) and
motif_name in embedded_motif.what.stringDescription)
for motif_name in simulation_data.motif_names)
]
for key, index in (('Positive', pos_indx), ('Negative', neg_indx))
}
# organize legends
motif_label_dict = {}
motif_label_dict['Motif Scores'] = simulation_data.motif_names
if len(simulation_data.motif_names) == dnn.num_tasks:
motif_label_dict['ISM Scores'] = simulation_data.motif_names
else:
motif_label_dict['ISM Scores'] = ['_'.join(simulation_data.motif_names)]
motif_label_dict['DeepLIFT Scores'] = motif_label_dict['ISM Scores']
# plot scores and highlight motif site locations
seq_length = pos_X.shape[-1]
plots_per_row = 2
plots_per_column = 3
ylim_dict = {
'Motif Scores': (-80, 30),
'ISM Scores': (-1.5, 3.0),
'DeepLIFT Scores': (-1.5, 3.0)
}
motif_colors = ['b', 'r', 'c', 'm', 'g', 'k', 'y']
font_size = 12
num_x_ticks = 5
highlight_width = 5
motif_labels_cache = []
f = plt.figure(figsize=(10, 12))
f.subplots_adjust(hspace=0.15, wspace=0.15)
f.set_tight_layout(True)
for j, key in enumerate(['Positive', 'Negative']):
for i, (score_type, scores) in enumerate(scores_dict[key].items()):
ax = f.add_subplot(plots_per_column, plots_per_row,
plots_per_row * i + j + 1)
ax.set_ylim(ylim_dict[score_type])
ax.set_xlim((0, seq_length))
ax.set_frame_on(False)
if j == 0: # put y axis and ticks only on left side
xmin, xmax = ax.get_xaxis().get_view_interval()
ymin, ymax = ax.get_yaxis().get_view_interval()
ax.add_artist(
Line2D((xmin, xmin), (ymin, ymax), color='black', linewidth=2))
ax.get_yaxis().tick_left()
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size / 1.5)
ax.set_ylabel(score_type)
if j > 0: # remove y axes
ax.get_yaxis().set_visible(False)
if i < (plots_per_column - 1): # remove x axes
ax.get_xaxis().set_visible(False)
if i == (plots_per_column - 1): # set x axis and ticks on bottom
ax.set_xticks(seq_length / num_x_ticks * (np.arange(num_x_ticks + 1)))
xmin, xmax = ax.get_xaxis().get_view_interval()
ymin, ymax = ax.get_yaxis().get_view_interval()
ax.add_artist(
Line2D((xmin, xmax), (ymin, ymin), color='black', linewidth=2))
ax.get_xaxis().tick_bottom()
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size / 1.5)
ax.set_xlabel("Position")
if j > 0 and i < (plots_per_column - 1): # remove all axes
ax.axis('off')
add_legend = False
for _i, motif_label in enumerate(motif_label_dict[score_type]):
if score_type == 'Motif Scores':
scores_to_plot = scores[0, _i, :]
else:
scores_to_plot = scores[0, 0, 0, :]
if motif_label not in motif_labels_cache:
motif_labels_cache.append(motif_label)
add_legend = True
motif_color = motif_colors[motif_labels_cache.index(motif_label)]
ax.plot(scores_to_plot, label=motif_label, c=motif_color)
if add_legend:
leg = ax.legend(
loc=[0, 0.85],
frameon=False,
fontsize=font_size,
ncol=3,
handlelength=-0.5)
for legobj in leg.legendHandles:
legobj.set_color('w')
for _j, text in enumerate(leg.get_texts()):
text_color = motif_colors[motif_labels_cache.index(
motif_label_dict[score_type][_j])]
text.set_color(text_color)
for motif_site in motif_sites[key]:
ax.axvspan(
motif_site - highlight_width,
motif_site + highlight_width,
color='grey',
alpha=0.1)
| mit |
maheshakya/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
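# The imports below re-export the public metrics API from its new submodules
# (ranking, classification, regression) so that existing
# ``from sklearn.metrics.metrics import ...`` statements keep working until the
# module is removed in 0.18.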
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 8 | 7418 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` drastically reduces the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positive)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import pylab as pl
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = pl.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
pl.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
pl.loglog(n_samples_range, min_n_components, color=color)
pl.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
pl.xlabel("Number of observations to eps-embed")
pl.ylabel("Minimum number of dimensions")
pl.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
pl.show()
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = pl.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
pl.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
pl.semilogy(eps_range, min_n_components, color=color)
pl.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
pl.xlabel("Distortion eps")
pl.ylabel("Minimum number of dimensions")
pl.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
pl.show()
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
pl.figure()
pl.hexbin(dists, projected_dists, gridsize=100)
pl.xlabel("Pairwise squared distances in original space")
pl.ylabel("Pairwise squared distances in projected space")
pl.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = pl.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
pl.figure()
pl.hist(rates, bins=50, normed=True, range=(0., 2.))
pl.xlabel("Squared distances rate: projected / original")
pl.ylabel("Distribution of samples pairs")
pl.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
pl.show()
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
| bsd-3-clause |
zhenv5/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
dereknewman/cancer_detection | extract_cubes.py | 1 | 7181 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 12:51:14 2017
@author: derek
"""
import pandas as pd
import SimpleITK
import numpy as np
import tensorflow as tf
import cv2
TARGET_VOXEL_MM = 0.682
BASE_DIR = "/media/derek/disk1/kaggle_ndsb2017/"
def normalize(image):
""" Normalize image -> clip data between -1000 and 400. Scale values to -0.5 to 0.5
"""
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image > 1] = 1.
image[image < 0] = 0.
image -= 0.5
return image
def extract_cube(image_array,z_perc,y_perc,x_perc):
"""extract a 32x32x32 chunk from data specified by the center in percentage
(z_perc,y_perc, x_perc)
Args:
image_array: full size image data cube
z_perc: the z dimensional center given as a percentage of the total z
y_perc: the y dimensional center given as a percentage of the total y
x_perc: the x dimensional center given as a percentage of the total x
Returns:
image_cube: 32x32x32 subsection of image_arrary centered at (z,y,x)
"""
im_z, im_y, im_x = image_array.shape
z_min = int(round(z_perc*im_z)) - 16
y_min = int(round(y_perc*im_y)) - 16
x_min = int(round(x_perc*im_x)) - 16
z_max = int(round(z_perc*im_z)) + 16
y_max = int(round(y_perc*im_y)) + 16
x_max = int(round(x_perc*im_x)) + 16
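    # if the requested 32-voxel window sticks out of the volume on any axis,
    # shift it back inside so the extracted cube keeps its full 32x32x32 size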
if z_min < 0:
z_max = z_max + abs(z_min)
z_min = 0
if y_min < 0:
y_max = y_max + abs(y_min)
y_min = 0
if x_min < 0:
x_max = x_max + abs(x_min)
x_min = 0
if z_max > im_z:
z_min = z_min - abs(z_max - im_z)
z_max = im_z
if y_max > im_y:
y_min = y_min - abs(y_max - im_y)
y_max = im_y
if x_max > im_x:
x_min = x_min - abs(x_max - im_x)
x_max = im_x
image_cube = image_array[z_min:z_max,y_min:y_max,x_min:x_max]
return image_cube
def add_to_tfrecord(writer,image_cube, label):
"""add a tfrecord to a tfwriter
Args:
writer: tfwriter
        image_cube: usually a 32x32x32 cube of data
        label: associated truth label for data (usually malignancy, spiculation, lobulation)
Returns:
Nothing
"""
image_cube = np.asarray(image_cube,np.int16) #ensure data is in int16
image_shape = image_cube.shape
binary_cube = image_cube.tobytes()
    binary_label = np.array(label, np.int16).tobytes()
binary_shape = np.array(image_shape, np.int16).tobytes()
example = tf.train.Example(features=tf.train.Features(feature={
'shape': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_shape])),
'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_label])),
'cube': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_cube]))
}))
writer.write(example.SerializeToString())
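# Illustrative sketch (not called anywhere in this script): how a record written
# by add_to_tfrecord could be decoded with the TF1-style parsing API, assuming
# the same int16 byte encoding used above.
def decode_tfrecord_example(serialized_example):
    feats = tf.parse_single_example(serialized_example, {
        'shape': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.string),
        'cube': tf.FixedLenFeature([], tf.string)})
    shape = tf.cast(tf.decode_raw(feats['shape'], tf.int16), tf.int32)
    label = tf.decode_raw(feats['label'], tf.int16)
    cube = tf.reshape(tf.decode_raw(feats['cube'], tf.int16), shape)
    return cube, label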
def rescale_patient_images(images_zyx, org_spacing_xyz, target_voxel_mm, verbose=False):
"""rescale patient images (3d cube data) to target_voxel_mm
Args:
images_zyx: full size image data cube
org_spacing_xyz: original spacing
target_voxel_mm: size of rescaled voxels
verbose: print extra info
Returns:
        res: image data cube resampled so that voxels are target_voxel_mm on each side
"""
if verbose:
print("Spacing: ", org_spacing_xyz)
print("Shape: ", images_zyx.shape)
# print "Resizing dim z"
resize_x = 1.0
resize_y = float(org_spacing_xyz[2]) / float(target_voxel_mm)
interpolation = cv2.INTER_LINEAR
    res = cv2.resize(images_zyx, dsize=None, fx=resize_x, fy=resize_y, interpolation=interpolation)  # OpenCV treats the array as (rows, cols, channels), so rows here are the z axis
# print "Shape is now : ", res.shape
res = res.swapaxes(0, 2)
res = res.swapaxes(0, 1)
resize_x = float(org_spacing_xyz[0]) / float(target_voxel_mm)
resize_y = float(org_spacing_xyz[1]) / float(target_voxel_mm)
# cv2 can handle max 512 channels..
if res.shape[2] > 512:
res = res.swapaxes(0, 2)
res1 = res[:256]
res2 = res[256:]
res1 = res1.swapaxes(0, 2)
res2 = res2.swapaxes(0, 2)
res1 = cv2.resize(res1, dsize=None, fx=resize_x, fy=resize_y, interpolation=interpolation)
res2 = cv2.resize(res2, dsize=None, fx=resize_x, fy=resize_y, interpolation=interpolation)
res1 = res1.swapaxes(0, 2)
res2 = res2.swapaxes(0, 2)
res = np.vstack([res1, res2])
res = res.swapaxes(0, 2)
else:
res = cv2.resize(res, dsize=None, fx=resize_x, fy=resize_y, interpolation=interpolation)
res = res.swapaxes(0, 2)
res = res.swapaxes(2, 1)
if verbose:
print("Shape after: ", res.shape)
return res
#################
save_path = "/media/derek/disk1/kaggle_ndsb2017/resources/_tfrecords/"
full_dataframe = pd.read_csv(BASE_DIR + "patID_x_y_z_mal.csv")
patients = full_dataframe.patient_id.unique()
#patient = "1.3.6.1.4.1.14519.5.2.1.6279.6001.131939324905446238286154504249"
for patient in patients:
    patient_df = full_dataframe.loc[full_dataframe['patient_id'] == patient]  # create a dataframe associated with a single patient
patient_df = patient_df.sort_values('z_center')
patient_path = patient_df.file_path.unique()[0] #locate the path to the '.mhd' file
print(patient)
#####################################
#### Load and process image ########
#####################################
itk_img = SimpleITK.ReadImage(patient_path)
    if (np.array(itk_img.GetDirection()) != np.array([1., 0., 0., 0., 1., 0., 0., 0., 1.])).any():
print("WARNING!!!!! Image in different direction")
image_array = SimpleITK.GetArrayFromImage(itk_img)
spacing = np.array(itk_img.GetSpacing()) # spacing of voxels in world coor. (mm)
image_array = rescale_patient_images(image_array, spacing, TARGET_VOXEL_MM)
tfrecord_file0 = save_path + patient + "_0.tfrecord"
tfrecord_file1 = save_path + patient + "_1.tfrecord"
tfrecord_file2 = save_path + patient + "_2.tfrecord"
tfrecord_file3 = save_path + patient + "_3.tfrecord"
tfrecord_file4 = save_path + patient + "_4.tfrecord"
tfrecord_file5 = save_path + patient + "_5.tfrecord"
writer0 = tf.python_io.TFRecordWriter(tfrecord_file0)
writer1 = tf.python_io.TFRecordWriter(tfrecord_file1)
writer2 = tf.python_io.TFRecordWriter(tfrecord_file2)
writer3 = tf.python_io.TFRecordWriter(tfrecord_file3)
writer4 = tf.python_io.TFRecordWriter(tfrecord_file4)
writer5 = tf.python_io.TFRecordWriter(tfrecord_file5)
    # one tfrecord writer per malignancy score (assumption: "malscore" takes the
    # integer values 0-5 matching the six files created above)
    writers = [writer0, writer1, writer2, writer3, writer4, writer5]
    for index, row in patient_df.iterrows():
        z_perc = row["z_center_perc"]
        y_perc = row["y_center_perc"]
        x_perc = row["x_center_perc"]
        image_cube = extract_cube(image_array, z_perc, y_perc, x_perc)
        image_label = (row["malscore"], row["spiculation"], row["lobulation"])
        writer = writers[int(row["malscore"])]
        add_to_tfrecord(writer, image_cube, image_label)
    for writer in writers:
        writer.close()
#np.save(settings.BASE_DIR + "resources/_cubes/" + patient + '_train.npy', (image_cubes, image_labels))
| mit |
f3r/scikit-learn | sklearn/utils/tests/test_testing.py | 107 | 4210 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired by numpy 1.7 with an alteration to check that
# the warning filters are reset after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
            raise AssertionError("wrong warning caught by assert_warns")
| bsd-3-clause |
jstoxrocky/statsmodels | examples/python/robust_models_0.py | 33 | 2992 |
## Robust Linear Models
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
# ## Estimation
#
# Load data:
data = sm.datasets.stackloss.load()
data.exog = sm.add_constant(data.exog)
# Huber's T norm with the (default) median absolute deviation scaling
huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())
hub_results = huber_t.fit()
print(hub_results.params)
print(hub_results.bse)
print(hub_results.summary(yname='y',
xname=['var_%d' % i for i in range(len(hub_results.params))]))
# Huber's T norm with 'H2' covariance matrix
hub_results2 = huber_t.fit(cov="H2")
print(hub_results2.params)
print(hub_results2.bse)
# Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix
andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())
andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(), cov="H3")
print('Parameters: ', andrew_results.params)
# See ``help(sm.RLM.fit)`` for more options and the ``sm.robust.scale`` module for scale options
#
# ## Comparing OLS and RLM
#
# Artificial data with outliers:
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, (x1-5)**2))
X = sm.add_constant(X)
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [5, 0.5, -0.0]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig*1. * np.random.normal(size=nsample)
y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample)
# ### Example 1: quadratic function with linear truth
#
# Note that the quadratic term in OLS regression will capture outlier effects.
res = sm.OLS(y2, X).fit()
print(res.params)
print(res.bse)
print(res.predict())
# Estimate RLM:
resrlm = sm.RLM(y2, X).fit()
print(resrlm.params)
print(resrlm.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(x1, y2, 'o',label="data")
ax.plot(x1, y_true2, 'b-', label="True")
prstd, iv_l, iv_u = wls_prediction_std(res)
ax.plot(x1, res.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
# ### Example 2: linear function with linear truth
#
# Fit a new OLS model using only the linear term and the constant:
X2 = X[:,[0,1]]
res2 = sm.OLS(y2, X2).fit()
print(res2.params)
print(res2.bse)
# Estimate RLM:
resrlm2 = sm.RLM(y2, X2).fit()
print(resrlm2.params)
print(resrlm2.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
prstd, iv_l, iv_u = wls_prediction_std(res2)
fig, ax = plt.subplots()
ax.plot(x1, y2, 'o', label="data")
ax.plot(x1, y_true2, 'b-', label="True")
ax.plot(x1, res2.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm2.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
| bsd-3-clause |
Odingod/mne-python | examples/inverse/plot_lcmv_beamformer.py | 18 | 2801 | """
======================================
Compute LCMV beamformer on evoked data
======================================
Compute LCMV beamformer solutions on an evoked dataset for three different
choices of source orientation and store the solutions in stc files for
visualisation.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.io import Raw
from mne.beamformer import lcmv
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
###############################################################################
# Get epochs
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = Raw(raw_fname)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads', selection=left_temporal_channels)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Read regularized noise covariance and compute regularized data covariance
noise_cov = mne.read_cov(fname_cov)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk')
plt.close('all')
pick_oris = [None, 'normal', 'max-power']
names = ['free', 'normal', 'max-power']
descriptions = ['Free orientation', 'Normal orientation', 'Max-power '
'orientation']
colors = ['b', 'k', 'r']
for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors):
stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
pick_ori=pick_ori)
# View activation time-series
label = mne.read_label(fname_label)
stc_label = stc.in_label(label)
plt.plot(1e3 * stc_label.times, np.mean(stc_label.data, axis=0), color,
hold=True, label=desc)
plt.xlabel('Time (ms)')
plt.ylabel('LCMV value')
plt.ylim(-0.8, 2.2)
plt.title('LCMV in %s' % label_name)
plt.legend()
plt.show()
| bsd-3-clause |
LohithBlaze/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
    W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
    W : 2-ndarray, [n_samples, r]
        Component part of the factorization
    H : 2-ndarray, [r, n_features]
        Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
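        # Lee & Seung multiplicative updates: H <- H * (W^T V) / (W^T W H),
        # then W <- W * (V H^T) / (W H H^T); eps guards against division by zero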
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
        for c, (label, timings) in zip('rbgcm', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
dsullivan7/scikit-learn | sklearn/ensemble/forest.py | 2 | 59479 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
random_state = check_random_state(tree.random_state)
indices = random_state.randint(0, n_samples, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
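        # weighting each sample by its number of bootstrap draws is equivalent
        # to resampling X with replacement, without copying the data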
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
curr_sample_weight *= compute_sample_weight('auto', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
tree.indices_ = sample_counts > 0.
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, which
            # [:, np.newaxis] would not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
sample_indices = np.arange(n_samples)
for estimator in self.estimators_:
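            # samples never drawn in this tree's bootstrap are its out-of-bag
            # samples; only those contribute to the OOB estimate for this tree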
mask = np.ones(n_samples, dtype=np.bool)
mask[estimator.indices_] = False
mask_indices = sample_indices[mask]
p_estimator = estimator.predict_proba(X[mask_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][mask_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in range(self.n_outputs_):
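            # np.unique with return_inverse=True both collects the class labels
            # of output k and re-encodes y[:, k] as integer indices into classes_k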
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
if self.class_weight is not None:
valid_presets = ('auto', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"auto" and "subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "auto" or "subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"auto" weights, use compute_class_weight("auto", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if self.class_weight != 'subsample' or not self.bootstrap:
if self.class_weight == 'subsample':
class_weight = 'auto'
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
sample_indices = np.arange(n_samples)
for estimator in self.estimators_:
mask = np.ones(n_samples, dtype=np.bool)
mask[estimator.indices_] = False
mask_indices = sample_indices[mask]
p_estimator = estimator.predict(X[mask_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[mask_indices, :] += p_estimator
n_predictions[mask_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "auto", "subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data.
The "subsample" mode is the same as "auto" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
    decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
        Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
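# A minimal usage sketch (not part of the estimator's API): it assumes
# scikit-learn and NumPy are importable, and the helper name
# `_demo_random_forest_regressor` plus the synthetic data are illustrative only.
def _demo_random_forest_regressor():
    import numpy as np
    from sklearn.ensemble import RandomForestRegressor

    rng = np.random.RandomState(0)
    X = rng.rand(100, 4)                    # 100 samples, 4 features
    y = X[:, 0] + 0.1 * rng.randn(100)      # noisy target

    # oob_score needs bootstrap=True, which is the default for random forests
    reg = RandomForestRegressor(n_estimators=25, oob_score=True,
                                random_state=0)
    reg.fit(X, y)

    print(reg.feature_importances_)         # importances sum to 1
    print(reg.oob_score_)                   # out-of-bag R^2 estimate
    print(reg.predict(X[:5]))               # averaged over the 25 trees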
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "auto", "subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data.
The "subsample" mode is the same as "auto" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
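# A minimal usage sketch (not part of the estimator's API): the helper name
# `_demo_extra_trees_classifier` and the synthetic two-class data are
# illustrative only; scikit-learn and NumPy are assumed to be installed.
def _demo_extra_trees_classifier():
    import numpy as np
    from sklearn.ensemble import ExtraTreesClassifier

    rng = np.random.RandomState(0)
    X = rng.rand(200, 3)
    y = (X[:, 0] + X[:, 1] > 1.0).astype(int)   # labels 0 and 1

    # Extra-trees split on random thresholds; bootstrap defaults to False
    clf = ExtraTreesClassifier(n_estimators=50, random_state=0)
    clf.fit(X, y)

    print(clf.classes_)              # -> [0 1]
    print(clf.predict_proba(X[:3]))  # class probabilities averaged over trees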
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
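# A minimal usage sketch (not part of the estimator's API): the helper name
# `_demo_extra_trees_regressor` and the synthetic data are illustrative only.
# With the defaults, max_features="auto" means all features are considered
# at each split for this regressor.
def _demo_extra_trees_regressor():
    import numpy as np
    from sklearn.ensemble import ExtraTreesRegressor

    rng = np.random.RandomState(0)
    X = rng.rand(150, 5)
    y = np.sin(2 * np.pi * X[:, 0]) + 0.1 * rng.randn(150)

    est = ExtraTreesRegressor(n_estimators=30, random_state=0)
    est.fit(X, y)
    print(est.predict(X[:2]))       # ensemble average of the tree predictions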
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
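# A minimal usage sketch (not part of the estimator's API) illustrating the
# dimensionality bound from the docstring: with max_leaf_nodes set, the
# one-hot output has at most n_estimators * max_leaf_nodes columns. The
# helper name `_demo_random_trees_embedding` is illustrative only.
def _demo_random_trees_embedding():
    import numpy as np
    from sklearn.ensemble import RandomTreesEmbedding

    rng = np.random.RandomState(0)
    X = rng.rand(300, 2)

    embedder = RandomTreesEmbedding(n_estimators=10, max_leaf_nodes=16,
                                    random_state=0)
    X_sparse = embedder.fit_transform(X)    # sparse CSR matrix by default

    print(X_sparse.shape)                   # (300, n_out) with n_out <= 160
    print(X_sparse[0].nnz)                  # exactly one leaf per tree -> 10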
| bsd-3-clause |
laurensstoop/HiSPARC-BONZ | egg/legacy/egg_saskia_v5.3.py | 1 | 15220 | # -*- coding: utf-8 -*-
#
############################################################################
#
# Program for analysing HiSPARC data
#
# This software is made under the GNU General Public License, version 3 (GPL-3.0)
#
############################################################################
"""
===================================
Created on Thu Mar 24 13:17:57 2016
@author: Laurens Stoop
===================================
"""
################################## HEADER ##################################
"""
Import of Packages
"""
print "POKING SASKIA, NOW IMPORTING PYTHON FEED"
import sapphire # The HiSparc Python Framework
import tables # A HDF5 python module that allows to store data
import datetime # A package to decode the timeformat of HiSparc data
import matplotlib.pyplot as plt # Plotting functionality of MatPlotLib
import numpy as np # This is NumPy
import rootpy # Get the pythonesc version of ROOT
import os.path # To check if files exist (so you don't do stuff again)
import rootpy.interactive # Get some option like a wait()
from rootpy.plotting import root2matplotlib
from matplotlib.colors import LogNorm
import ROOT
import array
"""
Getting the data file and setting the variables
"""
# Time between which the data is downloaded (jjjj,mm,dd,[hh])
START = datetime.datetime(2016,01,01)
END = datetime.datetime(2016,01,02)
# Give the list of stations
STATIONS = [501]#,503,1006,1101,3001,13002,14001,20003]
# Do not show the figures
plt.ioff()
################################## STYLE ##################################
def style(name='hisparc', shape='rect', orientation='landscape'):
# The style we make
STYLE = rootpy.plotting.Style(name, 'HiSPARC')
# We do not want borders
STYLE.SetCanvasBorderMode(0)
STYLE.SetFrameBorderMode(0)
# The style of the line
STYLE.SetHistLineColor(1)
STYLE.SetHistLineStyle(0)
STYLE.SetHistLineWidth(2)
#For the fit/function information
STYLE.SetOptFit(1111)
STYLE.SetFitFormat("5.4g")
STYLE.SetFuncColor(2)
STYLE.SetFuncStyle(1)
STYLE.SetFuncWidth(2)
STYLE.SetOptStat(0)
# Change for log plots:
STYLE.SetOptLogx(0)
STYLE.SetOptLogy(1)
STYLE.SetOptLogz(0)
# Whit background
STYLE.SetFrameFillColor(0)
STYLE.SetCanvasColor(0)
STYLE.SetPadColor(0)
STYLE.SetStatColor(0)
return STYLE
################################## FUNC ##################################
print "GETTING THE FUNC OUT"
# This function gets the data into a file with obvious naming structure
def saskia_data_download( data_file_name, station_number=501, begin=datetime.datetime(2016,01,01), final = datetime.datetime(2016,01,02) ):
# Data is downloaded
if '/s%d' %station_number not in data_file_name:
# Let them know what we do
print "\nGetting event data from station %d " % station
# Now retrieve the event data
sapphire.esd.download_data(
data_file_name, # File (as opened above)
'/s%d' %station_number, # Group name (/s..station..)
station_number, # Station number
begin, # Start data date
final, # End data date
'events', # Download events (or 'weather')
True) # Show progress
# Let them know what we do
print "\nGetting wheater data from station %d " % station
# Now retrieve the wheater data
sapphire.esd.download_data(
data_file_name, # File (as opened above)
'/s%d' %station_number, # Group name (/s..station..)
station_number, # Station number
begin, # Start data date
final, # End data date
            'weather',                  # Download weather
True) # Show progress
# If the datafile has the group we do not download them data
else:
print "All data present for station %d" % station
class saskia_data_entry_type(tables.IsDescription):
name = tables.StringCol(16) # 16-character String
idnumber = tables.Int64Col() # Signed 64-bit integer
ADCcount = tables.UInt16Col() # Unsigned short integer
TDCcount = tables.UInt8Col() # unsigned byte
grid_i = tables.Int32Col() # 32-bit integer
grid_j = tables.Int32Col() # 32-bit integer
pressure = tables.Float32Col() # float (single-precision)
energy = tables.Float64Col() # double (double-precision)
# Do the dataconversion of the ADC
def saskia_adc_conversion( data_file_name , station_number = 501 ):
# If no new group present then create one
# if ('/s%d/converted' %station_number) not in data_file_name:
# Get the unconverted data
data_set = data_file_name.get_node(
'/s%d' %station_number, # From the group (/s..station..)
'events') # Get the node with events
data_ph = data_set.col('pulseheights')
data_pi = data_set.col('integrals')
    data_converted_ph = 0.57 * data_ph[:] - 113
    data_converted_pi = 0.57 * data_pi[:] - 113     # Needs more work
data_combo = np.stack(
(data_converted_ph, # The pulse integral on y axis
data_converted_pi)) # To get the direction correct
    # Debug output of the combined converted arrays
    print data_combo
# Create a group for the converted data
data_converted_storage = data_file_name.create_group('/s%d/' % station_number, 'handled', 'Handled data')
#data_converted_table =
data_file_name.create_table(data_converted_storage, 'converted', saskia_data_entry_type, "Converted data")
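# A minimal sketch of the ADC -> mV calibration used just above, factored
# into a reusable helper. The name saskia_adc_to_mv is an assumption for
# illustration and not part of the original analysis chain.
def saskia_adc_to_mv( adc_values ):
    # Same linear calibration as above: mV = 0.57 * ADC - 113
    return 0.57 * np.asarray(adc_values) - 113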
# Load a histogram
def saskia_load_pulseheight( data_file_name, station_number=501, detector=0, number_bins=200, range_start=0., range_end=4500):
# Get event data
event_data = data_file_name.get_node(
'/s%d' %station_number, # From the group (/s..station..)
'events') # Get the node with events
# Get the pulseheight from all events
data_ph = event_data.col('pulseheights') # col takes all data from events
# Create a histogram
ph_histo = rootpy.plotting.Hist(number_bins, range_start, range_end, drawstyle='hist')
# Fill it with data
ph_histo.fill_array(data_ph[:,detector])
return ph_histo
# This function plots the pulse_histograms depending on number of bins and range
def saskia_plot_pulseheight( data_file_name, station_number=501, detector=0, number_bins=200, range_start=0., range_end=4500 ):
# If the plot exist we skip the plotting
if os.path.isfile('./img/pulseheigt_histogram_%d_detector_%d.pdf' % (station_number, detector)):
# Say if the plot is present
print "Plot already present for station %d" % station_number
# If there is no plot we make it
else:
        # Build the histogram for this station/detector, then draw it with matplotlib
        ph_histo = saskia_load_pulseheight(data_file_name, station_number, detector, number_bins, range_start, range_end)
        rootpy.plotting.root2matplotlib.hist(ph_histo)
# Setting the limits on the axis
plt.ylim((pow(10,-1),pow(10,7)))
plt.xlim((range_start, range_end))
plt.yscale('log')
# Setting the plot labels and title
plt.xlabel("Pulseheight [ADC]")
plt.ylabel("Counts")
plt.title("Pulseheight histogram (log scale) for station (%d)" %station_number)
# Saving them Pica
plt.savefig(
'./img/pulseheigt_histogram_%d_detector_%d.pdf' % (station_number, detector) , # Name of the file
bbox_inches='tight') # Use less whitespace
def saskia_fitplot_pulseheight( ph_histo, station_number, detector=0):
# If the plot exist we skip the plotting
if os.path.isfile('./img/fitted_pulseheigt_histogram_%d_detector_%d.pdf' % (station_number, detector)):
# Say if the plot is present
print "Fitted plot already present for station %d, detector %d" % (station_number, detector)
# If there is no plot we make it
else:
# Set the plot style
rootpy.plotting.set_style(style('hisparc'))
# We work on a invisible canvas and fit the histogram and plot it
with rootpy.context.invisible_canvas() as canv:
# Properties of the canvas
canv.axes(xlimits=[100,4500],ylimits=[pow(10,-1),pow(10,7)], xbins=200)
# The fitfunction definitions
ph_fitf_signal = ROOT.TF1( 'ph_fitf_signal', 'landau',120,2500)
ph_fitf_bkg = ROOT.TF1( 'ph_fitf_bkg', 'expo',0,200)
ph_fitf_total = ROOT.TF1('ph_fitf_total','expo(0)+landau(2)',120,2500)
# THe fitting of the pre-functions
ph_histo.Fit(ph_fitf_signal,'MQR')
ph_histo.Fit(ph_fitf_bkg,'MQR+')
# Retrieving the fitparameters for final fit
ph_par_signal= ph_fitf_signal.GetParameters()
ph_par_bkg = ph_fitf_bkg.GetParameters()
# Making an empty array in which the fitparameters can be loaded
ph_par_total = array.array( 'd', 5*[0.] )
# Loading of the fitparameters
ph_par_total[0], ph_par_total[1] = ph_par_bkg[0], ph_par_bkg[1]
ph_par_total[2], ph_par_total[3], ph_par_total[4] = ph_par_signal[0], ph_par_signal[1], ph_par_signal[2]
# Set the fitparameters and their names for final fit
ph_fitf_total.SetParameters( ph_par_total )
ph_fitf_total.SetParName(0,'Exp. decay offset')
ph_fitf_total.SetParName(1,'Exp. decay const')
ph_fitf_total.SetParName(2,'#mu_{peak}')
ph_fitf_total.SetParName(3,'#sigma (scale parameter)')
ph_fitf_total.SetParName(4,'Normalization')
# Fit the full function
ph_histo.Fit(ph_fitf_total,'MR')
# Get the parameters for later use
ph_par = ph_fitf_total.GetParameters()
# set visual attributes
ph_histo.linecolor = 'blue'
ph_histo.markercolor = 'blue'
ph_histo.xaxis.SetTitle("Pulseheight [ADC]")
ph_histo.yaxis.SetTitle("Count")
# Draw the histo gram and the fitfunction
ph_histo.Draw()
ph_fitf_total.Draw('same hisparc')
# Save the image for thesis
canv.SaveAs('./img/fitted_pulseheigt_histogram_%d_detector_%d.pdf' % (station_number, detector))
# Return the found fitparameters for the total fit function
return [ph_par]
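# A minimal sketch showing how the parameters returned above can be read
# back: index 2 is the landau location ('#mu_{peak}') as named in
# saskia_fitplot_pulseheight. The helper name saskia_print_mip_peak is an
# assumption for illustration, not part of the original analysis.
def saskia_print_mip_peak( fit_result, station_number, detector=0 ):
    # fit_result is the [ph_par] list returned by saskia_fitplot_pulseheight
    ph_par = fit_result[0]
    print "MIP peak for station %d, detector %d: %.1f ADC" % (station_number, detector, ph_par[2])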
################################## BODY ##################################
"""
Data acquisition
"""
print "SPARCS ENABLED, TREE ROOTS ACTIVATED, TIMING ADJUSTED"
# Open a data file (automatic close)
with tables.open_file('egg_saskia.h5','a') as data_file:
print data_file
# Retrieve for every station the data and plot a pulsehisto
for station in STATIONS:
# Getting the data
saskia_data_download( data_file, station, START, END)
saskia_adc_conversion( data_file, station )
# Create the histogram
ph_histo = saskia_load_pulseheight( data_file, station, 0, 200, 0., 4500)
# Plot some data
saskia_plot_pulseheight( data_file, station, 0, 200, 0., 4500)
# Fit the functions
saskia_fitplot_pulseheight(ph_histo,station,0)
####### Pulseheight vs pulse integral histograms #######
# If the plot exist we skip the plotting
if os.path.isfile('./img/pmt_saturation_s%d.pdf' %station):
# Say if the plot is present
print "PMT saturation histogram already present for station %d" % station
# If there is no plot we make it
else:
# Get event data
event_data = data_file.get_node(
'/s%d' %station, # From the group (/s..station..)
'events') # Get the node with events
# Get the pulseheight from all events
data_phs = event_data.col('pulseheights') # col takes all data from events (this improves the speed)
# Get the integral from all events
data_ints = event_data.col('integrals') # col takes all data from events
# Make a figure so it can be closed
figure_combo, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex = 'col', sharey = 'row')
# Setting the plot titles
ax1.set_title('Detector 1')
ax2.set_title('Detector 2')
ax3.set_title('Detector 3')
ax4.set_title('Detector 4')
# Setting the plot labels
ax1.set_ylabel('Pulseheight [ADC]')
ax3.set_ylabel('Pulseheight [ADC]')
ax3.set_xlabel('Pulse integral [ADC.ns]')
ax4.set_xlabel('Pulse integral [ADC.ns]')
# Now we plot the data of every detector
for detector in range(0,4):
# Select the detector data
data_ph_detector = data_phs[:,detector]
data_int_detector = data_ints[:,detector]
# Combine the detector data
data_combo = np.stack(
(data_int_detector, # The pulse integral on y axis
data_ph_detector), # The pulseheight on x axis
axis=-1) # To get the direction correct
# Initiate a 2D histogram (ROOT style)
histo_combo_detector = rootpy.plotting.Hist2D(100, 0, 150000, 100, 0, 4500)
# Fill the Histogram
histo_combo_detector.fill_array(data_combo)
# Plot the histogram with logarithmic colors in correct place
if detector == 0:
root2matplotlib.hist2d(histo_combo_detector, norm=LogNorm(), axes=ax1)
elif detector == 1:
root2matplotlib.hist2d(histo_combo_detector, norm=LogNorm(), axes=ax2)
elif detector == 2:
root2matplotlib.hist2d(histo_combo_detector, norm=LogNorm(), axes=ax3)
elif detector == 3:
root2matplotlib.hist2d(histo_combo_detector, norm=LogNorm(), axes=ax4)
# Save the file
figure_combo.savefig(
'./img/pmt_saturation_s%d.pdf' %station) # Name of the file
# Close the figure
plt.close(figure_combo)
print "####### I'm Done Bitches! #######"
################################## FOOTER ##################################
"""
Clean up shit
"""
| gpl-3.0 |