repo_name (string, lengths 6-112) | path (string, lengths 4-204) | copies (string, lengths 1-3) | size (string, lengths 4-6) | content (string, lengths 714-810k) | license (string, 15 classes)
---|---|---|---|---|---|
toobaz/pandas | pandas/tests/util/test_hashing.py | 2 | 11248 | import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.core.util.hashing import _hash_scalar, hash_tuple, hash_tuples
from pandas.util import hash_array, hash_pandas_object
import pandas.util.testing as tm
@pytest.fixture(
params=[
Series([1, 2, 3] * 3, dtype="int32"),
Series([None, 2.5, 3.5] * 3, dtype="float32"),
Series(["a", "b", "c"] * 3, dtype="category"),
Series(["d", "e", "f"] * 3),
Series([True, False, True] * 3),
Series(pd.date_range("20130101", periods=9)),
Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
Series(pd.timedelta_range("2000", periods=9)),
]
)
def series(request):
return request.param
@pytest.fixture(params=[True, False])
def index(request):
return request.param
def _check_equal(obj, **kwargs):
"""
Check that hashing an object produces the same value each time.
Parameters
----------
obj : object
The object to hash.
kwargs : kwargs
Keyword arguments to pass to the hashing function.
"""
a = hash_pandas_object(obj, **kwargs)
b = hash_pandas_object(obj, **kwargs)
tm.assert_series_equal(a, b)
def _check_not_equal_with_index(obj):
"""
Check that the hash of an object differs with and without its index.
Parameters
----------
obj : object
The object to hash.
"""
if not isinstance(obj, Index):
a = hash_pandas_object(obj, index=True)
b = hash_pandas_object(obj, index=False)
if len(obj):
assert not (a == b).all()
def test_consistency():
# Check that our hash doesn't change because of a mistake
# in the actual code; this is the ground truth.
result = hash_pandas_object(Index(["foo", "bar", "baz"]))
expected = Series(
np.array(
[3600424527151052760, 1374399572096150070, 477881037637427054],
dtype="uint64",
),
index=["foo", "bar", "baz"],
)
tm.assert_series_equal(result, expected)
def test_hash_array(series):
arr = series.values
tm.assert_numpy_array_equal(hash_array(arr), hash_array(arr))
@pytest.mark.parametrize(
"arr2", [np.array([3, 4, "All"]), np.array([3, 4, "All"], dtype=object)]
)
def test_hash_array_mixed(arr2):
result1 = hash_array(np.array(["3", "4", "All"]))
result2 = hash_array(arr2)
tm.assert_numpy_array_equal(result1, result2)
@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")])
def test_hash_array_errors(val):
msg = "must pass a ndarray-like"
with pytest.raises(TypeError, match=msg):
hash_array(val)
def test_hash_tuples():
tuples = [(1, "one"), (1, "two"), (2, "one")]
result = hash_tuples(tuples)
expected = hash_pandas_object(MultiIndex.from_tuples(tuples)).values
tm.assert_numpy_array_equal(result, expected)
result = hash_tuples(tuples[0])
assert result == expected[0]
@pytest.mark.parametrize(
"tup",
[(1, "one"), (1, np.nan), (1.0, pd.NaT, "A"), ("A", pd.Timestamp("2012-01-01"))],
)
def test_hash_tuple(tup):
# Test equivalence between
# hash_tuples and hash_tuple.
result = hash_tuple(tup)
expected = hash_tuples([tup])[0]
assert result == expected
@pytest.mark.parametrize(
"val",
[
1,
1.4,
"A",
b"A",
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-01", tz="Europe/Brussels"),
datetime.datetime(2012, 1, 1),
pd.Timestamp("2012-01-01", tz="EST").to_pydatetime(),
pd.Timedelta("1 days"),
datetime.timedelta(1),
pd.Period("2012-01-01", freq="D"),
pd.Interval(0, 1),
np.nan,
pd.NaT,
None,
],
)
def test_hash_scalar(val):
result = _hash_scalar(val)
expected = hash_array(np.array([val], dtype=object), categorize=True)
assert result[0] == expected[0]
@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")])
def test_hash_tuples_err(val):
msg = "must be convertible to a list-of-tuples"
with pytest.raises(TypeError, match=msg):
hash_tuples(val)
def test_multiindex_unique():
mi = MultiIndex.from_tuples([(118, 472), (236, 118), (51, 204), (102, 51)])
assert mi.is_unique is True
result = hash_pandas_object(mi)
assert result.is_unique is True
def test_multiindex_objects():
mi = MultiIndex(
levels=[["b", "d", "a"], [1, 2, 3]],
codes=[[0, 1, 0, 2], [2, 0, 0, 1]],
names=["col1", "col2"],
)
recons = mi._sort_levels_monotonic()
# These are equal.
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# _hashed_values and hash_pandas_object(..., index=False) equivalency.
expected = hash_pandas_object(mi, index=False).values
result = mi._hashed_values
tm.assert_numpy_array_equal(result, expected)
expected = hash_pandas_object(recons, index=False).values
result = recons._hashed_values
tm.assert_numpy_array_equal(result, expected)
expected = mi._hashed_values
result = recons._hashed_values
# Values should match, but in different order.
tm.assert_numpy_array_equal(np.sort(result), np.sort(expected))
@pytest.mark.parametrize(
"obj",
[
Series([1, 2, 3]),
Series([1.0, 1.5, 3.2]),
Series([1.0, 1.5, np.nan]),
Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
Series(["a", "b", "c"]),
Series(["a", np.nan, "c"]),
Series(["a", None, "c"]),
Series([True, False, True]),
Series(),
Index([1, 2, 3]),
Index([True, False, True]),
DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),
DataFrame(),
tm.makeMissingDataframe(),
tm.makeMixedDataFrame(),
tm.makeTimeDataFrame(),
tm.makeTimeSeries(),
tm.makeTimedeltaIndex(),
tm.makePeriodIndex(),
Series(tm.makePeriodIndex()),
Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),
MultiIndex.from_product(
[range(5), ["foo", "bar", "baz"], pd.date_range("20130101", periods=2)]
),
MultiIndex.from_product([pd.CategoricalIndex(list("aabc")), range(3)]),
],
)
def test_hash_pandas_object(obj, index):
_check_equal(obj, index=index)
_check_not_equal_with_index(obj)
def test_hash_pandas_object2(series, index):
_check_equal(series, index=index)
_check_not_equal_with_index(series)
@pytest.mark.parametrize(
"obj", [Series([], dtype="float64"), Series([], dtype="object"), Index([])]
)
def test_hash_pandas_empty_object(obj, index):
# These are by-definition the same with
# or without the index as the data is empty.
_check_equal(obj, index=index)
@pytest.mark.parametrize(
"s1",
[
Series(["a", "b", "c", "d"]),
Series([1000, 2000, 3000, 4000]),
Series(pd.date_range(0, periods=4)),
],
)
@pytest.mark.parametrize("categorize", [True, False])
def test_categorical_consistency(s1, categorize):
# see gh-15143
#
# Check that categoricals hash consistently with their values,
# not codes. This should work for categoricals of any dtype.
s2 = s1.astype("category").cat.set_categories(s1)
s3 = s2.cat.set_categories(list(reversed(s1)))
# These should all hash identically.
h1 = hash_pandas_object(s1, categorize=categorize)
h2 = hash_pandas_object(s2, categorize=categorize)
h3 = hash_pandas_object(s3, categorize=categorize)
tm.assert_series_equal(h1, h2)
tm.assert_series_equal(h1, h3)
def test_categorical_with_nan_consistency():
c = pd.Categorical.from_codes(
[-1, 0, 1, 2, 3, 4], categories=pd.date_range("2012-01-01", periods=5, name="B")
)
expected = hash_array(c, categorize=False)
c = pd.Categorical.from_codes([-1, 0], categories=[pd.Timestamp("2012-01-01")])
result = hash_array(c, categorize=False)
assert result[0] in expected
assert result[1] in expected
@pytest.mark.parametrize("obj", [pd.Timestamp("20130101")])
def test_pandas_errors(obj):
msg = "Unexpected type for hashing"
with pytest.raises(TypeError, match=msg):
hash_pandas_object(obj)
def test_hash_keys():
# Using different hash keys should produce
# different hashes for the same data.
#
# This only matters for object dtypes.
obj = Series(list("abc"))
a = hash_pandas_object(obj, hash_key="9876543210123456")
b = hash_pandas_object(obj, hash_key="9876543210123465")
assert (a != b).all()
def test_invalid_key():
# This only matters for object dtypes.
msg = "key should be a 16-byte string encoded"
with pytest.raises(ValueError, match=msg):
hash_pandas_object(Series(list("abc")), hash_key="foo")
def test_already_encoded(index):
# If already encoded, then ok.
obj = Series(list("abc")).str.encode("utf8")
_check_equal(obj, index=index)
def test_alternate_encoding(index):
obj = Series(list("abc"))
_check_equal(obj, index=index, encoding="ascii")
@pytest.mark.parametrize("l_exp", range(8))
@pytest.mark.parametrize("l_add", [0, 1])
def test_same_len_hash_collisions(l_exp, l_add):
length = 2 ** (l_exp + 8) + l_add
s = tm.rands_array(length, 2)
result = hash_array(s, "utf8")
assert not result[0] == result[1]
def test_hash_collisions():
# Hash collisions are bad.
#
# https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726
hashes = [
"Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9", # noqa: E501
"Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe", # noqa: E501
]
# These should be different.
result1 = hash_array(np.asarray(hashes[0:1], dtype=object), "utf8")
expected1 = np.array([14963968704024874985], dtype=np.uint64)
tm.assert_numpy_array_equal(result1, expected1)
result2 = hash_array(np.asarray(hashes[1:2], dtype=object), "utf8")
expected2 = np.array([16428432627716348016], dtype=np.uint64)
tm.assert_numpy_array_equal(result2, expected2)
result = hash_array(np.asarray(hashes, dtype=object), "utf8")
tm.assert_numpy_array_equal(result, np.concatenate([expected1, expected2], axis=0))
| bsd-3-clause |
SaschaMester/delicium | ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 154 | 8545 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
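# Illustrative note (added for clarity, not part of the original script):
# given a dump .txt file containing the sample 'key:value' lines shown above,
# ReadDumpTxtFile would return a dict along the lines of
#   {'plat': 'Win32', 'prod': 'Chromium', 'ptype': 'nacl-loader',
#    'rept': 'crash svc'}
# Each value keeps everything after the first ':', so 'rept:crash svc'
# parses with its embedded space intact.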
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe,
skip_if_missing=False):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
crash_service_path = os.path.join(browser_dir, crash_service_exe)
if skip_if_missing and not os.path.exists(crash_service_path):
return
proc = subprocess.Popen([crash_service_path,
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
# Skip if missing, since in win64 builds crash_service.exe is 64-bit
# and crash_service64.exe does not exist.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe',
skip_if_missing=True)
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
| bsd-3-clause |
sonnyhu/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. Moreover, with a small
number of observations, it is easier to recover a correlation matrix
than a covariance matrix, so we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, the small
number of samples forces us to shrink a lot. As a result, the
Ledoit-Wolf precision estimate is fairly close to the ground truth
precision, which is not far from diagonal, but the off-diagonal structure
is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision matrix. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth values, as can be seen in the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the
model, is chosen by internal cross-validation in GraphLassoCV. As can be
seen in figure 2, the grid used to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
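# (Added note) Standardizing X implements the "scale the time series" step
# mentioned in the docstring: with zero-mean, unit-variance features, the
# empirical second-moment matrix computed below is effectively a correlation
# matrix, which is easier to recover from few samples.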
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/matplotlib/tests/test_skew.py | 5 | 7259 | """
Testing that skewed axes properly work
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
from matplotlib.axes import Axes
import matplotlib.transforms as transforms
import matplotlib.axis as maxis
import matplotlib.spines as mspines
import matplotlib.patches as mpatch
from matplotlib.projections import register_projection
# The sole purpose of this class is to look at the upper, lower, or total
# interval as appropriate and see what parts of the tick to draw, if any.
class SkewXTick(maxis.XTick):
def update_position(self, loc):
# This ensures that the new value of the location is set before
# any other updates take place
self._loc = loc
super(SkewXTick, self).update_position(loc)
def _has_default_loc(self):
return self.get_loc() is None
def _need_lower(self):
return (self._has_default_loc() or
transforms.interval_contains(self.axes.lower_xlim,
self.get_loc()))
def _need_upper(self):
return (self._has_default_loc() or
transforms.interval_contains(self.axes.upper_xlim,
self.get_loc()))
@property
def gridOn(self):
return (self._gridOn and (self._has_default_loc() or
transforms.interval_contains(self.get_view_interval(),
self.get_loc())))
@gridOn.setter
def gridOn(self, value):
self._gridOn = value
@property
def tick1On(self):
return self._tick1On and self._need_lower()
@tick1On.setter
def tick1On(self, value):
self._tick1On = value
@property
def label1On(self):
return self._label1On and self._need_lower()
@label1On.setter
def label1On(self, value):
self._label1On = value
@property
def tick2On(self):
return self._tick2On and self._need_upper()
@tick2On.setter
def tick2On(self, value):
self._tick2On = value
@property
def label2On(self):
return self._label2On and self._need_upper()
@label2On.setter
def label2On(self, value):
self._label2On = value
def get_view_interval(self):
return self.axes.xaxis.get_view_interval()
# This class exists to provide two separate sets of intervals to the tick,
# as well as create instances of the custom tick
class SkewXAxis(maxis.XAxis):
def _get_tick(self, major):
return SkewXTick(self.axes, None, '', major=major)
def get_view_interval(self):
return self.axes.upper_xlim[0], self.axes.lower_xlim[1]
# This class exists to calculate the separate data range of the
# upper X-axis and draw the spine there. It also provides this range
# to the X-axis artist for ticking and gridlines
class SkewSpine(mspines.Spine):
def _adjust_location(self):
pts = self._path.vertices
if self.spine_type == 'top':
pts[:, 0] = self.axes.upper_xlim
else:
pts[:, 0] = self.axes.lower_xlim
# This class handles registration of the skew-xaxes as a projection as well
# as setting up the appropriate transformations. It also overrides standard
# spines and axes instances as appropriate.
class SkewXAxes(Axes):
# The projection must specify a name. This will be used by the
# user to select the projection, i.e. ``subplot(111,
# projection='skewx')``.
name = 'skewx'
def _init_axis(self):
# Taken from Axes and modified to use our modified X-axis
self.xaxis = SkewXAxis(self)
self.spines['top'].register_axis(self.xaxis)
self.spines['bottom'].register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
def _gen_axes_spines(self):
spines = {'top': SkewSpine.linear_spine(self, 'top'),
'bottom': mspines.Spine.linear_spine(self, 'bottom'),
'left': mspines.Spine.linear_spine(self, 'left'),
'right': mspines.Spine.linear_spine(self, 'right')}
return spines
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
rot = 30
# Get the standard transform setup from the Axes base class
Axes._set_lim_and_transforms(self)
# Need to put the skew in the middle, after the scale and limits,
# but before the transAxes. This way, the skew is done in Axes
# coordinates thus performing the transform around the proper origin
# We keep the pre-transAxes transform around for other users, like the
# spines for finding bounds
self.transDataToAxes = (self.transScale +
(self.transLimits +
transforms.Affine2D().skew_deg(rot, 0)))
# Create the full transform from Data to Pixels
self.transData = self.transDataToAxes + self.transAxes
# Blended transforms like this need to have the skewing applied using
# both axes, in axes coords like before.
self._xaxis_transform = (transforms.blended_transform_factory(
self.transScale + self.transLimits,
transforms.IdentityTransform()) +
transforms.Affine2D().skew_deg(rot, 0)) + self.transAxes
@property
def lower_xlim(self):
return self.axes.viewLim.intervalx
@property
def upper_xlim(self):
pts = [[0., 1.], [1., 1.]]
return self.transDataToAxes.inverted().transform(pts)[:, 0]
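# (Added clarification) upper_xlim maps the top edge of the axes, the points
# (0, 1) and (1, 1) in axes coordinates, back into data coordinates through
# the inverted skew transform, giving the x-range actually spanned by the
# skewed top spine and its ticks.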
# Now register the projection with matplotlib so the user can select
# it.
register_projection(SkewXAxes)
@image_comparison(baseline_images=['skew_axes'], remove_text=True)
def test_set_line_coll_dash_image():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='skewx')
ax.set_xlim(-50, 50)
ax.set_ylim(50, -50)
ax.grid(True)
# An example of a slanted line at constant X
ax.axvline(0, color='b')
@image_comparison(baseline_images=['skew_rects'], remove_text=True)
def test_skew_rectange():
fix, axes = plt.subplots(5, 5, sharex=True, sharey=True, figsize=(16, 12))
axes = axes.flat
rotations = list(itertools.product([-3, -1, 0, 1, 3], repeat=2))
axes[0].set_xlim([-4, 4])
axes[0].set_ylim([-4, 4])
axes[0].set_aspect('equal')
for ax, (xrots, yrots) in zip(axes, rotations):
xdeg, ydeg = 45 * xrots, 45 * yrots
t = transforms.Affine2D().skew_deg(xdeg, ydeg)
ax.set_title('Skew of {0} in X and {1} in Y'.format(xdeg, ydeg))
ax.add_patch(mpatch.Rectangle([-1, -1], 2, 2,
transform=t + ax.transData,
alpha=0.5, facecolor='coral'))
plt.subplots_adjust(wspace=0, left=0, right=1, bottom=0)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| bsd-2-clause |
georgid/sms-tools | lectures/5-Sinusoidal-model/plots-code/spectral-peaks-interpolation.py | 2 | 1234 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
from scipy.fftpack import fft, ifft
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = .8*fs
hN = N/2
hM = (M+1)/2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, hN, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs*np.arange(N/2)/float(N)
plt.figure(1, figsize=(9.5, 5.5))
plt.subplot (2,1,1)
plt.plot(freqaxis,mX,'r', lw=1.5)
plt.axis([300,2500,-70,max(mX)])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + spectral peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(freqaxis,pX,'c', lw=1.5)
plt.axis([300,2500,min(pX),-6])
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + spectral peaks')
plt.tight_layout()
plt.savefig('spectral-peaks-interpolation.png')
plt.show()
| agpl-3.0 |
mwalton/artificial-olfaction | python/plots.py | 1 | 1079 | import matplotlib.pyplot as plt
#from Image import NEAREST
#from matplotlib.cm import cmap_d
import numpy as np
#import pylab as pl
def accuracy(target, prediction, label="Classifier", c=np.zeros((0,0))):
correct = (target == prediction)
correct = np.array((correct, correct))
compare = np.array((target, prediction))
showC = c != np.zeros((0,0))
if (showC):
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, figsize=(6,10))
else:
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6,8))
dim = [0,compare.shape[1],0,compare.shape[0]]
ax1.imshow(compare, extent=dim, aspect='auto', interpolation='nearest')
ax1.set_title(label + ": Prediction vs. Target")
imgPlt = ax2.imshow(correct, extent=dim, aspect='auto', interpolation='nearest')
imgPlt.set_cmap('RdYlGn')
ax2.set_title(label + " Prediction Accuracy")
if (showC):
ax3.plot(c)
ax3.set_title("Concentration")
ax3.set_yscale('log')
ax3.set_ylim(0.02,0.7)
plt.draw()
def show():
plt.show() | mit |
yueranyuan/vector_edu | wavelet_analysis.py | 1 | 4265 | import numpy as np
import matplotlib.pyplot as plt
from learntools.emotiv.data import segment_raw_data, gen_wavelet_features
from learntools.emotiv.filter import filter_data
from learntools.libs.wavelet import signal_to_wavelet
def show_raw_wave(eeg):
for channel in xrange(14):
plt.plot(eeg[:, channel])
plt.show()
def show_raw_specgram(eeg, label, block=False):
fig, axs = plt.subplots(nrows=14, ncols=1)
for channel in xrange(14):
#axs[channel].plot(signal_to_freq_bins(eeg[:, channel], cutoffs=[0.5, 4.0, 7.0, 12.0, 30.0], sampling_rate=128))
axs[channel].specgram(eeg[:, channel], Fs=128)
axs[channel].set_title("{}[{}]".format(label, channel))
fig.show()
if block:
fig.ginput(timeout=0)
plt.close('all')
def specgram_slideshow(ds):
for row in xrange(len(ds)):
show_raw_specgram(ds['eeg'][row], "cond=" + str(ds['condition'][row]), block=True)
def plot_conditions(eeg, conditions):
eeg1_full = np.asarray(list(compress(eeg, conditions == 0)))
eeg2_full = np.asarray(list(compress(eeg, conditions == 1)))
# draw select trials
for i in xrange(10):
plt.subplot(1, 10, i + 1)
plt.pcolor(eeg1_full[i], cmap=plt.cm.Blues)
plt.show()
eeg1 = np.mean(eeg1_full, axis=0)
eeg2 = np.mean(eeg2_full, axis=0)
def _plot_heatmap(data):
return plt.pcolor(data, cmap=plt.cm.Blues)
# draw between class difference
plt.subplot(1, 3, 1)
_plot_heatmap(eeg1)
plt.subplot(1, 3, 2)
_plot_heatmap(eeg2)
plt.subplot(1, 3, 3)
_plot_heatmap(eeg1-eeg2)
plt.show()
# draw within class difference
plt.subplot(1, 4, 1)
_plot_heatmap(np.mean(eeg1_full[:(len(eeg1) / 2)], axis=0))
plt.subplot(1, 4, 2)
_plot_heatmap(np.mean(eeg1_full[(len(eeg1) / 2):], axis=0))
plt.subplot(1, 4, 3)
_plot_heatmap(np.mean(eeg2_full[:(len(eeg2) / 2)], axis=0))
plt.subplot(1, 4, 4)
_plot_heatmap(np.mean(eeg2_full[(len(eeg2) / 2):], axis=0))
plt.show()
def _shape(ys):
""" Get the shape of a non-numpy python array. This assumes the first index of every dimension is
indicative of the shape of the whole matrix.
Examples:
>>> _shape([1, 2, 3])
[3]
>>> _shape([[1, 2, 3], [4, 5]])
[2, 3]
"""
if hasattr(ys, '__len__'):
return [len(ys)] + _shape(ys[0])
else:
return []
def plot_waves(ys, ylim=None):
shape = _shape(ys)
if len(shape) > 3:
from operator import __mul__
dim1 = reduce(__mul__, shape[:-2])
dim2 = shape[-2]
elif len(shape) == 3:
dim1, dim2 = shape[:2]
elif len(shape) == 2:
dim1, dim2 = shape[0], 1
elif len(shape) == 1:
dim1 = dim2 = 1
else:
raise Exception("malformed ys")
def _plot_wave(y, i):
if len(_shape(y)) == 1:
print i
plt.subplot(dim1, dim2, i)
if ylim is not None:
plt.ylim(ylim)
plt.plot(y)
return i + 1
else:
for _y in y:
i = _plot_wave(_y, i)
return i
_plot_wave(ys, 1)
plt.show()
def analyze_waves(ds, n=20, ylim=(-80, 80)):
for i in xrange(n):
eeg_segment = ds['eeg'][i]
wavelet = signal_to_wavelet(eeg_segment[:, 0], min_length=0, max_length=None,
depth=5, family='db6')
plot_waves(eeg_segment.T)
plot_waves([(w, _downsample(w, 6)) for w in wavelet], ylim=ylim)
exit()
def analyze_features(ds, max_length=4):
ds = gen_wavelet_features(ds, duration=10, sample_rate=128, depth=5, min_length=3, max_length=max_length,
family='db6')
filter_data(ds)
eeg = ds['eeg'][:]
eeg = eeg.reshape((eeg.shape[0], 14, 6, max_length))
eeg_no_time = np.mean(eeg, axis=3)
plot_conditions(eeg=eeg_no_time, conditions=ds['condition'])
if __name__ == "__main__":
from itertools import compress
from learntools.libs.wavelet import _downsample
dataset_name = 'data/emotiv_all.gz'
ds = segment_raw_data(dataset_name=dataset_name, conds=['EyesOpen', 'EyesClosed'])
# analyze_waves(ds, n=2)
analyze_features(ds, max_length=4) | mit |
ducminhkhoi/PrecipitationPrediction | main.py | 1 | 12789 | from __future__ import division
import numpy as np
import pandas as pd
# import matplotlib as mpl
# mpl.use('Agg')
import matplotlib.pyplot as plt
# plt.close('all')
# Global variables
train_data = []
train_labels = []
test_data = []
test_labels = []
list_features = []
X = []
Y = []
station = []
# Control mode variables
run_mode = 1 # 1: for classification mode, 0: for regression mode
filter_data = 1 # USEFUL
add_change_data = 0 # NOT USEFUL
use_window_mode = 1 # USEFUL for X, Y raw data
visualize = 0
test_mode = 0
# Hyper-parameter variables
time_series_length = 50
batch_size = 32
window_size = 6
percent_of_testing = 0.1
nb_epochs = 100
# stations = ['CARL', 'LANE', 'VANO', 'WEBR'] # train and test stations
start_date = '2009-01-01'
nearby_range = 30 # km
def read_data(window_size=6): # read data and load into global variables
global list_features, station, list_stations
df = pd.read_csv('dataset/All_station_2008.csv');
station = 'CARL'
list_stations_coord = get_station_coord()
stations = list_stations_coord.keys()
n_nearby_stations = []
for station in stations:
n_nearby_stations.append(len(get_nearby_stations(station, list_stations_coord)))
list_features_raw = list(df.columns.values)
list_features = [x for x in list_features_raw if not x.startswith('Q')]
list_soil_temp_features = ['TS05', 'TB05', 'TS10', 'TB10', 'TS30', 'BATV']
list_features[0:6] = []
list_features = [x for x in list_features if x not in list_soil_temp_features]
# Get complete data with clean RAIN
df = df.sort_values(by=['STID','Year', 'Month', 'Day', 'Time'])
date = '2009-06-21'
# --------------------------------------------
# Filter some information
# df = df[
# (df.STID != 'KING')
# & (df.QRAIN == 0)
# & (df.Month >= 6)
# # & (df.Date == date)
# & (df.Day >= 11)
# & (df.Day <= 14)
# & (df.Month <= 6)
# ]
#--------------------------------------------
rain_index = list_features_raw.index('RAIN')
pressure_index = list_features_raw.index('PRES')
time_index = list_features_raw.index('Time')
date_index = list_features_raw.index('Date')
station_index = list_features_raw.index('STID')
data = np.array(df)
n_rows = data.shape[0]
real_Y = np.zeros(shape=(n_rows))
# Get Y data: Y label
for idx, val in enumerate(data):
print(str(idx) + '/' + str(len(data)))
if idx == 0 or data[idx, time_index] == 5 or (data[idx, time_index] == 0 and data[idx,date_index] == '2009-01-01'):
real_Y[idx] = data[idx,rain_index]
else:
real_Y[idx] = data[idx,rain_index] - data[idx-1,rain_index]
list_feature_index = [list_features_raw.index(x) for x in list_features]
X = data[:, list_feature_index]
if run_mode == 1:
Y = (real_Y > 0).astype(int)
else:
Y = real_Y
if add_change_data:
temp_list = []
list_features_change = ['SRAD']
# get and add change feature to list feature
for feature in list_features_change:
feature_data = X[:, list_features.index(feature)]
change_feature = np.reshape(get_changes(feature_data), (n_rows,1))
X = np.append(X, change_feature,1)
temp_list.append('C'+feature)
list_features.extend(temp_list)
# Filter Features
if filter_data:
print('List features: ' + str(list_features))
list_choose_features = ['RELH', 'TAIR', 'WSSD', 'WDIR', 'TA9M', 'PRES', 'SRAD']
index_choose_features = [list_features.index(x) for x in list_choose_features]
# visualize_station_data(list_choose_features, X)
X = X[:, index_choose_features]
list_features = list_choose_features
# end filter Features
# add window size
if use_window_mode:
Y_new = []
for i, _ in enumerate(Y):
# print str(i)+'/'+str(len(Y))
Y_sum = 0
for k in range(-window_size, window_size):
if i + k < 0 or i + k >= X.shape[0]: continue
Y_sum += Y[i+k]
Y_new.append(Y_sum)
if run_mode == 1:
Y = (np.array(Y_new) > 0).astype(int)
else:
Y = np.array(Y_new)
station_start_indices = {}
station_end_indices = {}
for s in stations:
station_start_indices[s] = np.where(data[:, station_index] == s)[0][0]
station_end_indices[s] = np.where(data[:, station_index] == s)[0][-1] + 1
# Add data for model 2
station_values = {}
for s in stations:
station_values[s] = Y[station_start_indices[s]: station_end_indices[s]]
# visualize nearby station data
# station = 'CARL'
# nearby_stations = get_nearby_stations(station, 3)
# visualize_nearby_station_data(station, nearby_stations, station_values)
X_new = []
Y_new = []
for station in stations:
nearby_stations = get_nearby_stations(station, list_stations_coord)
for i, y in enumerate(station_values[station]):
values = [station_values[s][i] for s in nearby_stations]
X_new.append(values)
Y_new.append(y)
X_2 = np.array(X_new)
if run_mode == 1:
Y_2 = (np.array(Y_new) > 0).astype(int)
else:
Y_2 = np.array(Y_new)
# Add data for model 3
X_new = []
Y_new = []
for station in stations:
nearby_stations = get_nearby_stations(station, list_stations_coord)
for i, y in enumerate(station_values[station]):
station_own_values = X[station_start_indices[station] + i, :]
# station_own_values = X[range_indices,:]
nearby_stations_value = np.transpose([station_values[s][i] for s in nearby_stations])
values = np.append(station_own_values, nearby_stations_value)
X_new.append(values)
Y_new.append(y)
X_3 = np.array(X_new)
if run_mode == 1:
Y_3 = (np.array(Y_new) > 0).astype(int)
else:
Y_3 = np.array(Y_new)
return X, Y, X_2, Y_2, X_3, Y_3
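# Note on the three feature sets returned above (descriptive comment added
# for clarity): X holds each station's own sensor readings, X_2 holds the
# (windowed) rain labels of nearby stations at the same timestep, and X_3
# concatenates both; Y, Y_2 and Y_3 are the corresponding rain labels for
# the station itself.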
def get_nearby_stations(station, list_stations_coord):
# change later
list_nearby_stations = []
lon_station, lat_station = list_stations_coord[station]
for s in list_stations_coord.keys():
if s == station: continue
lon_s, lat_s = list_stations_coord[s]
distance = haversine(lon_station, lat_station, lon_s, lat_s)
if distance < nearby_range:
list_nearby_stations.append(s)
return list_nearby_stations
pass
def visualize_station_data(feature_names, X):
plt.figure()
n_rows = X.shape[0]
time = np.arange(0, n_rows) / 288 * 2400 - 600
plt.title(str(feature_names) + ' over time at station: ' + station)
n_subplot = len(feature_names)
for i, feature_name in enumerate(feature_names):
plt.subplot(n_subplot, 1, i + 1)
plt.plot(time, X[:, list_features.index(feature_name)], label=feature_name)
plt.legend(loc='best')
plt.savefig('images/' + str(feature_names) + '_over_time_at_station: ' + station)
plt.show()
pass
def visualize_nearby_station_data(station, nearby_stations, station_values):
plt.figure()
n_subplot = len(nearby_stations) + 1
plt.title(str('station values and nearby stations'))
for i, s in enumerate(nearby_stations):
plt.subplot(n_subplot, 1, i + 1)
n_rows = len(station_values[s])
time = np.arange(0, n_rows) / 288 * 2400 - 600
plt.plot(time, station_values[s], label=s)
plt.legend(loc='best')
plt.subplot(n_subplot, 1, n_subplot)
n_rows = len(station_values[station])
time = np.arange(0, n_rows) / 288 * 2400 - 600
plt.plot(time, station_values[station], label=station)
plt.legend(loc='best')
plt.savefig('images/station values and nearby stations')
plt.show()
pass
def process_data(X, Y, permutation):
new_X = X[permutation]
new_Y = Y[permutation]
test_index = int(X.shape[0]*(1-percent_of_testing))
train_data = new_X[:test_index].astype('float32')
train_labels = new_Y[:test_index]
test_data = new_X[test_index:].astype('float32')
test_labels = new_Y[test_index:]
mean_data = np.mean(train_data,axis=0)
train_data -= mean_data
std_data = np.std(train_data.astype('float32'),axis=0)
train_data /= std_data
test_data -= mean_data
test_data /= std_data
return train_data, train_labels, test_data, test_labels
def compute_metrics(predict_Y, predict_Y_proba, test_labels, text):
temp_test_labels = test_labels
# compute metrics
from sklearn.metrics import precision_recall_fscore_support, \
roc_curve, auc, accuracy_score, confusion_matrix
fpr_rf, tpr_rf, _ = roc_curve(temp_test_labels, predict_Y_proba, pos_label=1)
auc_score = auc(fpr_rf, tpr_rf)
precision, recall, fscore, _ = precision_recall_fscore_support(temp_test_labels, predict_Y, average='binary',
pos_label=1)
accuracy = accuracy_score(temp_test_labels, predict_Y)
confusion = confusion_matrix(temp_test_labels, predict_Y)
print "precision: %f, recall: %f, fscore: %f" % (precision, recall, fscore)
print "AUC = " + str(auc_score)
print "accuracy = " + str(accuracy)
print "Confusion matrix: "
print confusion
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rf, tpr_rf, label=text)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.savefig('images/ROC_curve.png')
return [precision, recall, fscore]
pass
def run_RandomForest(train_data, train_labels, test_data, test_labels, text):
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
if run_mode == 1: # Classification mode
clf = RandomForestClassifier()
predict_Y = clf.fit(train_data, train_labels).predict(test_data)
predict_Y_proba = clf.predict_proba(test_data)[:,1]
[precision, recall, fscore] = compute_metrics(predict_Y, predict_Y_proba, test_labels, text)
return predict_Y, predict_Y_proba, [precision, recall, fscore]
else: # Regression Mode
clf = RandomForestRegressor()
predict_Y = clf.fit(train_data, train_labels).predict(test_data)
from sklearn.metrics import mean_squared_error
mse = mean_squared_error(test_labels, predict_Y)
print "mean squared error: " + str(mse)
return mse
def get_changes(feature):
import numpy as np
new_feature = np.zeros(len(feature))
for idx, val in enumerate(feature):
if idx == 0:
new_feature[idx] = 0
else:
new_feature[idx] = feature[idx] - feature[idx - 1]
return new_feature
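# Equivalent sketch (added note, not in the original): get_changes(feature)
# produces the same result as np.concatenate(([0], np.diff(feature))),
# i.e. the first-order difference of the series with a leading zero.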
def haversine(lon1, lat1, lon2, lat2):
from math import radians, cos, sin, asin, sqrt
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
r = 6371 # Radius of earth in kilometers. Use 3956 for miles
return c * r
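# Quick sanity check (added note): one degree of latitude is roughly 111 km,
# so haversine(0.0, 0.0, 0.0, 1.0) evaluates to about 111.2, and
# haversine(lon, lat, lon, lat) is always 0.0 for identical points.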
def get_station_coord():
import csv
list_stations_coord = {}
with open('dataset/stationMetadata.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for i, row in enumerate(readCSV):
if i == 0: continue
list_stations_coord[row[1]] = (float(row[8]), float(row[7]))
return list_stations_coord
pass
if __name__ == "__main__":
np.random.seed(1991)
window_sizes = range(0,30)
thresholds = range(0, 10)
list_stations_coord = get_station_coord()
X, Y, X_2, Y_2, X_3, Y_3 = read_data()
permutation = np.random.permutation(X.shape[0])
print("Run with Station own data only")
train_data, train_labels, test_data, test_labels = process_data(X, Y, permutation)
run_RandomForest(train_data, train_labels, test_data, test_labels, 'Method 3')
# print("Run with Nearby Stations data")
# train_data, train_labels, test_data, test_labels = process_data(X_2, Y_2, permutation)
#
# run_RandomForest(train_data, train_labels, test_data, test_labels, 'Method 5')
print("Combine 2 results: own station data and nearby stations")
train_data, train_labels, test_data, test_labels = process_data(X_3, Y_3, permutation)
run_RandomForest(train_data, train_labels, test_data, test_labels, 'Method 6')
| apache-2.0 |
zdszxp/gamesrc | Trdlib/src/boost_1_60_0/libs/numeric/odeint/performance/plot_result.py | 43 | 2225 | """
Copyright 2011-2014 Mario Mulansky
Copyright 2011-2014 Karsten Ahnert
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or
copy at http://www.boost.org/LICENSE_1_0.txt)
"""
import numpy as np
from matplotlib import pyplot as plt
plt.rc("font", size=16)
def get_runtime_from_file(filename):
gcc_perf_file = open(filename, 'r')
for line in gcc_perf_file:
if "Minimal Runtime:" in line:
return float(line.split(":")[-1])
t_gcc = [get_runtime_from_file("perf_workbook/odeint_rk4_array_gcc.perf"),
get_runtime_from_file("perf_ariel/odeint_rk4_array_gcc.perf"),
get_runtime_from_file("perf_lyra/odeint_rk4_array_gcc.perf")]
t_intel = [get_runtime_from_file("perf_workbook/odeint_rk4_array_intel.perf"),
get_runtime_from_file("perf_ariel/odeint_rk4_array_intel.perf"),
get_runtime_from_file("perf_lyra/odeint_rk4_array_intel.perf")]
t_gfort = [get_runtime_from_file("perf_workbook/rk4_gfort.perf"),
get_runtime_from_file("perf_ariel/rk4_gfort.perf"),
get_runtime_from_file("perf_lyra/rk4_gfort.perf")]
t_c_intel = [get_runtime_from_file("perf_workbook/rk4_c_intel.perf"),
get_runtime_from_file("perf_ariel/rk4_c_intel.perf"),
get_runtime_from_file("perf_lyra/rk4_c_intel.perf")]
print t_c_intel
ind = np.arange(3) # the x locations for the groups
width = 0.15 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, t_gcc, width, color='b', label="odeint gcc")
rects2 = ax.bar(ind+width, t_intel, width, color='g', label="odeint intel")
rects3 = ax.bar(ind+2*width, t_c_intel, width, color='y', label="C intel")
rects4 = ax.bar(ind+3*width, t_gfort, width, color='c', label="gfort")
ax.axis([-width, 2.0+5*width, 0.0, 0.85])
ax.set_ylabel('Runtime (s)')
ax.set_title('Performance for integrating the Lorenz system')
ax.set_xticks(ind + 1.5*width)
ax.set_xticklabels(('Core i5-3210M\n3.1 GHz',
'Xeon E5-2690\n3.8 GHz',
'Opteron 8431\n 2.4 GHz'))
ax.legend(loc='upper left', prop={'size': 16})
plt.savefig("perf.pdf")
plt.savefig("perf.png", dpi=50)
plt.show()
| gpl-3.0 |
cauchycui/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 127 | 37672 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d doesnt match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
    # Test whether the staged predictions eventually give
    # the same prediction as ``predict``.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
    assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
    assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
    assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
    # Test that a column-vector y raises a DataConversionWarning but the
    # model still fits correctly.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
    # Test that accessing oob_improvement_ raises an AttributeError when
    # no OOB estimates are available (subsample=1.0).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
    # one line per iteration for iterations 1-10, then one line every
    # 10 iterations for 20-100 (9 more lines)
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(loss=loss, n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/kernel_ridge.py | 48 | 6731 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
    medium-sized datasets. On the other hand, the learned model is non-sparse
    and thus slower at prediction time than SVR, which learns a sparse model
    for epsilon > 0.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Representation of weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
if sample_weight is not None and not isinstance(sample_weight, float):
sample_weight = check_array(sample_weight, ensure_2d=False)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
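        # Solve the ridge problem in its dual (kernel) form: find dual_coef_
        # such that (K + alpha * I) dual_coef_ = y; the Cholesky-based kernel
        # solver also handles per-target penalties and optional sample weights.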
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
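        # Predictions are kernel expansions over the training samples:
        # y_hat = K(X, X_fit_).dot(dual_coef_)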
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| mit |
appapantula/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters gets
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
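# Quick illustrative check of the statement above (a sketch on assumed toy
# labelings, not part of the original benchmark): two independent random
# labelings give an adjusted Rand index close to 0, while the unadjusted
# V-measure stays clearly positive.
rand_a = np.random.RandomState(0).randint(0, 10, size=200)
rand_b = np.random.RandomState(1).randint(0, 10, size=200)
print("ARI on random labelings: %.3f"
      % metrics.adjusted_rand_score(rand_a, rand_b))
print("V-measure on random labelings: %.3f"
      % metrics.v_measure_score(rand_a, rand_b))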
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
    When ``fixed_n_classes`` is not None, the first labeling is considered a
    ground truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
ChinaQuants/bokeh | bokeh/compat/mplexporter/utils.py | 35 | 11620 | """
Utility Routines for Working with Matplotlib Objects
====================================================
"""
import itertools
import io
import base64
import numpy as np
import warnings
import matplotlib
from matplotlib.colors import colorConverter
from matplotlib.path import Path
from matplotlib.markers import MarkerStyle
from matplotlib.transforms import Affine2D
from matplotlib import ticker
def color_to_hex(color):
"""Convert matplotlib color code to hex color code"""
if color is None or colorConverter.to_rgba(color)[3] == 0:
return 'none'
else:
rgb = colorConverter.to_rgb(color)
return '#{0:02X}{1:02X}{2:02X}'.format(*(int(255 * c) for c in rgb))
def _many_to_one(input_dict):
"""Convert a many-to-one mapping to a one-to-one mapping"""
return dict((key, val)
for keys, val in input_dict.items()
for key in keys)
LINESTYLES = _many_to_one({('solid', '-', (None, None)): 'none',
('dashed', '--'): "6,6",
('dotted', ':'): "2,2",
('dashdot', '-.'): "4,4,2,4",
('', ' ', 'None', 'none'): None})
def get_dasharray(obj):
"""Get an SVG dash array for the given matplotlib linestyle
Parameters
----------
obj : matplotlib object
The matplotlib line or path object, which must have a get_linestyle()
method which returns a valid matplotlib line code
Returns
-------
dasharray : string
The HTML/SVG dasharray code associated with the object.
"""
if obj.__dict__.get('_dashSeq', None) is not None:
return ','.join(map(str, obj._dashSeq))
else:
ls = obj.get_linestyle()
dasharray = LINESTYLES.get(ls, 'not found')
if dasharray == 'not found':
warnings.warn("line style '{0}' not understood: "
"defaulting to solid line.".format(ls))
dasharray = LINESTYLES['solid']
return dasharray
PATH_DICT = {Path.LINETO: 'L',
Path.MOVETO: 'M',
Path.CURVE3: 'S',
Path.CURVE4: 'C',
Path.CLOSEPOLY: 'Z'}
def SVG_path(path, transform=None, simplify=False):
"""Construct the vertices and SVG codes for the path
Parameters
----------
path : matplotlib.Path object
transform : matplotlib transform (optional)
if specified, the path will be transformed before computing the output.
Returns
-------
vertices : array
The shape (M, 2) array of vertices of the Path. Note that some Path
codes require multiple vertices, so the length of these vertices may
be longer than the list of path codes.
path_codes : list
A length N list of single-character path codes, N <= M. Each code is
a single character, in ['L','M','S','C','Z']. See the standard SVG
path specification for a description of these.
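    Examples
    --------
    A minimal sketch (the two-point path below is an illustrative assumption,
    not taken from the matplotlib docs):

    >>> from matplotlib.path import Path
    >>> path = Path([[0, 0], [1, 1]], [Path.MOVETO, Path.LINETO])
    >>> vertices, codes = SVG_path(path)
    >>> codes
    ['M', 'L']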
"""
if transform is not None:
path = path.transformed(transform)
vc_tuples = [(vertices if path_code != Path.CLOSEPOLY else [],
PATH_DICT[path_code])
for (vertices, path_code)
in path.iter_segments(simplify=simplify)]
if not vc_tuples:
# empty path is a special case
return np.zeros((0, 2)), []
else:
vertices, codes = zip(*vc_tuples)
vertices = np.array(list(itertools.chain(*vertices))).reshape(-1, 2)
return vertices, list(codes)
def get_path_style(path, fill=True):
"""Get the style dictionary for matplotlib path objects"""
style = {}
style['alpha'] = path.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['edgecolor'] = color_to_hex(path.get_edgecolor())
if fill:
style['facecolor'] = color_to_hex(path.get_facecolor())
else:
style['facecolor'] = 'none'
style['edgewidth'] = path.get_linewidth()
style['dasharray'] = get_dasharray(path)
style['zorder'] = path.get_zorder()
return style
def get_line_style(line):
"""Get the style dictionary for matplotlib line objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['color'] = color_to_hex(line.get_color())
style['linewidth'] = line.get_linewidth()
style['dasharray'] = get_dasharray(line)
style['zorder'] = line.get_zorder()
return style
def get_marker_style(line):
"""Get the style dictionary for matplotlib marker objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['facecolor'] = color_to_hex(line.get_markerfacecolor())
style['edgecolor'] = color_to_hex(line.get_markeredgecolor())
style['edgewidth'] = line.get_markeredgewidth()
style['marker'] = line.get_marker()
markerstyle = MarkerStyle(line.get_marker())
markersize = line.get_markersize()
markertransform = (markerstyle.get_transform()
+ Affine2D().scale(markersize, -markersize))
style['markerpath'] = SVG_path(markerstyle.get_path(),
markertransform)
style['markersize'] = markersize
style['zorder'] = line.get_zorder()
return style
def get_text_style(text):
"""Return the text style dict for a text instance"""
style = {}
style['alpha'] = text.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['fontsize'] = text.get_size()
style['color'] = color_to_hex(text.get_color())
style['halign'] = text.get_horizontalalignment() # left, center, right
style['valign'] = text.get_verticalalignment() # baseline, center, top
style['malign'] = text._multialignment # text alignment when '\n' in text
style['rotation'] = text.get_rotation()
style['zorder'] = text.get_zorder()
return style
def get_axis_properties(axis):
"""Return the property dictionary for a matplotlib.Axis instance"""
props = {}
label1On = axis._major_tick_kw.get('label1On', True)
if isinstance(axis, matplotlib.axis.XAxis):
if label1On:
props['position'] = "bottom"
else:
props['position'] = "top"
elif isinstance(axis, matplotlib.axis.YAxis):
if label1On:
props['position'] = "left"
else:
props['position'] = "right"
else:
raise ValueError("{0} should be an Axis instance".format(axis))
# Use tick values if appropriate
locator = axis.get_major_locator()
props['nticks'] = len(locator())
if isinstance(locator, ticker.FixedLocator):
props['tickvalues'] = list(locator())
else:
props['tickvalues'] = None
# Find tick formats
formatter = axis.get_major_formatter()
if isinstance(formatter, ticker.NullFormatter):
props['tickformat'] = ""
elif isinstance(formatter, ticker.FixedFormatter):
props['tickformat'] = list(formatter.seq)
elif not any(label.get_visible() for label in axis.get_ticklabels()):
props['tickformat'] = ""
else:
props['tickformat'] = None
# Get axis scale
props['scale'] = axis.get_scale()
# Get major tick label size (assumes that's all we really care about!)
labels = axis.get_ticklabels()
if labels:
props['fontsize'] = labels[0].get_fontsize()
else:
props['fontsize'] = None
# Get associated grid
props['grid'] = get_grid_style(axis)
# get axis visibility
props['visible'] = axis.get_visible()
return props
def get_grid_style(axis):
gridlines = axis.get_gridlines()
if axis._gridOnMajor and len(gridlines) > 0:
color = color_to_hex(gridlines[0].get_color())
alpha = gridlines[0].get_alpha()
dasharray = get_dasharray(gridlines[0])
return dict(gridOn=True,
color=color,
dasharray=dasharray,
alpha=alpha)
else:
return {"gridOn": False}
def get_figure_properties(fig):
return {'figwidth': fig.get_figwidth(),
'figheight': fig.get_figheight(),
'dpi': fig.dpi}
def get_axes_properties(ax):
props = {'axesbg': color_to_hex(ax.patch.get_facecolor()),
'axesbgalpha': ax.patch.get_alpha(),
'bounds': ax.get_position().bounds,
'dynamic': ax.get_navigate(),
'axison': ax.axison,
'frame_on': ax.get_frame_on(),
'patch_visible':ax.patch.get_visible(),
'axes': [get_axis_properties(ax.xaxis),
get_axis_properties(ax.yaxis)]}
for axname in ['x', 'y']:
axis = getattr(ax, axname + 'axis')
domain = getattr(ax, 'get_{0}lim'.format(axname))()
lim = domain
if isinstance(axis.converter, matplotlib.dates.DateConverter):
scale = 'date'
try:
import pandas as pd
from pandas.tseries.converter import PeriodConverter
except ImportError:
pd = None
if (pd is not None and isinstance(axis.converter,
PeriodConverter)):
_dates = [pd.Period(ordinal=int(d), freq=axis.freq)
for d in domain]
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second, 0)
for d in _dates]
else:
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second,
d.microsecond * 1E-3)
for d in matplotlib.dates.num2date(domain)]
else:
scale = axis.get_scale()
if scale not in ['date', 'linear', 'log']:
raise ValueError("Unknown axis scale: "
"{0}".format(axis.get_scale()))
props[axname + 'scale'] = scale
props[axname + 'lim'] = lim
props[axname + 'domain'] = domain
return props
def iter_all_children(obj, skipContainers=False):
"""
    Returns an iterator over all children and nested children using
    obj's get_children() method.
    If skipContainers is true, only childless objects are returned.
"""
if hasattr(obj, 'get_children') and len(obj.get_children()) > 0:
for child in obj.get_children():
if not skipContainers:
yield child
# could use `yield from` in python 3...
for grandchild in iter_all_children(child, skipContainers):
yield grandchild
else:
yield obj
def get_legend_properties(ax, legend):
handles, labels = ax.get_legend_handles_labels()
visible = legend.get_visible()
return {'handles': handles, 'labels': labels, 'visible': visible}
def image_to_base64(image):
"""
Convert a matplotlib image to a base64 png representation
Parameters
----------
image : matplotlib image object
The image to be converted.
Returns
-------
image_base64 : string
The UTF8-encoded base64 string representation of the png image.
"""
ax = image.axes
binary_buffer = io.BytesIO()
# image is saved in axes coordinates: we need to temporarily
# set the correct limits to get the correct image
lim = ax.axis()
ax.axis(image.get_extent())
image.write_png(binary_buffer)
ax.axis(lim)
binary_buffer.seek(0)
return base64.b64encode(binary_buffer.read()).decode('utf-8')
| bsd-3-clause |
RPGOne/scikit-learn | sklearn/cluster/spectral.py | 25 | 18535 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
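    Examples
    --------
    A minimal usage sketch (the random embedding below is an illustrative
    assumption; the recovered labels depend on it)::

        rng = np.random.RandomState(0)
        embedding = rng.rand(6, 2)   # 6 samples, 2 spectral components
        labels = discretize(embedding, random_state=0)
        # ``labels`` assigns each of the 6 samples to one of the 2 clusters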
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
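    # The pipeline is: (1) embed the samples with the leading eigenvectors of
    # the normalized graph Laplacian of ``affinity``, then (2) assign discrete
    # labels in that embedding space, with either k-means or the iterative
    # discretization procedure.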
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float, default=1.0
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
Affinity matrix used for clustering. Available only if after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
Where ``delta`` is a free parameter representing the width of the Gaussian
kernel.
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
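    Examples
    --------
    A minimal usage sketch (the six 2-D points below are an illustrative
    assumption, not taken from the reference documentation):

    >>> import numpy as np
    >>> from sklearn.cluster import SpectralClustering
    >>> X = np.array([[1., 1.], [2., 1.], [1., 0.],
    ...               [4., 7.], [3., 5.], [3., 6.]])
    >>> clustering = SpectralClustering(n_clusters=2, affinity='rbf',
    ...                                 random_state=0).fit(X)
    >>> clustering.labels_.shape
    (6,)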
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None, n_jobs=1):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True,
n_jobs=self.n_jobs)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
| bsd-3-clause |
michalkurka/h2o-3 | h2o-py/h2o/model/extensions/feature_interaction.py | 2 | 2636 | import h2o
class FeatureInteraction:
def _feature_interaction(self, max_interaction_depth=100, max_tree_depth=100, max_deepening=-1, path=None):
"""
Feature interactions and importance, leaf statistics and split value histograms in a tabular form.
Available for XGBoost and GBM.
Metrics:
Gain - Total gain of each feature or feature interaction.
FScore - Amount of possible splits taken on a feature or feature interaction.
wFScore - Amount of possible splits taken on a feature or feature interaction weighed by
the probability of the splits to take place.
Average wFScore - wFScore divided by FScore.
Average Gain - Gain divided by FScore.
Expected Gain - Total gain of each feature or feature interaction weighed by the probability to gather the gain.
Average Tree Index
Average Tree Depth
:param max_interaction_depth: Upper bound for extracted feature interactions depth. Defaults to 100.
:param max_tree_depth: Upper bound for tree depth. Defaults to 100.
:param max_deepening: Upper bound for interaction start deepening (zero deepening => interactions
starting at root only). Defaults to -1.
:param path: (Optional) Path where to save the output in .xlsx format (e.g. ``/mypath/file.xlsx``).
Please note that Pandas and XlsxWriter need to be installed for using this option. Defaults to None.
:examples:
>>> boston = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/BostonHousing.csv")
>>> predictors = boston.columns[:-1]
>>> response = "medv"
>>> boston['chas'] = boston['chas'].asfactor()
>>> train, valid = boston.split_frame(ratios=[.8])
>>> boston_xgb = H2OXGBoostEstimator(seed=1234)
>>> boston_xgb.train(y=response, x=predictors, training_frame=train)
>>> feature_interactions = boston_xgb.feature_interaction()
"""
kwargs = {}
kwargs["model_id"] = self.model_id
kwargs["max_interaction_depth"] = max_interaction_depth
kwargs["max_tree_depth"] = max_tree_depth
kwargs["max_deepening"] = max_deepening
json = h2o.api("POST /3/FeatureInteraction", data=kwargs)
if path is not None:
import pandas as pd
writer = pd.ExcelWriter(path, engine='xlsxwriter')
for fi in json['feature_interaction']:
fi.as_data_frame().to_excel(writer, sheet_name=fi._table_header)
writer.save()
return json['feature_interaction']
| apache-2.0 |
youprofit/scikit-image | doc/examples/plot_regionprops.py | 23 | 1297 | """
=========================
Measure region properties
=========================
This example shows how to measure properties of labelled image regions.
"""
import math
import matplotlib.pyplot as plt
import numpy as np
from skimage.draw import ellipse
from skimage.measure import label, regionprops
from skimage.transform import rotate
image = np.zeros((600, 600))
rr, cc = ellipse(300, 350, 100, 220)
image[rr, cc] = 1
image = rotate(image, angle=15, order=0)
label_img = label(image)
regions = regionprops(label_img)
fig, ax = plt.subplots()
ax.imshow(image, cmap=plt.cm.gray)
for props in regions:
y0, x0 = props.centroid
orientation = props.orientation
x1 = x0 + math.cos(orientation) * 0.5 * props.major_axis_length
y1 = y0 - math.sin(orientation) * 0.5 * props.major_axis_length
x2 = x0 - math.sin(orientation) * 0.5 * props.minor_axis_length
y2 = y0 - math.cos(orientation) * 0.5 * props.minor_axis_length
ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)
ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)
ax.plot(x0, y0, '.g', markersize=15)
minr, minc, maxr, maxc = props.bbox
bx = (minc, maxc, maxc, minc, minc)
by = (minr, minr, maxr, maxr, minr)
ax.plot(bx, by, '-b', linewidth=2.5)
ax.axis((0, 600, 600, 0))
plt.show()
| bsd-3-clause |
Savahi/tnn | tnn/calib/trade.py | 1 | 4263 | from tnn.io import prepareData, loadNetwork
import numpy as np
import sys
from tnn.calib import graph
from tnn.network import Network
#from tnn.boosting import aggregate
from tnn.data_handler.RTS_Ret_Vol_Mom_Sto_RSI_SMA_6 import calc_data
def simple_sum(threshold = 2):
def f(decisions):
d = sum(decisions)
if d >= threshold:
return 1
elif d <= -threshold:
return -1
else:
return 0
return f
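# Illustrative behaviour (assuming each network emits +1, 0 or -1):
# simple_sum(threshold=2)([1, 1, 0]) -> 1, ([1, -1, 0]) -> 0, ([-1, -1, -1]) -> -1.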
def processData(fileWithRates, calcData):
trainData, testData = prepareData(fileWithRates=fileWithRates, calcData=calcData)
#data = {key:np.array(list(trainData[key])+list(testData[key])) for key in trainData.keys()}
data = np.array(list(trainData['inputs'])+list(testData['inputs']))
profits = np.array(list(trainData['profit'])+list(testData['profit']))
return {'inputs':data, 'profits':profits}
def trade_single(NNfile, fileWithRates,calcData=None):
nn = loadNetwork(NNfile)
if nn is None:
print("Failed to load network, exiting")
return
trainData, testData = prepareData(fileWithRates=fileWithRates, calcData=calcData)
data = np.array(list(trainData['inputs'])+list(testData['inputs']))
profits= np.array(list(trainData['profit'])+list(testData['profit']))
print ("Trading...")
decisions = nn.calcOutputs(data)
pnl = [0]
for decision, profit in zip(decisions, profits):
if max(decision) in (decision[0], decision[1]): # short
pnl.append(pnl[-1]-profit)
elif max(decision) in (decision[-1], decision[-2]): # long
pnl.append(pnl[-1]+profit)
else:
pnl.append(0)
pnl = pnl[1:]
return {"decisions":decisions, "pnl":pnl, "profits":profits}
# old trade aggregate, without flipover trading
"""def trade_aggregate(NNfiles, fileWithRates,calcDatas, aggregateDecisions=None):
#aggregateDecisions = aggregateDecisions or simple_sum
results = [trade_single(x,fileWithRates,y) for x,y in zip(NNfiles,calcDatas)]
decisions=[x["decisions"] for x in results]
print(decisions)
pnls = [x["pnl"] for x in results]
profits=results[0]["profits"]
decisionPoints = zip(*decisions)
totalDecisions = [aggregateDecisions(x) for x in decisionPoints]
pnl = [0]
for decision, profit in zip(totalDecisions, profits):
if max(decision) == (decision[0]): # short
pnl.append(pnl[-1]-profit)
elif max(decision) == (decision[-1]): # long
pnl.append(pnl[-1]+profit)
else:
pnl.append(0)
pnl = pnl[1:]
return {"decisions":decisions, "pnl":pnl, "profits":profits}"""
# with flipover trading
def trade_aggregate(NNfiles, fileWithRates,calcDatas, aggregateDecisions=None):
def flatten_decisions(decisions):
res = []
for decision in decisions:
if max(decision) == (decision[0]):
res.append(-1)
elif max(decision) == decision[-1]:
res.append(+1)
else:
res.append(0)
return res
def flipover(decisions):
res = [0] #extra 0 to be removed
for i in range(len(decisions)):
point = decisions[i]
if point == 0:
res.append(res[-1])
else:
res.append(point)
return res[1:]
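# For example, flipover([0, 1, 0, 0, -1, 0]) gives [0, 1, 1, 1, -1, -1]:
# a zero decision keeps the previous position instead of going flat.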
results = [trade_single(x,fileWithRates,y) for x,y in zip(NNfiles,calcDatas)]
decisions=[x["decisions"] for x in results]
flat_decisions = [ flatten_decisions(x) for x in decisions]
flip_decisions = [ flipover(x) for x in flat_decisions]
pnls = [x["pnl"] for x in results]
profits=results[0]["profits"]
decisionPoints = zip(*flip_decisions)
totalDecisions = [aggregateDecisions(x) for x in decisionPoints]
flipTotalDecisions = flipover(totalDecisions)
pnl = [0]
for decision, profit in zip(flipTotalDecisions, profits):
if decision == -1: # short
pnl.append(pnl[-1]-profit)
elif decision == +1: # long
pnl.append(pnl[-1]+profit)
else:
pnl.append(0)
pnl = pnl[1:]
return {"decisions":decisions, "pnl":pnl, "profits":profits}
def show_graphs(result):
import matplotlib.pyplot as plt
# Train
plt.figure ()
plt.plot(result["pnl"])
plt.show()
if __name__=="__main__":
from tnn.calib.trade_config import params
NNFiles = params["networks"]
fileWithRates=params["fileWithRates"]
calcDatas = params["calcDatas"]
aggregateLogic = params["aggregateLogic"]
result=None
if len(NNFiles) == 1:
result = trade_single(NNFiles[0], fileWithRates, calcDatas[0])
else:
result = trade_aggregate(NNFiles, fileWithRates, calcDatas, aggregateLogic)
show_graphs(result)
| mit |
Djabbz/scikit-learn | sklearn/datasets/samples_generator.py | 103 | 56423 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
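Examples
--------
An illustrative call; only array shapes are shown since the values depend
on the random state.
>>> from sklearn.datasets.samples_generator import make_classification
>>> X, y = make_classification(n_samples=100, n_features=20,
...                            n_informative=2, n_redundant=2,
...                            random_state=42)
>>> print(X.shape)
(100, 20)
>>> print(y.shape)
(100,)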
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller than or equal to 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : 'dense' (default) | 'sparse' | False
If ``'dense'`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
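Examples
--------
A small illustrative call; with the default dense indicator format ``Y`` is
an (n_samples, n_classes) binary array.
>>> from sklearn.datasets.samples_generator import make_multilabel_classification
>>> X, Y = make_multilabel_classification(n_samples=5, n_features=10,
...                                       n_classes=3, random_state=42)
>>> print(X.shape)
(5, 10)
>>> print(Y.shape)
(5, 3)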
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
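Examples
--------
An illustrative call; the targets are +1/-1 by construction.
>>> from sklearn.datasets.samples_generator import make_hastie_10_2
>>> X, y = make_hastie_10_2(n_samples=1000, random_state=0)
>>> print(X.shape)
(1000, 10)
>>> print(y.shape)
(1000,)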
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
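Examples
--------
A sketch of a small problem with two informative features; shapes only, as
the values depend on the random state.
>>> from sklearn.datasets.samples_generator import make_regression
>>> X, y, coef = make_regression(n_samples=50, n_features=5,
...                              n_informative=2, coef=True, random_state=0)
>>> print(X.shape)
(50, 5)
>>> print(coef.shape)
(5,)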
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
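Examples
--------
An illustrative call; half of the points lie on each circle.
>>> from sklearn.datasets.samples_generator import make_circles
>>> X, y = make_circles(n_samples=100, noise=0.05, factor=0.5,
...                     random_state=0)
>>> print(X.shape)
(100, 2)
>>> print(y.shape)
(100,)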
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
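Examples
--------
An illustrative call; only the first five features influence ``y``.
>>> from sklearn.datasets.samples_generator import make_friedman1
>>> X, y = make_friedman1(n_samples=100, n_features=10, noise=0.1,
...                       random_state=0)
>>> print(X.shape)
(100, 10)
>>> print(y.shape)
(100,)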
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
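Examples
--------
A sketch generating an approximately rank-10 matrix (shape shown only).
>>> from sklearn.datasets.samples_generator import make_low_rank_matrix
>>> X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=10,
...                          random_state=0)
>>> print(X.shape)
(50, 25)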
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
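Examples
--------
A sketch showing the shapes of the decomposition Y = DX.
>>> from sklearn.datasets.samples_generator import make_sparse_coded_signal
>>> Y, D, X = make_sparse_coded_signal(n_samples=10, n_components=30,
...                                    n_features=20, n_nonzero_coefs=5,
...                                    random_state=0)
>>> print(D.shape)
(20, 30)
>>> print(X.shape)
(30, 10)
>>> print(Y.shape)
(20, 10)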
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
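Examples
--------
An illustrative call; only the first four features are informative.
>>> from sklearn.datasets.samples_generator import make_sparse_uncorrelated
>>> X, y = make_sparse_uncorrelated(n_samples=100, n_features=10,
...                                 random_state=0)
>>> print(X.shape)
(100, 10)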
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
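Examples
--------
A sketch generating a 3 x 3 symmetric positive-definite matrix.
>>> from sklearn.datasets.samples_generator import make_spd_matrix
>>> X = make_spd_matrix(n_dim=3, random_state=0)
>>> print(X.shape)
(3, 3)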
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
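Examples
--------
A sketch of a sparse 5 x 5 precision matrix (shape shown only).
>>> from sklearn.datasets.samples_generator import make_sparse_spd_matrix
>>> prec = make_sparse_spd_matrix(dim=5, alpha=0.9, random_state=0)
>>> print(prec.shape)
(5, 5)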
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the rows and columns: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
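Examples
--------
An illustrative call; ``X`` holds the 3-D points and ``t`` their position
along the roll.
>>> from sklearn.datasets.samples_generator import make_swiss_roll
>>> X, t = make_swiss_roll(n_samples=100, noise=0.05, random_state=0)
>>> print(X.shape)
(100, 3)
>>> print(t.shape)
(100,)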
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
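Examples
--------
An illustrative call; classes correspond to nested quantile shells.
>>> from sklearn.datasets.samples_generator import make_gaussian_quantiles
>>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
...                                random_state=0)
>>> print(X.shape)
(90, 2)
>>> print(y.shape)
(90,)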
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
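Examples
--------
A sketch of a 30 x 20 array with three constant biclusters (shapes only).
>>> from sklearn.datasets.samples_generator import make_biclusters
>>> X, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3, noise=0.5,
...                                 random_state=0)
>>> print(X.shape)
(30, 20)
>>> print(rows.shape)
(3, 30)
>>> print(cols.shape)
(3, 20)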
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| bsd-3-clause |
songjs1993/DeepLearning | 5Project/facenet/src/validate_on_lfw.py | 1 | 5447 | """Validate a face recognizer on the "Labeled Faces in the Wild" dataset (http://vis-www.cs.umass.edu/lfw/).
Embeddings are calculated using the pairs from http://vis-www.cs.umass.edu/lfw/pairs.txt and the ROC curve
is calculated and plotted. Both the model metagraph and the model parameters need to exist
in the same directory, and the metagraph should have the extension '.meta'.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import argparse
import facenet
import lfw
import os
import sys
import math
from sklearn import metrics
from scipy.optimize import brentq
from scipy import interpolate
def main(args):
with tf.Graph().as_default():
with tf.Session() as sess:
# Read the file containing the pairs used for testing
pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
# Get the paths for the corresponding images
paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
# Load the model
facenet.load_model(args.model)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
#image_size = images_placeholder.get_shape()[1] # For some reason this doesn't work for frozen graphs
image_size = args.image_size
embedding_size = embeddings.get_shape()[1]
# Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
batch_size = args.lfw_batch_size
nrof_images = len(paths)
nrof_batches = int(math.ceil(1.0*nrof_images / batch_size))
emb_array = np.zeros((nrof_images, embedding_size))
for i in range(nrof_batches):
start_index = i*batch_size
end_index = min((i+1)*batch_size, nrof_images)
paths_batch = paths[start_index:end_index]
images = facenet.load_data(paths_batch, False, False, image_size)
feed_dict = { images_placeholder:images, phase_train_placeholder:False }
emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)
tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array,
actual_issame, nrof_folds=args.lfw_nrof_folds)
print("finish!")
return
print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
auc = metrics.auc(fpr, tpr)
print('Area Under Curve (AUC): %1.3f' % auc)
eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
print('Equal Error Rate (EER): %1.3f' % eer)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('lfw_dir', type=str,
help='Path to the data directory containing aligned LFW face patches.')
parser.add_argument('--lfw_batch_size', type=int,
help='Number of images to process in a batch in the LFW test set.', default=100)
parser.add_argument('model', type=str,
help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--lfw_pairs', type=str,
help='The file containing the pairs to use for validation.', default='data/pairs.txt')
parser.add_argument('--lfw_file_ext', type=str,
help='The file extension for the LFW dataset.', default='png', choices=['jpg', 'png'])
parser.add_argument('--lfw_nrof_folds', type=int,
help='Number of folds to use for cross validation. Mainly used for testing.', default=10)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
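# Hypothetical invocation sketch (paths and model directory are placeholders,
# not taken from the original project):
#
#   python validate_on_lfw.py ~/datasets/lfw/lfw_aligned_160 \
#       ~/models/facenet/20170512-110547 \
#       --lfw_pairs data/pairs.txt --lfw_file_ext png --lfw_batch_size 100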
| apache-2.0 |
FEniCS/dolfin | demo/undocumented/multimesh-poisson/python/demo_multimesh-poisson.py | 1 | 3820 | # Copyright (C) 2015 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2015-11-05
# Last changed: 2015-11-17
#
# This demo program solves Poisson's equation on a domain defined by
# three overlapping and non-matching meshes. The solution is computed
# on a sequence of rotating meshes to test the multimesh
# functionality.
from dolfin import *
import matplotlib.pyplot as plt
if has_pybind11():
print("Not supported in pybind11")
exit()
class DirichletBoundary(SubDomain):
def inside(self, x, on_boundary):
return on_boundary
def solve_poisson(t, x1, y1, x2, y2, plot_solution,
u0_file, u1_file, u2_file):
"Compute solution for given mesh configuration"
# Create meshes
r = 0.5
mesh_0 = RectangleMesh(Point(-r, -r), Point(r, r), 16, 16)
mesh_1 = RectangleMesh(Point(x1 - r, y1 - r), Point(x1 + r, y1 + r), 8, 8)
mesh_2 = RectangleMesh(Point(x2 - r, y2 - r), Point(x2 + r, y2 + r), 8, 8)
mesh_1.rotate(70*t)
mesh_2.rotate(-70*t)
# Build multimesh
multimesh = MultiMesh()
multimesh.add(mesh_0)
multimesh.add(mesh_1)
multimesh.add(mesh_2)
multimesh.build()
# Create function space
V = MultiMeshFunctionSpace(multimesh, "Lagrange", 1)
# Define trial and test functions and right-hand side
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(1)
# Define facet normal and mesh size
n = FacetNormal(multimesh)
h = 2.0*Circumradius(multimesh)
h = (h('+') + h('-')) / 2
# Set parameters
alpha = 4.0
beta = 4.0
# Define bilinear form
a = dot(grad(u), grad(v))*dX \
- dot(avg(grad(u)), jump(v, n))*dI \
- dot(avg(grad(v)), jump(u, n))*dI \
+ alpha/h*jump(u)*jump(v)*dI \
+ beta*dot(jump(grad(u)), jump(grad(v)))*dO
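    # Interpretation (comment added for clarity): dX, dI and dO integrate over
    # the cut cells, the interface between overlapping meshes and the overlap
    # region, respectively; the alpha/h term is a Nitsche-type penalty that
    # enforces continuity across the interface, and the beta term stabilises
    # the gradient jump on the overlap.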
# Define linear form
L = f*v*dX
# Assemble linear system
A = assemble_multimesh(a)
b = assemble_multimesh(L)
# Apply boundary condition
zero = Constant(0)
boundary = DirichletBoundary()
bc = MultiMeshDirichletBC(V, zero, boundary)
bc.apply(A, b)
# Compute solution
u = MultiMeshFunction(V)
solve(A, u.vector(), b)
# Save to file
u0_file << u.part(0)
u1_file << u.part(1)
u2_file << u.part(2)
# Plot solution (last time)
#if plot_solution:
# plt.figure(); plot(V.multimesh())
# plt.figure(); plot(u.part(0), title="u_0")
# plt.figure(); plot(u.part(1), title="u_1")
# plt.figure(); plot(u.part(2), title="u_2")
# plt.show()
if MPI.size(mpi_comm_world()) > 1:
info("Sorry, this demo does not (yet) run in parallel.")
exit(0)
# Parameters
T = 40.0
N = 400
dt = T / N
# Files for storing solution
u0_file = File("u0.pvd")
u1_file = File("u1.pvd")
u2_file = File("u2.pvd")
# Iterate over configurations
for n in range(N):
info("Computing solution, step %d / %d." % (n + 1, N))
# Compute coordinates for meshes
t = dt*n
x1 = sin(t)*cos(2*t)
y1 = cos(t)*cos(2*t)
x2 = cos(t)*cos(2*t)
y2 = sin(t)*cos(2*t)
# Compute solution
solve_poisson(t, x1, y1, x2, y2, n == N - 1,
u0_file, u1_file, u2_file)
| lgpl-3.0 |
ligo-cbc/pycbc | pycbc/results/scatter_histograms.py | 4 | 29832 | # Copyright (C) 2016 Miriam Cabero Mueller, Collin Capano
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
Module to generate figures with scatter plots and histograms.
"""
import itertools
import sys
import numpy
import scipy.stats
import matplotlib
# Only if a backend is not already set ... This should really *not* be done
# here, but in the executables you should set matplotlib.use()
# This matches the check that matplotlib does internally, but this *may* be
# version dependent. If this is a problem then remove this and control from
# the executables directly.
if 'matplotlib.backends' not in sys.modules: # nopep8
matplotlib.use('agg')
from matplotlib import (offsetbox, pyplot, gridspec)
from pycbc.results import str_utils
from pycbc.io import FieldArray
def create_axes_grid(parameters, labels=None, height_ratios=None,
width_ratios=None, no_diagonals=False):
"""Given a list of parameters, creates a figure with an axis for
every possible combination of the parameters.
Parameters
----------
parameters : list
Names of the variables to be plotted.
labels : {None, dict}, optional
A dictionary of parameters -> parameter labels.
height_ratios : {None, list}, optional
Set the height ratios of the axes; see `matplotlib.gridspec.GridSpec`
for details.
width_ratios : {None, list}, optional
Set the width ratios of the axes; see `matplotlib.gridspec.GridSpec`
for details.
no_diagonals : {False, bool}, optional
Do not produce axes for the same parameter on both axes.
Returns
-------
fig : pyplot.figure
The figure that was created.
axis_dict : dict
A dictionary mapping the parameter combinations to the axis and their
location in the subplots grid; i.e., the key, values are:
`{('param1', 'param2'): (pyplot.axes, row index, column index)}`
"""
if labels is None:
labels = {p: p for p in parameters}
elif any(p not in labels for p in parameters):
raise ValueError("labels must be provided for all parameters")
# Create figure with adequate size for number of parameters.
ndim = len(parameters)
if no_diagonals:
ndim -= 1
if ndim < 3:
fsize = (8, 7)
else:
fsize = (ndim*3 - 1, ndim*3 - 2)
fig = pyplot.figure(figsize=fsize)
# create the axis grid
gs = gridspec.GridSpec(ndim, ndim, width_ratios=width_ratios,
height_ratios=height_ratios,
wspace=0.05, hspace=0.05)
# create grid of axis numbers to easily create axes in the right locations
axes = numpy.arange(ndim**2).reshape((ndim, ndim))
# Select possible combinations of plots and establish rows and columns.
combos = list(itertools.combinations(parameters, 2))
# add the diagonals
if not no_diagonals:
combos += [(p, p) for p in parameters]
# create the mapping between parameter combos and axes
axis_dict = {}
    # cycle over all the axes, setting things as needed
for nrow in range(ndim):
for ncolumn in range(ndim):
ax = pyplot.subplot(gs[axes[nrow, ncolumn]])
# map to a parameter index
px = parameters[ncolumn]
if no_diagonals:
py = parameters[nrow+1]
else:
py = parameters[nrow]
if (px, py) in combos:
axis_dict[px, py] = (ax, nrow, ncolumn)
# x labels only on bottom
if nrow + 1 == ndim:
ax.set_xlabel('{}'.format(labels[px]), fontsize=18)
else:
pyplot.setp(ax.get_xticklabels(), visible=False)
# y labels only on left
if ncolumn == 0:
ax.set_ylabel('{}'.format(labels[py]), fontsize=18)
else:
pyplot.setp(ax.get_yticklabels(), visible=False)
else:
# make non-used axes invisible
ax.axis('off')
return fig, axis_dict
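# Hypothetical usage sketch for create_axes_grid (parameter names are
# illustrative only, not taken from any particular analysis):
#
#     fig, axis_dict = create_axes_grid(['mass1', 'mass2', 'distance'])
#     ax, row, column = axis_dict[('mass1', 'mass2')]  # axes for that pair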
def get_scale_fac(fig, fiducial_width=8, fiducial_height=7):
"""Gets a factor to scale fonts by for the given figure. The scale
factor is relative to a figure with dimensions
(`fiducial_width`, `fiducial_height`).
"""
width, height = fig.get_size_inches()
return (width*height/(fiducial_width*fiducial_height))**0.5
def construct_kde(samples_array, use_kombine=False):
"""Constructs a KDE from the given samples.
"""
if use_kombine:
try:
import kombine
except ImportError:
raise ImportError("kombine is not installed.")
# construct the kde
if use_kombine:
kde = kombine.clustered_kde.KDE(samples_array)
else:
kde = scipy.stats.gaussian_kde(samples_array.T)
return kde
def create_density_plot(xparam, yparam, samples, plot_density=True,
plot_contours=True, percentiles=None, cmap='viridis',
contour_color=None, xmin=None, xmax=None,
ymin=None, ymax=None, exclude_region=None,
fig=None, ax=None, use_kombine=False):
"""Computes and plots posterior density and confidence intervals using the
given samples.
Parameters
----------
xparam : string
The parameter to plot on the x-axis.
yparam : string
The parameter to plot on the y-axis.
samples : dict, numpy structured array, or FieldArray
The samples to plot.
plot_density : {True, bool}
Plot a color map of the density.
plot_contours : {True, bool}
Plot contours showing the n-th percentiles of the density.
percentiles : {None, float or array}
What percentile contours to draw. If None, will plot the 50th
and 90th percentiles.
cmap : {'viridis', string}
The name of the colormap to use for the density plot.
contour_color : {None, string}
What color to make the contours. Default is white for density
plots and black for other plots.
xmin : {None, float}
Minimum value to plot on x-axis.
xmax : {None, float}
Maximum value to plot on x-axis.
ymin : {None, float}
Minimum value to plot on y-axis.
ymax : {None, float}
Maximum value to plot on y-axis.
    exclude_region : {None, str}
Exclude the specified region when plotting the density or contours.
Must be a string in terms of `xparam` and `yparam` that is
understandable by numpy's logical evaluation. For example, if
`xparam = m_1` and `yparam = m_2`, and you want to exclude the region
for which `m_2` is greater than `m_1`, then exclude region should be
`'m_2 > m_1'`.
fig : {None, pyplot.figure}
Add the plot to the given figure. If None and ax is None, will create
a new figure.
ax : {None, pyplot.axes}
Draw plot on the given axis. If None, will create a new axis from
`fig`.
use_kombine : {False, bool}
Use kombine's KDE to calculate density. Otherwise, will use
`scipy.stats.gaussian_kde.` Default is False.
Returns
-------
fig : pyplot.figure
The figure the plot was made on.
ax : pyplot.axes
The axes the plot was drawn on.
"""
if percentiles is None:
percentiles = numpy.array([50., 90.])
percentiles = 100. - numpy.array(percentiles)
percentiles.sort()
if ax is None and fig is None:
fig = pyplot.figure()
if ax is None:
ax = fig.add_subplot(111)
# convert samples to array and construct kde
xsamples = samples[xparam]
ysamples = samples[yparam]
arr = numpy.vstack((xsamples, ysamples)).T
kde = construct_kde(arr, use_kombine=use_kombine)
# construct grid to evaluate on
if xmin is None:
xmin = xsamples.min()
if xmax is None:
xmax = xsamples.max()
if ymin is None:
ymin = ysamples.min()
if ymax is None:
ymax = ysamples.max()
npts = 100
X, Y = numpy.mgrid[
xmin:xmax:complex(0, npts), # pylint:disable=invalid-slice-index
ymin:ymax:complex(0, npts)] # pylint:disable=invalid-slice-index
pos = numpy.vstack([X.ravel(), Y.ravel()])
if use_kombine:
Z = numpy.exp(kde(pos.T).reshape(X.shape))
draw = kde.draw
else:
Z = kde(pos).T.reshape(X.shape)
draw = kde.resample
if exclude_region is not None:
# convert X,Y to a single FieldArray so we can use it's ability to
# evaluate strings
farr = FieldArray.from_kwargs(**{xparam: X, yparam: Y})
Z[farr[exclude_region]] = 0.
if plot_density:
ax.imshow(numpy.rot90(Z), extent=[xmin, xmax, ymin, ymax],
aspect='auto', cmap=cmap, zorder=1)
if contour_color is None:
contour_color = 'w'
if plot_contours:
# compute the percentile values
resamps = kde(draw(int(npts**2)))
if use_kombine:
resamps = numpy.exp(resamps)
s = numpy.percentile(resamps, percentiles)
if contour_color is None:
contour_color = 'k'
# make linewidths thicker if not plotting density for clarity
if plot_density:
lw = 1
else:
lw = 2
ct = ax.contour(X, Y, Z, s, colors=contour_color, linewidths=lw,
zorder=3)
# label contours
lbls = ['{p}%'.format(p=int(p)) for p in (100. - percentiles)]
fmt = dict(zip(ct.levels, lbls))
fs = 12
ax.clabel(ct, ct.levels, inline=True, fmt=fmt, fontsize=fs)
return fig, ax
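# Hypothetical usage sketch for create_density_plot (the dictionary and
# parameter names are illustrative only):
#
#     posterior = {'mass1': mass1_samples, 'mass2': mass2_samples}
#     fig, ax = create_density_plot('mass1', 'mass2', posterior,
#                                   plot_density=True, plot_contours=True)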
def create_marginalized_hist(ax, values, label, percentiles=None,
color='k', fillcolor='gray', linecolor='navy',
linestyle='-',
title=True, expected_value=None,
expected_color='red', rotated=False,
plot_min=None, plot_max=None):
"""Plots a 1D marginalized histogram of the given param from the given
samples.
Parameters
----------
ax : pyplot.Axes
The axes on which to draw the plot.
values : array
The parameter values to plot.
label : str
A label to use for the title.
percentiles : {None, float or array}
What percentiles to draw lines at. If None, will draw lines at
`[5, 50, 95]` (i.e., the bounds on the upper 90th percentile and the
median).
color : {'k', string}
What color to make the histogram; default is black.
fillcolor : {'gray', string, or None}
What color to fill the histogram with. Set to None to not fill the
histogram. Default is 'gray'.
linestyle : str, optional
What line style to use for the histogram. Default is '-'.
linecolor : {'navy', string}
What color to use for the percentile lines. Default is 'navy'.
title : bool, optional
Add a title with a estimated value +/- uncertainty. The estimated value
        is the percentile halfway between the max/min of ``percentiles``, while
the uncertainty is given by the max/min of the ``percentiles``. If no
percentiles are specified, defaults to quoting the median +/- 95/5
percentiles.
rotated : {False, bool}
Plot the histogram on the y-axis instead of the x. Default is False.
plot_min : {None, float}
The minimum value to plot. If None, will default to whatever `pyplot`
creates.
plot_max : {None, float}
The maximum value to plot. If None, will default to whatever `pyplot`
creates.
    """
if fillcolor is None:
htype = 'step'
else:
htype = 'stepfilled'
if rotated:
orientation = 'horizontal'
else:
orientation = 'vertical'
ax.hist(values, bins=50, histtype=htype, orientation=orientation,
facecolor=fillcolor, edgecolor=color, ls=linestyle, lw=2,
density=True)
if percentiles is None:
percentiles = [5., 50., 95.]
if len(percentiles) > 0:
plotp = numpy.percentile(values, percentiles)
else:
plotp = []
for val in plotp:
if rotated:
ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3)
else:
ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3)
# plot expected
if expected_value is not None:
if rotated:
ax.axhline(expected_value, color=expected_color, lw=1.5, zorder=2)
else:
ax.axvline(expected_value, color=expected_color, lw=1.5, zorder=2)
if title:
if len(percentiles) > 0:
minp = min(percentiles)
maxp = max(percentiles)
medp = (maxp + minp) / 2.
else:
minp = 5
medp = 50
maxp = 95
values_min = numpy.percentile(values, minp)
values_med = numpy.percentile(values, medp)
values_max = numpy.percentile(values, maxp)
negerror = values_med - values_min
poserror = values_max - values_med
fmt = '${0}$'.format(str_utils.format_value(
values_med, negerror, plus_error=poserror))
if rotated:
ax.yaxis.set_label_position("right")
# sets colored title for marginal histogram
set_marginal_histogram_title(ax, fmt, color,
label=label, rotated=rotated)
else:
# sets colored title for marginal histogram
set_marginal_histogram_title(ax, fmt, color, label=label)
# remove ticks and set limits
if rotated:
# Remove x-ticks
ax.set_xticks([])
# turn off x-labels
ax.set_xlabel('')
# set limits
ymin, ymax = ax.get_ylim()
if plot_min is not None:
ymin = plot_min
if plot_max is not None:
ymax = plot_max
ax.set_ylim(ymin, ymax)
else:
# Remove y-ticks
ax.set_yticks([])
# turn off y-label
ax.set_ylabel('')
# set limits
xmin, xmax = ax.get_xlim()
if plot_min is not None:
xmin = plot_min
if plot_max is not None:
xmax = plot_max
ax.set_xlim(xmin, xmax)
def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False):
""" Sets the title of the marginal histograms.
Parameters
----------
ax : Axes
The `Axes` instance for the plot.
fmt : str
The string to add to the title.
color : str
The color of the text to add to the title.
label : str
If title does not exist, then include label at beginning of the string.
rotated : bool
If `True` then rotate the text 270 degrees for sideways title.
"""
# get rotation angle of the title
rotation = 270 if rotated else 0
# get how much to displace title on axes
xscale = 1.05 if rotated else 0.0
if rotated:
yscale = 1.0
elif len(ax.get_figure().axes) > 1:
yscale = 1.15
else:
yscale = 1.05
    # get class that packs text boxes vertically or horizontally
packer_class = offsetbox.VPacker if rotated else offsetbox.HPacker
# if no title exists
if not hasattr(ax, "title_boxes"):
# create a text box
title = "{} = {}".format(label, fmt)
tbox1 = offsetbox.TextArea(
title,
textprops=dict(color=color, size=15, rotation=rotation,
ha='left', va='bottom'))
# save a list of text boxes as attribute for later
ax.title_boxes = [tbox1]
# pack text boxes
ybox = packer_class(children=ax.title_boxes,
align="bottom", pad=0, sep=5)
# else append existing title
else:
# delete old title
ax.title_anchor.remove()
# add new text box to list
tbox1 = offsetbox.TextArea(
" {}".format(fmt),
textprops=dict(color=color, size=15, rotation=rotation,
ha='left', va='bottom'))
ax.title_boxes = ax.title_boxes + [tbox1]
# pack text boxes
ybox = packer_class(children=ax.title_boxes,
align="bottom", pad=0, sep=5)
# add new title and keep reference to instance as an attribute
anchored_ybox = offsetbox.AnchoredOffsetbox(
loc=2, child=ybox, pad=0.,
frameon=False, bbox_to_anchor=(xscale, yscale),
bbox_transform=ax.transAxes, borderpad=0.)
ax.title_anchor = ax.add_artist(anchored_ybox)
def create_multidim_plot(parameters, samples, labels=None,
mins=None, maxs=None, expected_parameters=None,
expected_parameters_color='r',
plot_marginal=True, plot_scatter=True,
marginal_percentiles=None, contour_percentiles=None,
marginal_title=True, marginal_linestyle='-',
zvals=None, show_colorbar=True, cbar_label=None,
vmin=None, vmax=None, scatter_cmap='plasma',
plot_density=False, plot_contours=True,
density_cmap='viridis',
contour_color=None, hist_color='black',
line_color=None, fill_color='gray',
use_kombine=False, fig=None, axis_dict=None):
"""Generate a figure with several plots and histograms.
Parameters
----------
parameters: list
Names of the variables to be plotted.
samples : FieldArray
A field array of the samples to plot.
labels: dict, optional
A dictionary mapping parameters to labels. If none provided, will just
use the parameter strings as the labels.
mins : {None, dict}, optional
Minimum value for the axis of each variable in `parameters`.
If None, it will use the minimum of the corresponding variable in
`samples`.
maxs : {None, dict}, optional
Maximum value for the axis of each variable in `parameters`.
If None, it will use the maximum of the corresponding variable in
`samples`.
expected_parameters : {None, dict}, optional
Expected values of `parameters`, as a dictionary mapping parameter
names -> values. A cross will be plotted at the location of the
expected parameters on axes that plot any of the expected parameters.
expected_parameters_color : {'r', string}, optional
What color to make the expected parameters cross.
plot_marginal : {True, bool}
Plot the marginalized distribution on the diagonals. If False, the
diagonal axes will be turned off.
plot_scatter : {True, bool}
Plot each sample point as a scatter plot.
marginal_percentiles : {None, array}
What percentiles to draw lines at on the 1D histograms.
If None, will draw lines at `[5, 50, 95]` (i.e., the bounds on the
upper 90th percentile and the median).
marginal_title : bool, optional
Add a title over the 1D marginal plots that gives an estimated value
        +/- uncertainty. The estimated value is the percentile halfway between
        the max/min of ``marginal_percentiles``, while the uncertainty is given
        by the max/min of the ``marginal_percentiles``. If no
``marginal_percentiles`` are specified, the median +/- 95/5 percentiles
will be quoted.
marginal_linestyle : str, optional
What line style to use for the marginal histograms.
contour_percentiles : {None, array}
What percentile contours to draw on the scatter plots. If None,
will plot the 50th and 90th percentiles.
zvals : {None, array}
An array to use for coloring the scatter plots. If None, scatter points
will be the same color.
show_colorbar : {True, bool}
Show the colorbar of zvalues used for the scatter points. A ValueError
will be raised if zvals is None and this is True.
cbar_label : {None, str}
Specify a label to add to the colorbar.
vmin: {None, float}, optional
Minimum value for the colorbar. If None, will use the minimum of zvals.
vmax: {None, float}, optional
Maximum value for the colorbar. If None, will use the maxmimum of
zvals.
scatter_cmap : {'plasma', string}
The color map to use for the scatter points. Default is 'plasma'.
plot_density : {False, bool}
Plot the density of points as a color map.
plot_contours : {True, bool}
Draw contours showing the 50th and 90th percentile confidence regions.
density_cmap : {'viridis', string}
The color map to use for the density plot.
contour_color : {None, string}
The color to use for the contour lines. Defaults to white for
density plots, navy for scatter plots without zvals, and black
otherwise.
use_kombine : {False, bool}
Use kombine's KDE to calculate density. Otherwise, will use
`scipy.stats.gaussian_kde.` Default is False.
Returns
-------
fig : pyplot.figure
The figure that was created.
axis_dict : dict
A dictionary mapping the parameter combinations to the axis and their
location in the subplots grid; i.e., the key, values are:
`{('param1', 'param2'): (pyplot.axes, row index, column index)}`
"""
if labels is None:
labels = {p: p for p in parameters}
# set up the figure with a grid of axes
# if only plotting 2 parameters, make the marginal plots smaller
nparams = len(parameters)
if nparams == 2:
width_ratios = [3, 1]
height_ratios = [1, 3]
else:
width_ratios = height_ratios = None
# only plot scatter if more than one parameter
plot_scatter = plot_scatter and nparams > 1
# Sort zvals to get higher values on top in scatter plots
if plot_scatter:
if zvals is not None:
sort_indices = zvals.argsort()
zvals = zvals[sort_indices]
samples = samples[sort_indices]
if contour_color is None:
contour_color = 'k'
elif show_colorbar:
raise ValueError("must provide z values to create a colorbar")
else:
# just make all scatter points same color
zvals = 'gray'
if plot_contours and contour_color is None:
contour_color = 'navy'
# convert samples to a dictionary to avoid re-computing derived parameters
# every time they are needed
samples = dict([[p, samples[p]] for p in parameters])
# values for axis bounds
if mins is None:
mins = {p: samples[p].min() for p in parameters}
else:
# copy the dict
mins = {p: val for p, val in mins.items()}
if maxs is None:
maxs = {p: samples[p].max() for p in parameters}
else:
# copy the dict
maxs = {p: val for p, val in maxs.items()}
# create the axis grid
if fig is None and axis_dict is None:
fig, axis_dict = create_axes_grid(
parameters, labels=labels,
width_ratios=width_ratios, height_ratios=height_ratios,
no_diagonals=not plot_marginal)
# Diagonals...
if plot_marginal:
for pi, param in enumerate(parameters):
ax, _, _ = axis_dict[param, param]
# if only plotting 2 parameters and on the second parameter,
# rotate the marginal plot
rotated = nparams == 2 and pi == nparams-1
# see if there are expected values
if expected_parameters is not None:
try:
expected_value = expected_parameters[param]
except KeyError:
expected_value = None
else:
expected_value = None
create_marginalized_hist(
ax, samples[param], label=labels[param],
color=hist_color, fillcolor=fill_color,
linestyle=marginal_linestyle, linecolor=line_color,
title=marginal_title, expected_value=expected_value,
expected_color=expected_parameters_color,
rotated=rotated, plot_min=mins[param], plot_max=maxs[param],
percentiles=marginal_percentiles)
# Off-diagonals...
for px, py in axis_dict:
if px == py:
continue
ax, _, _ = axis_dict[px, py]
if plot_scatter:
if plot_density:
alpha = 0.3
else:
alpha = 1.
plt = ax.scatter(x=samples[px], y=samples[py], c=zvals, s=5,
edgecolors='none', vmin=vmin, vmax=vmax,
cmap=scatter_cmap, alpha=alpha, zorder=2)
if plot_contours or plot_density:
# Exclude out-of-bound regions
# this is a bit kludgy; should probably figure out a better
# solution to eventually allow for more than just m_p m_s
if (px == 'm_p' and py == 'm_s') or (py == 'm_p' and px == 'm_s'):
exclude_region = 'm_s > m_p'
else:
exclude_region = None
create_density_plot(
px, py, samples, plot_density=plot_density,
plot_contours=plot_contours, cmap=density_cmap,
percentiles=contour_percentiles,
contour_color=contour_color, xmin=mins[px], xmax=maxs[px],
ymin=mins[py], ymax=maxs[py],
exclude_region=exclude_region, ax=ax,
use_kombine=use_kombine)
if expected_parameters is not None:
try:
ax.axvline(expected_parameters[px], lw=1.5,
color=expected_parameters_color, zorder=5)
except KeyError:
pass
try:
ax.axhline(expected_parameters[py], lw=1.5,
color=expected_parameters_color, zorder=5)
except KeyError:
pass
ax.set_xlim(mins[px], maxs[px])
ax.set_ylim(mins[py], maxs[py])
# adjust tick number for large number of plots
if len(parameters) > 3:
for px, py in axis_dict:
ax, _, _ = axis_dict[px, py]
ax.set_xticks(reduce_ticks(ax, 'x', maxticks=3))
ax.set_yticks(reduce_ticks(ax, 'y', maxticks=3))
if plot_scatter and show_colorbar:
# compute font size based on fig size
scale_fac = get_scale_fac(fig)
fig.subplots_adjust(right=0.85, wspace=0.03)
cbar_ax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
cb = fig.colorbar(plt, cax=cbar_ax)
if cbar_label is not None:
cb.set_label(cbar_label, fontsize=12*scale_fac)
cb.ax.tick_params(labelsize=8*scale_fac)
return fig, axis_dict
def remove_common_offset(arr):
"""Given an array of data, removes a common offset > 1000, returning the
removed value.
"""
offset = 0
isneg = (arr <= 0).all()
# make sure all values have the same sign
if isneg or (arr >= 0).all():
# only remove offset if the minimum and maximum values are the same
        # order of magnitude and > O(1000)
minpwr = numpy.log10(abs(arr).min())
maxpwr = numpy.log10(abs(arr).max())
if numpy.floor(minpwr) == numpy.floor(maxpwr) and minpwr > 3:
offset = numpy.floor(10**minpwr)
if isneg:
offset *= -1
arr = arr - offset
return arr, int(offset)
def reduce_ticks(ax, which, maxticks=3):
"""Given a pyplot axis, resamples its `which`-axis ticks such that are at most
`maxticks` left.
Parameters
----------
ax : axis
The axis to adjust.
which : {'x' | 'y'}
Which axis to adjust.
maxticks : {3, int}
Maximum number of ticks to use.
Returns
-------
array
An array of the selected ticks.
"""
ticks = getattr(ax, 'get_{}ticks'.format(which))()
if len(ticks) > maxticks:
# make sure the left/right value is not at the edge
minax, maxax = getattr(ax, 'get_{}lim'.format(which))()
dw = abs(maxax-minax)/10.
start_idx, end_idx = 0, len(ticks)
if ticks[0] < minax + dw:
start_idx += 1
if ticks[-1] > maxax - dw:
end_idx -= 1
# get reduction factor
fac = int(len(ticks) / maxticks)
ticks = ticks[start_idx:end_idx:fac]
return ticks
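# A minimal, self-contained usage sketch (hedged): the parameter names, sample
# counts and output filename below are illustrative only, and the block runs
# only when this module is executed directly.
if __name__ == '__main__':
    _demo_samples = FieldArray.from_kwargs(
        mass1=numpy.random.uniform(10., 50., size=1000),
        mass2=numpy.random.uniform(10., 50., size=1000))
    _demo_fig, _demo_axis_dict = create_multidim_plot(
        ['mass1', 'mass2'], _demo_samples,
        plot_scatter=True, show_colorbar=False,
        plot_density=False, plot_contours=True)
    _demo_fig.savefig('multidim_demo.png')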
| gpl-3.0 |
arthuc01/Molecular-Pathways-from-molecular-similarity | mol-similarity-pathways.py | 1 | 1816 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 30 10:52:04 2015
@author: chxcja
Can we reconstruct a biosynthetic pathway simply from the structures?
It turns out the answer is no - but you can get large contiguous parts of the
pathway, provided there is chemical insight to identify unrealistic
chemical or metabolic transformations.
This model uses some of the structures from the pseudomonic acid pathway
see cdx file
"""
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit.Chem import Draw
import networkx as nx
G=nx.Graph()
mols = Chem.SmilesMolSupplier('pseudomonicAcid-pathway-smiles.txt')
print len(mols)
#set up nodes from each molecule
G.add_nodes_from([0,1,2,3,4,5,6])
#fps = [FingerprintMols.FingerprintMol(x) for x in mols]
#Morgan fingerprints work better - radius doesn't seem to make much diff.
fps = [AllChem.GetMorganFingerprintAsBitVect(x,2, nBits=1024) for x in mols]
for mol in range(len(mols)-1):
#fig = Draw.MolToMPL(mols[mol])
max2Start=0
max2Temp=0
maxStart=0
maxTemp=0
for mol2 in range(mol, len(mols)):
if mol != mol2:
temp = DataStructs.FingerprintSimilarity(fps[mol],fps[mol2])
            if temp>maxTemp:
                # previous best match becomes the second-best
                max2Start = maxStart
                max2Temp = maxTemp
                maxTemp = temp
                maxStart = mol2
print mol, mol2, temp
#Create node between most similar molecules
G.add_edge(mol, maxStart,weight = maxTemp, len=(1-maxTemp))
#G.add_edge(mol, max2Start,weight = max2Temp, len=(1-max2Temp))
print "**", mol, maxStart, maxTemp
import matplotlib.pyplot as plt
nx.draw(G, prog = "neato", with_labels=True)
print(nx.shortest_path(G,source=0,target=6))
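# Optional inspection sketch (added; the attribute name follows the add_edge
# call above, Python 2 print style to match the script):
# for u, v, d in G.edges(data=True):
#     print u, v, d['weight']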
| mit |
aydevosotros/TFG-AntonioMolina | TFG/RBFNN/RBFNN.py | 1 | 7224 | '''
Created on Jun 14, 2014
Based on an RBFNN for Python written by Thomas Rueckstiess.
http://www.rueckstiess.net/research/snippets/show/72d2363e
Added by Antonio Molina:
- K-means clustering
@author: antonio
'''
from enum import Enum
from scipy import *
import numpy as np
from scipy.linalg import norm, pinv
from scipy.cluster.vq import kmeans
from matplotlib import pyplot as plt
from benchmarks.DataGenerator import DataGenerator
from scipy.optimize.optimize import fmin_cg, fmin_bfgs
from scipy.optimize import minimize
from scipy.spatial import distance
# class MinimizationMethods(Enum):
# metaplasticity = 'metaplasticity'
# NelderMead = 'Nelder-Mead'
# Powell = 'Powell'
# CG = 'CG'
# BFGS = 'BFGS'
# NewtonCG = 'Newton-CG'
# LBFGSB = 'L-BFGS-B'
# TNC = 'TNC'
# MinimizationMethods = Enum(METAPLASTICITY = 'metaplasticity', NELDERMEAD = 'Nelder-Mead', POWELL = 'Powell', CG = 'CG', BFGS = 'BFGS', NEWTONGC = 'Newton-CG', LBFGSB = 'L-BFGS-B', TNC = 'TNC', COBYLA = 'COBYLA')
class RBFNN(object):
def __init__(self, indim, numCenters, outdim, trainingCentroidsMethod="knn", trainingWeightsMethod="BFGS", beta=1.0/8.0, metaplasticity=False):
self.indim = indim
self.outdim = outdim
self.numCenters = numCenters
self.centers = [random.uniform(-1, 1, indim) for i in xrange(numCenters)]
self.beta = beta
self.trainingCentroidsMethod=trainingCentroidsMethod
self.trainingWeightsMethod = trainingWeightsMethod
self.W = random.random((self.numCenters, self.outdim))
self.metaplasticity = metaplasticity
self.radialFunc = "iGaussian"
self.gError = zeros(0)
def _basisfunc(self, c, d):
assert len(d) == self.indim
return exp(-self.beta * norm(c-d)**2)
def _gaussianFunc(self, c, d):
return exp(-((self.beta * norm(c-d))**2))
def _isotropicGaussian(self, c, d):
return exp((-(self.numCenters/(self.dm)**2))*(distance.euclidean(c, d)**2))
def _calcAct(self, X):
G = zeros((X.shape[0], self.numCenters), float)
for ci, c in enumerate(self.centers):
for xi, x in enumerate(X):
if self.radialFunc == "gaussian":
G[xi,ci] = self._gaussianFunc(c, x)
elif self.radialFunc == "iGaussian":
G[xi,ci] = self._isotropicGaussian(c, x)
else:
G[xi,ci] = self._basisfunc(c, x)
return G
def _costBasic(self, W, *args):
X, Y = args
ej = 0.0
# Error computation
for i, xi in enumerate(X):
fx = dot(transpose(self.G[i]), W)
ej += ((Y[i]-fx)**2)
ej = ej/len(X)
self.gError = append(self.gError, ej)
return ej
def _costFunction(self, W, *args):
X, Y = args
ej = 0.0
# Error computation
for i, xi in enumerate(X):
fx = dot(transpose(self.G[i]), W)
p = (1.0) if not self.metaplasticity else 1-self.P[i]
ej += ((Y[i]-fx)**2)*(1/p)
ej = ej/len(X)
self._costBasic(W, X, Y)
return ej
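    # Note (comment added for clarity): with metaplasticity enabled, each
    # sample's squared error is weighted by 1/(1 - P(x)), where P(x) is the
    # kernel-density estimate computed in _pEstimator, so patterns in
    # low-density regions contribute more strongly to the cost.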
def _pEstimator(self, x):
p=0.0
for k in xrange(self.numCenters):
p+=self._isotropicGaussian(self.centers[k], x)
return p/self.numCenters
def _minimization(self, X, Y, method=None):
w = np.copy(self.W)
self.it = 0
if method == 'metaplasticity':
res = minimize(self._costFunction, w, method = 'Nelder-Mead', args = (X,Y))
else:
res = minimize(self._costFunction, w, method = method, args = (X,Y))
print res
self.W = res.x
def _cgmin(self, X, Y):
w = np.copy(self.W)
res = fmin_bfgs(self._costFunction, w, args=(X,Y), full_output=1, retall=1)
self.allvec = res[-1]
self.W = res[0]
self.gradEval = res[-2]
def _gcb(self, xk):
self.it+=1
print 'The cost for the it: ', self.it, ' is: ', xk
def _gradientDescent(self, X, Y, iterations):
error = np.zeros(iterations, float)
G = self._calcAct(X)
for it in xrange(iterations):
ej = self._costFunction(self.W, X, Y)
            gj = self._partialCost(self.W, X, Y)  # NOTE: _partialCost is not defined in this class
error[it] = ej;
print "El error para la iteracion %d es %.15f y el gradiente:\n"%(it, ej), gj
self.W = self.W - 0.1*gj
plt.clf()
plt.plot(range(iterations), error)
# plt.show()
def train(self, X, Y):
""" X: matrix of dimensions n x indim
y: column vector of dimension n x 1 """
if self.trainingCentroidsMethod == "random":
print 'Training with randomly chosen centroids'
rnd_idx = random.permutation(X.shape[0])[:self.numCenters]
self.centers = [X[i,:] for i in rnd_idx]
elif self.trainingCentroidsMethod == "knn":
print 'Training with centroids from k-means algorithm'
self.centers = kmeans(X, self.numCenters)[0]
else:
print "You must set the training method"
return
# calculate activations of RBFs
        # For the isotropic Gaussian, compute the maximum distance between centers
dis = [distance.euclidean(self.centers[i], self.centers[j]) for i in range(self.numCenters) for j in range(self.numCenters) if i != j]
self.dm = np.amax(dis)
self.P = [self._pEstimator(x[1]) for x in enumerate(X)]
self.G = self._calcAct(X)
# print self.G
# self._gradientDescent(X, Y, 7000)
if self.trainingWeightsMethod == "pseudoinverse":
self.W = dot(pinv(self.G), Y)
elif self.trainingWeightsMethod == "cgmin":
self._cgmin(X, Y)
else:
# self._gradientDescent(X, Y, 100)
self._minimization(X, Y, self.trainingWeightsMethod)
# calculate output weights (pseudoinverse)
def test(self, X):
""" X: matrix of dimensions n x indim """
G = self._calcAct(X)
Y = dot(G, self.W)
return Y
# Some code to debug
if __name__ == '__main__':
print "Ejecutando codigo de debug"
dim=2
nSamples=500
dataGenerator = DataGenerator()
# dataGenerator.generateClusteredRandomData(nSamples, 2, dim)
dataGenerator.generateRealData('cancer', True)
perf = 0.0
print "InDim: ", len(dataGenerator.getTrainingX()[0])
for i in range(10):
rbfnn = RBFNN(len(dataGenerator.getTrainingX()[0]), 2, 1, 'knn', 'cgmin', metaplasticity=True)
rbfnn.train(dataGenerator.getTrainingX(), dataGenerator.getTrainingY())
perf += dataGenerator.verifyResult(rbfnn.test(dataGenerator.getValidationX()))
print "El rendimiento medio es: ", perf/10
#Plotting data
# colors = ["red", "blue"]
# for i,x in enumerate(dataGenerator.getTrainingX()):
# plt.plot(x[0], x[1], "o", color= colors[0] if dataGenerator.getTrainingY()[i]>0 else colors[1])
# plt.xlabel("First dimension")
# plt.ylabel("Second dimension")
# plt.show()
| gpl-2.0 |
weaver-viii/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_link_functions_gaussianGLM.py | 3 | 1458 | import sys
sys.path.insert(1, "../../../")
import h2o
import pandas as pd
import zipfile
import statsmodels.api as sm
def link_functions_gaussian(ip,port):
print("Read in prostate data.")
h2o_data = h2o.import_file(path=h2o.locate("smalldata/prostate/prostate_complete.csv.zip"))
h2o_data.head()
sm_data = pd.read_csv(zipfile.ZipFile(h2o.locate("smalldata/prostate/prostate_complete.csv.zip")).
open("prostate_complete.csv")).as_matrix()
sm_data_response = sm_data[:,9]
sm_data_features = sm_data[:,1:9]
print("Testing for family: GAUSSIAN")
print("Set variables for h2o.")
myY = "GLEASON"
myX = ["ID","AGE","RACE","CAPSULE","DCAPS","PSA","VOL","DPROS"]
print("Create models with canonical link: IDENTITY")
h2o_model = h2o.glm(x=h2o_data[myX], y=h2o_data[myY], family="gaussian", link="identity",alpha=[0.5], Lambda=[0])
sm_model = sm.GLM(endog=sm_data_response, exog=sm_data_features,
family=sm.families.Gaussian(sm.families.links.identity)).fit()
print("Compare model deviances for link function identity")
h2o_deviance = h2o_model.residual_deviance() / h2o_model.null_deviance()
sm_deviance = sm_model.deviance / sm_model.null_deviance
assert h2o_deviance - sm_deviance < 0.01, "expected h2o to have an equivalent or better deviance measures"
if __name__ == "__main__":
h2o.run_test(sys.argv, link_functions_gaussian)
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/indexes/period/test_setops.py | 15 | 10772 | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.core.indexes.period as period
from pandas import period_range, PeriodIndex, Index, date_range
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestPeriodIndex(object):
def setup_method(self, method):
pass
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
assert isinstance(joined, PeriodIndex)
assert joined.freq == index.freq
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
assert index is res
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(
3, 2, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                                 '2000-01-01 09:08'],
                                 freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_union_misc(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
tm.assert_index_equal(result, index)
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
tm.assert_index_equal(result, index)
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with pytest.raises(period.IncompatibleFrequency):
index.union(index2)
msg = 'can only call with other PeriodIndex-ed objects'
with tm.assert_raises_regex(ValueError, msg):
index.join(index.to_timestamp())
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with pytest.raises(period.IncompatibleFrequency):
index.join(index3)
def test_union_dataframe_index(self):
rng1 = pd.period_range('1/1/1999', '1/1/2012', freq='M')
s1 = pd.Series(np.random.randn(len(rng1)), rng1)
rng2 = pd.period_range('1/1/1980', '12/1/2001', freq='M')
s2 = pd.Series(np.random.randn(len(rng2)), rng2)
df = pd.DataFrame({'s1': s1, 's2': s2})
exp = pd.period_range('1/1/1980', '1/1/2012', freq='M')
tm.assert_index_equal(df.index, exp)
def test_intersection(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].intersection(index[10:])
tm.assert_index_equal(result, index[10:-5])
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right).sort_values()
tm.assert_index_equal(result, index[10:-5])
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with pytest.raises(period.IncompatibleFrequency):
index.intersection(index2)
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with pytest.raises(period.IncompatibleFrequency):
index.intersection(index3)
def test_intersection_cases(self):
base = period_range('6/1/2000', '6/30/2000', freq='D', name='idx')
# if target has the same name, it is preserved
rng2 = period_range('5/15/2000', '6/20/2000', freq='D', name='idx')
expected2 = period_range('6/1/2000', '6/20/2000', freq='D',
name='idx')
# if target name is different, it will be reset
rng3 = period_range('5/15/2000', '6/20/2000', freq='D', name='other')
expected3 = period_range('6/1/2000', '6/20/2000', freq='D',
name=None)
rng4 = period_range('7/1/2000', '7/31/2000', freq='D', name='idx')
expected4 = PeriodIndex([], name='idx', freq='D')
for (rng, expected) in [(rng2, expected2), (rng3, expected3),
(rng4, expected4)]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
# non-monotonic
base = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
rng2 = PeriodIndex(['2011-01-04', '2011-01-02',
'2011-02-02', '2011-02-03'],
freq='D', name='idx')
expected2 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',
name='idx')
rng3 = PeriodIndex(['2011-01-04', '2011-01-02', '2011-02-02',
'2011-02-03'],
freq='D', name='other')
expected3 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',
name=None)
rng4 = period_range('7/1/2000', '7/31/2000', freq='D', name='idx')
expected4 = PeriodIndex([], freq='D', name='idx')
for (rng, expected) in [(rng2, expected2), (rng3, expected3),
(rng4, expected4)]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == 'D'
# empty same freq
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
assert len(result) == 0
result = rng.intersection(rng[0:0])
assert len(result) == 0
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
| mit |
stefangri/s_s_productions | PHY341/V356_Kettenschaltungen/Messdaten/auswertung.py | 1 | 12836 | import numpy as np
import uncertainties.unumpy as unp
from uncertainties import ufloat
import math
import latex
from uncertainties.umath import *
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from pint import UnitRegistry
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
u = UnitRegistry()
Q_ = u.Quantity
# Apparatus constants
L = Q_(1.75e-3, 'henry')
C = Q_(22.0e-9, 'farad')
C_1 = C
C_2 = Q_(9.39e-9, 'farad')
theta = np.linspace(-0.2 * np.pi, np.pi, 1000)
# Dispersion curves
# identical capacitors
def omega(theta):
return np.sqrt( (2 / (L.magnitude * C.magnitude)) * (1 - np.cos(theta) ) )
def nu(omega):
return 1/(2*np.pi) * omega
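# Reference (comment added): the theory curve above is the LC-ladder dispersion
# relation omega(theta) = sqrt(2/(L*C) * (1 - cos(theta))); its cutoff
# omega_G = 2/sqrt(L*C) is reached at theta = pi.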
# different capacitors
def omega1(theta):
return np.sqrt( 1/ L.magnitude * (1/C_1.magnitude + 1/C_2.magnitude) + 1/L.magnitude*np.sqrt( (1/C_1.magnitude + 1/C_2.magnitude)**2 - 4*np.sin(theta)**2/(C_1.magnitude*C_2.magnitude) ))
def omega2(theta):
return np.sqrt( 1/ L.magnitude * (1/C_1.magnitude + 1/C_2.magnitude) - 1/L.magnitude*np.sqrt( (1/C_1.magnitude + 1/C_2.magnitude)**2 - 4*np.sin(theta)**2/(C_1.magnitude*C_2.magnitude) ))
def f(x, A, B, C):
return A * np.exp(B * x) + C
def f_cos(x, A, B, C, D):
return A * np.cos(B*x + C) + D
# Phase velocity
def v_phase(nus):
return 2 * np.pi * nus / ( np.arccos(1 - 0.5 * (2 * np.pi * nus)**2 * L.magnitude * C.magnitude) )
# Group velocity
def v_gruppe(nus):
    return np.sqrt( 1/(L.magnitude * C.magnitude) * (1 - 0.25 * L.magnitude * C.magnitude * (2*np.pi*nus)**2) )
def impedanz_plot(omega):
return np.sqrt(L.magnitude / C.magnitude) * 1/np.sqrt( 1 - 0.25 * omega**2 * L.magnitude * C.magnitude )
def impedanz(omega):
return np.sqrt(L / C) * 1/np.sqrt( 1 - 0.25 * omega**2 * L * C )
#variabel_1,variabel_2=np.genfromtxt('name.txt',unpack=True)
# Read in the measured values
eigenfrequenzen_a_LC = np.genfromtxt('eigenfrequenzen_a_LC.txt', unpack=True)
range_lin = np.linspace(1, len(eigenfrequenzen_a_LC), 12)
Phasenverschiebung_pro_glied_LC = np.pi * range_lin / 16
latex.Latexdocument('tabs/eigenfrequenzen_dispersion_LC.tex').tabular([Phasenverschiebung_pro_glied_LC, eigenfrequenzen_a_LC],
'{$\\theta$} & {$\\nu$ in $\si{\hertz}$}', [1, 0],
caption = 'LC-Kette, Gemessene Frequenzen mit zugeordnetem Phasenversatz pro Glied', label = 'tab: dispersion_LC')
eigenfrequenzen_a_LC1C2 = np.genfromtxt('eigenfrequenzen_a_LC1C2.txt', unpack=True)
range_lin = np.linspace(1, len(eigenfrequenzen_a_LC1C2), len(eigenfrequenzen_a_LC1C2))
Phasenverschiebung_pro_glied_LC1C2 = np.pi * range_lin / 16
for i in range(0, len(Phasenverschiebung_pro_glied_LC1C2)):
if (Phasenverschiebung_pro_glied_LC1C2[i] > np.pi/2):
Phasenverschiebung_pro_glied_LC1C2[i] -= 2*(Phasenverschiebung_pro_glied_LC1C2[i] - np.pi/2)
latex.Latexdocument('tabs/eigenfrequenzen_dispersion_LC1C2.tex').tabular([Phasenverschiebung_pro_glied_LC1C2, eigenfrequenzen_a_LC1C2],
'{$\\theta$} & {$\\nu$ in $\si{\hertz}$}', [1, 0],
caption = '$LC_1C_2$-Kette, Gemessene Frequenzen mit zugeordnetem Phasenversatz pro Glied', label = 'tab: dispersion_LC1C2')
frequenzen_sweep_LC = np.genfromtxt('frequenz_sweep_LC.txt', unpack = True)
x_range_LC = ((np.linspace(1, len(frequenzen_sweep_LC), len(frequenzen_sweep_LC))-1) * 4)[::-1]
params_LC, covariance_LC = curve_fit(f, x_range_LC, frequenzen_sweep_LC)
errors_LC = np.sqrt(np.diag(covariance_LC))
A_param = ufloat(params_LC[0], errors_LC[0])
B_param = ufloat(params_LC[1], errors_LC[1])
C_param = ufloat(params_LC[2], errors_LC[2])
print('Fit parameters LC, A= ', A_param, ' B= ', B_param, ' C= ', C_param)
latex.Latexdocument('tabs/sweep_LC.tex').tabular([x_range_LC[::-1], frequenzen_sweep_LC[::-1]],
'{x in $\si{\centi\meter}$} & {Frequenzen in $\si{\hertz}$}', [0, 0],
caption = 'LC-Kette, Referenzpunkte für den Frequenzsweep', label = 'tab: sweep_LC')
def frequenz_sweep_LC(x):
return A_param * exp(B_param * x) + C_param
frequenzen_sweep_LC1C2 = np.genfromtxt('frequenz_sweep_LC1C2.txt', unpack = True)
x_range_LC1C2 = ((np.linspace(1, len(frequenzen_sweep_LC1C2), len(frequenzen_sweep_LC1C2))-1) * 4)[::-1]
print(x_range_LC1C2)
params_LC1C2, covariance_LC1C2 = curve_fit(f, x_range_LC1C2, frequenzen_sweep_LC1C2)
errors_LC1C2 = np.sqrt(np.diag(covariance_LC1C2))
A_param_LC1C2 = ufloat(params_LC1C2[0], errors_LC1C2[0])
B_param_LC1C2 = ufloat(params_LC1C2[1], errors_LC1C2[1])
C_param_LC1C2 = ufloat(params_LC1C2[2], errors_LC1C2[2])
latex.Latexdocument('tabs/sweep_LC1C2.tex').tabular([x_range_LC1C2[::-1], frequenzen_sweep_LC1C2[::-1]],
'{x in $\si{\centi\meter}$} & {Frequenzen in $\si{\hertz}$}', [0, 0],
caption = '$LC_1C_2$-Kette, Referenzpunkte für den Frequenzsweep', label = 'tab: sweep_LC1C2')
def frequenz_sweep_LC1C2(x):
return A_param_LC1C2 * exp(B_param_LC1C2 * x) + C_param_LC1C2
print('Fit parameters LC1C2, A= ', A_param_LC1C2, ' B= ', B_param_LC1C2, ' C= ', C_param_LC1C2)
# Calculations
# Theoretical values
omega_G_LC = 2*np.sqrt(1 / (L.magnitude * C.magnitude))
print('Theoretical cutoff frequency LC: ', nu(omega_G_LC))
omega_G_u_LC1C2 = np.sqrt(2 / (L * C_1))
omega_G_o_LC1C2 = np.sqrt(2 / (L * C_2))
omega_G_korrektur = np.sqrt(2/L * (C_1 + C_2)/(C_1 * C_2))
print('Theoretical lower cutoff frequency LC1C2: ', nu(omega_G_u_LC1C2))
print('Theoretical upper cutoff frequency LC1C2: ', nu(omega_G_o_LC1C2))
print('Theoretical corrected cutoff frequency LC1C2: ', nu(omega_G_korrektur))
# Calculations
distance_f_g_LC = ufloat(10.2, 0.5)
distance_f_g_u_LC1C2 = ufloat(6.5, 0.5)
distance_f_g_o_LC1C2 = ufloat(10.7, 0.5)
print('Cutoff frequency LC determined via the sweep method: ', frequenz_sweep_LC(distance_f_g_LC))
print('Percentage deviation: ', (frequenz_sweep_LC(distance_f_g_LC))/nu(omega_G_LC) - 1)
print('Lower cutoff frequency LC1C2 determined via the sweep method: ', frequenz_sweep_LC1C2(distance_f_g_u_LC1C2))
print('Percentage deviation: ', (frequenz_sweep_LC1C2(distance_f_g_u_LC1C2))/nu(omega_G_u_LC1C2.magnitude) - 1)
print('Upper cutoff frequency LC1C2 determined via the sweep method: ', frequenz_sweep_LC1C2(distance_f_g_o_LC1C2))
print('Percentage deviation: ', (frequenz_sweep_LC1C2(distance_f_g_o_LC1C2))/nu(omega_G_o_LC1C2.magnitude) - 1)
print('Corrected cutoff frequency LC1C2 determined via the sweep method: ', frequenz_sweep_LC1C2(ufloat(14, 0.5)))
print('Percentage deviation: ', (frequenz_sweep_LC1C2(ufloat(14, 0.5))/nu(omega_G_korrektur).magnitude) - 1)
# Phase velocity
eigenfrequenzen_offen = np.genfromtxt('eigenfrequenzen_offen.txt', unpack=True)
range_lin = np.linspace(1, len(eigenfrequenzen_offen), len(eigenfrequenzen_offen))
Phasenverschiebung_offen = np.pi * range_lin / 16
Phasengeschwindigkeit = 2 * np.pi * eigenfrequenzen_offen/Phasenverschiebung_offen
latex.Latexdocument('tabs/v_phase_LC.tex').tabular([Phasenverschiebung_offen, eigenfrequenzen_offen, Phasengeschwindigkeit],
'{Phasenverschiebung $\\theta$} & {Frequenzen in $\si{\hertz}$} & {$v_{ph}$ in $\si{\meter\per\second}$}', [0, 0, 0],
caption = 'Eigenfrequenzen der LC Kette und berechnete Phasengeschwindigkeiten', label = 'tab: v_phase')
# Standing waves
#nu1 = 5092 Hz
messpunkte = np.linspace(1, 17, 17)
print(messpunkte)
spannungsverlauf_nu1 = np.genfromtxt('spannung_nu1.txt', unpack=True)
#latex.Latexdocument('tabs/spannung_nu1.tex').tabular([messpunkte, spannungsverlauf_nu1],
#'{Messpunkte} & {Spannung in $\si{\\volt}$}', [0, 2],
#caption = 'Spannungsverlauf unter der Eigenfrequenz $\\nu_1$', label = 'tab: U_nu1')
spannungsverlauf_nu2 = np.genfromtxt('spannung_nu2.txt', unpack=True)
spannungsverlauf_geschlossen = np.genfromtxt('spannung_geschlossen.txt', unpack=True)
latex.Latexdocument('tabs/spannung_nu1.tex').tabular([messpunkte, spannungsverlauf_nu1, spannungsverlauf_nu2, spannungsverlauf_geschlossen],
'{Messpunkte} & {$U_{\\nu_1}$ in $\si{\\volt}$} & {$U_{\\nu_2}$ in $\si{\\volt}$} & {$U_{G}$ in $\si{\\volt}$ }', [0, 2, 2, 2],
caption = 'Spannungsverläufe $U_{\\nu_1}$ und $U_{\\nu_2}$ unter den Eigenfrequenzen $\\nu_1$ und $\\nu_2$ bei der offenen LC-Kette; Spannungsverlauf $U_{G}$ der geschlossenen LC-Kette', label = 'tab: U_nu12')
#params_nu1, covariance_nu1 = curve_fit(f_cos, messpunkte, spannungsverlauf_nu1)
#params_nu2, covariance_nu2 = curve_fit(f_cos, messpunkte, spannungsverlauf_nu2)
spannungsverlauf_geschlossen = np.genfromtxt('spannung_geschlossen.txt', unpack=True)
#Theory plots of the dispersion relation
plt.plot(theta, nu(omega(theta))/1000, label='Dispersionskurve $\\nu(\\theta)$' )
plt.plot(Phasenverschiebung_pro_glied_LC, eigenfrequenzen_a_LC/1000, 'rx', label = 'Messdaten')
plt.plot(theta, np.ones(len(theta))*nu(omega_G_LC)/1000, 'b--', label='Grenzfrequenz $\\nu_G$' )
print(nu(omega_G_LC))
plt.ylabel('Frequenz $\\nu$ in kHz')
plt.xlabel('Phasenverschiebung pro Glied $\\theta$')
plt.xlim(0, theta[-1])
plt.xticks([0, np.pi/8, np.pi / 4, 3*np.pi/8 , np.pi/2, 5 * np.pi/8, 3*np.pi/4, 7*np.pi/8, np.pi],
[r"$0$", r"$\frac{\pi}{8}$", r"$\frac{\pi}{4}$", r"$\frac{3\pi}{8}$", r"$\frac{\pi}{2}$",
r"$\frac{5\pi}{8}$", r"$\frac{3\pi}{4}$", r"$\frac{7\pi}{8}$", r"$\pi$"], fontsize = 16)
plt.legend(loc='best')
plt.grid()
plt.savefig('plots/dispersion.pdf')
#Standing waves
plt.clf()
m_range = np.linspace(0, 18, 100)
plt.plot(messpunkte, spannungsverlauf_nu1, 'bo', label='Messwerte')
#plt.plot(m_range, f_cos(m_range, *params_nu1), 'b-')
plt.grid()
plt.legend(loc='best')
plt.xlabel('Messpunkte')
plt.ylabel('Spannung in V')
plt.xlim(0.8, 17.2)
plt.xticks(messpunkte, [int(i) for i in messpunkte], fontsize = 10)
plt.ylim(0, 2)
plt.savefig('plots/spannungsverlauf_nu1.pdf')
plt.clf()
plt.plot(messpunkte, spannungsverlauf_nu2, 'bo', label='Messwerte')
#plt.plot(m_range, f_cos(m_range, *params_nu2), 'b-')
plt.grid()
plt.xlabel('Messpunkte')
plt.ylabel('Spannung in V')
plt.xticks(messpunkte, [int(i) for i in messpunkte], fontsize = 10)
plt.xlim(0.8, 17.2)
plt.ylim(-0.1, 2.5)
plt.legend(loc='best')
plt.savefig('plots/spannungsverlauf_nu2.pdf')
plt.clf()
plt.plot(messpunkte, spannungsverlauf_geschlossen, 'bo', label='Messwerte')
#plt.plot(m_range, f_cos(m_range, *params_nu2), 'b-')
plt.grid()
plt.xlabel('Messpunkte')
plt.ylabel('Spannung in V')
plt.xticks(messpunkte, [int(i) for i in messpunkte], fontsize = 10)
plt.xlim(0.8, 17.2)
plt.ylim(0.35, 0.5)
plt.legend(loc='best')
plt.savefig('plots/spannungsverlauf_geschlossen.pdf')
plt.clf()
plt.plot(theta, nu( omega1(theta) )/1000, label='$\\nu_1(\\theta)$' )
plt.plot(theta, nu( omega2(theta) )/1000, label='$\\nu_2(\\theta)$' )
plt.plot(Phasenverschiebung_pro_glied_LC1C2, eigenfrequenzen_a_LC1C2/1000, 'rx', label='Messwerte')
plt.plot(theta, np.ones(len(theta))*nu(omega_G_u_LC1C2)/1000, 'g--', label= 'Grenzfequenzen')
plt.plot(theta, np.ones(len(theta))*nu(omega_G_o_LC1C2)/1000, 'g--')
plt.plot(theta, np.ones(len(theta))*nu(omega_G_korrektur)/1000, 'g--')
plt.ylabel('Frequenz $\\nu$ in kHz')
plt.xlabel('Phasenverschiebung pro Glied $\\theta$')
plt.xlim(0, np.pi/2 + 0.02)
plt.xticks([0, np.pi/8, np.pi / 4, 3*np.pi/8 , np.pi/2],
[r"$0$", r"$\frac{\pi}{8}$", r"$\frac{\pi}{4}$", r"$\frac{3\pi}{8}$", r"$\frac{\pi}{2}$"], fontsize = 16)
plt.legend(loc='best')
plt.grid()
plt.savefig('plots/dispersion1.pdf')
plt.clf()
plt.plot(nu(omega(theta))/1000, v_phase(nu(omega(theta)))/1000, label='$v_{Ph}(\\nu)$' )
plt.plot(eigenfrequenzen_offen/1000, 2 * np.pi * eigenfrequenzen_offen/Phasenverschiebung_offen/1000, 'rx', label='Messwerte')
plt.ylabel('Phasengeschwindigkeit $v$ in rad/ms')
plt.xlabel('Frequenz $\\nu$ in kHz')
plt.xlim((eigenfrequenzen_offen[0]-1000)/1000, (eigenfrequenzen_offen[-1]+1000)/1000)
plt.legend(loc='best')
plt.grid()
plt.savefig('plots/v_phase.pdf')
#plt.clf()
#plt.plot( omega(theta), impedanz_plot(omega(theta)), label='$Z(\omega)$' )
#plt.ylabel('Impedanz $Z$')
#plt.xlabel('Kreisfrequenz $\omega$ in $1/s$')
#plt.xlim(omega(theta)[0], omega(theta)[-1])
#plt.legend(loc='best')
#plt.grid()
#plt.savefig('plots/impedanz.pdf')
x_lim = np.linspace(x_range_LC[0]+2, x_range_LC[-1]-2, 100)
plt.clf()
plt.plot(x_range_LC, frequenzen_sweep_LC/1000, 'rx', label='Messwerte')
plt.plot(x_lim, f(x_lim, *params_LC)/1000, 'b-', label='Fit $\\nu(x)$')
plt.xlabel('Abstand zum Nullpunkt in cm')
plt.ylabel('Frequenz $\\nu$ in kHz')
plt.grid()
plt.xlim(x_range_LC[-1]-2, x_range_LC[0]+2)
plt.legend(loc='best')
plt.savefig('plots/frequenzsweep_LC.pdf')
x_lim = np.linspace(x_range_LC1C2[0]+2, x_range_LC1C2[-1]-2, 100)
plt.clf()
plt.plot(x_range_LC1C2, frequenzen_sweep_LC1C2/1000, 'rx', label='Messwerte')
plt.plot(x_lim, f(x_lim, *params_LC1C2)/1000, 'b-', label='Fit $\\nu(x)$')
plt.xlabel('Abstand zum Nullpunkt in cm')
plt.ylabel('Frequenz $\\nu$ in Hz')
plt.xlim(x_range_LC1C2[-1]-2, x_range_LC1C2[0]+2)
plt.grid()
plt.legend(loc='best')
plt.savefig('plots/frequenzsweep_LC1C2.pdf')
| mit |
akshaybabloo/Car-ND | Term_1/advanced_lane_finding_10/perspective_transform_10_3.py | 1 | 3752 | import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
dist_pickle = pickle.load(open("wide_dist_pickle.p", "rb"))
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
# Read in an image
img = cv2.imread('test_image2.png')
nx = 8 # the number of inside corners in x
ny = 6 # the number of inside corners in y
# MODIFY THIS FUNCTION TO GENERATE OUTPUT
# THAT LOOKS LIKE THE IMAGE ABOVE
# def corners_unwarp(img, nx, ny, mtx, dist):
# # Pass in your image into this function
# # Write code to do the following steps
# # 1) Undistort using mtx and dist
# # 2) Convert to grayscale
# # 3) Find the chessboard corners
# # 4) If corners found:
# # a) draw corners
# # b) define 4 source points src = np.float32([[,],[,],[,],[,]])
# # Note: you could pick any four of the detected corners
# # as long as those four corners define a rectangle
# # One especially smart way to do this would be to use four well-chosen
# # corners that were automatically detected during the undistortion steps
# # We recommend using the automatic detection of corners in your code
# # c) define 4 destination points dst = np.float32([[,],[,],[,],[,]])
# # d) use cv2.getPerspectiveTransform() to get M, the transform matrix
# # e) use cv2.warpPerspective() to warp your image to a top-down view
# # delete the next two lines
# M = None
# warped = np.copy(img)
# return warped, M
def corners_unwarp(img, nx, ny, mtx, dist):
# Use the OpenCV undistort() function to remove distortion
undist = cv2.undistort(img, mtx, dist, None, mtx)
# Convert undistorted image to grayscale
gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
# Search for corners in the grayscaled image
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
if ret == True:
# If we found corners, draw them! (just for fun)
cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
# Choose offset from image corners to plot detected corners
# This should be chosen to present the result at the proper aspect ratio
# My choice of 100 pixels is not exact, but close enough for our purpose here
offset = 100 # offset for dst points
# Grab the image shape
img_size = (gray.shape[1], gray.shape[0])
# For source points I'm grabbing the outer four detected corners
src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
# For destination points, I'm arbitrarily choosing some points to be
# a nice fit for displaying our warped result
# again, not exact, but close enough for our purposes
dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
[img_size[0]-offset, img_size[1]-offset],
[offset, img_size[1]-offset]])
# Given src and dst points, calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(src, dst)
# Warp the image using OpenCV warpPerspective()
warped = cv2.warpPerspective(undist, M, img_size)
# Return the resulting image and matrix
return warped, M
top_down, perspective_M = corners_unwarp(img, nx, ny, mtx, dist)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(top_down)
ax2.set_title('Undistorted and Warped Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
| mit |
kundajelab/idr | setup.py | 3 | 2839 | import os, sys
import numpy
from setuptools import setup, Extension, find_packages
try:
from Cython.Build import cythonize
extensions = cythonize([
Extension("idr.inv_cdf",
["idr/inv_cdf.pyx", ],
include_dirs=[numpy.get_include()]),
])
except ImportError:
extensions = [
Extension("idr.inv_cdf",
["idr/inv_cdf.c", ],
include_dirs=[numpy.get_include()]),
]
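# The try/except above is the usual Cython fallback pattern: when Cython is
# available the .pyx source is (re)cythonized, otherwise the pre-generated
# idr/inv_cdf.c is compiled directly, so the extension can be built without
# a Cython installation.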
def main():
if sys.version_info.major <= 2:
raise ValueError( "IDR requires Python version 3 or higher" )
import idr
setup(
name = "idr",
version = idr.__version__,
author = "Nathan Boley",
author_email = "[email protected]",
ext_modules = extensions,
install_requires = [ 'scipy>=0.13.0', 'numpy' ],
extras_require={'PLOT': 'matplotlib'},
packages= ['idr',],
scripts = ['./bin/idr',],
description = ("IDR is a method for measuring the reproducibility of " +
"replicated ChIP-seq type experiments and providing a " +
"stable measure of the reproducibility of identified " +
"peaks."),
license = "GPL3",
keywords = "IDR",
url = "https://github.com/nboley/idr",
long_description="""
The IDR (Irreproducible Discovery Rate) framework is a unified approach to measure the reproducibility of findings identified from replicate experiments and provide highly stable thresholds based on reproducibility. Unlike the usual scalar measures of reproducibility, the IDR approach creates a curve, which quantitatively assesses when the findings are no longer consistent across replicates. In layman's terms, the IDR method compares a pair of ranked lists of identifications (such as ChIP-seq peaks). These ranked lists should not be pre-thresholded i.e. they should provide identifications across the entire spectrum of high confidence/enrichment (signal) and low confidence/enrichment (noise). The IDR method then fits the bivariate rank distributions over the replicates in order to separate signal from noise based on a defined confidence of rank consistency and reproducibility of identifications i.e the IDR threshold.
The method was developed by Qunhua Li and Peter Bickel's group and is extensively used by the ENCODE and modENCODE projects and is part of their ChIP-seq guidelines and standards.
""",
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 5 - Production/Stable",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
],
)
if __name__ == '__main__':
main()
| gpl-2.0 |
grundgruen/powerline | tests/test_eex_algo.py | 2 | 4813 | from unittest import TestCase
import pandas as pd
from zipline.finance import trading
from zipline.utils.factory import create_simulation_parameters
from zipline.test_algorithms import TestAlgorithm
from zipline.finance.commission import PerShare
from zipline.finance.slippage import FixedSlippage
from powerline.utils.data.data_generator import DataGeneratorEex
from powerline.exchanges.eex_exchange import EexExchange
__author__ = "Warren"
class TestEexAlgoTrue(TestCase):
"""
Tests the change in pnl and position for a simple EEX weekly algo.
"""
@classmethod
def setUpClass(cls):
start = pd.Timestamp('2015-05-18', tz='Europe/Berlin').tz_convert(
'UTC')
end = pd.Timestamp('2015-05-22', tz='Europe/Berlin').tz_convert('UTC')
exchange = EexExchange(start=start, end=end)
env = exchange.env
day = '2015-05-20'
ident = exchange.insert_ident(day, exchange.products[0])
expiration_date = pd.Timestamp(day,
tz='Europe/Berlin').tz_convert('UTC')
asset_metadata = {0: {
'asset_type': 'future', 'symbol': ident,
'expiration_date': expiration_date, 'contract_multiplier': 168,
'end_date': expiration_date}}
env.write_data(futures_data=asset_metadata)
instant_fill = True
cls.data, cls.pnl, cls.expected_positions = DataGeneratorEex(
identifier=ident,
env=env,
instant_fill=instant_fill).create_simple()
sim_params = create_simulation_parameters(
start=cls.data.start,
end=cls.data.end)
cls.algo = TestAlgorithm(sid=0, amount=1, order_count=1,
instant_fill=instant_fill,
env=env,
sim_params=sim_params,
commission=PerShare(0),
slippage=FixedSlippage())
cls.results = cls.algo.run(cls.data)
def test_algo_pnl(self):
for dt, pnl in self.pnl.iterrows():
self.assertEqual(self.results.pnl[dt], pnl[0])
def test_algo_positions(self):
for dt, amount in self.expected_positions.iterrows():
if self.results.positions[dt]:
actual_position = \
self.results.positions[dt][0]['amount']
else:
actual_position = 0
self.assertEqual(actual_position, amount[0])
@classmethod
def tearDownClass(cls):
cls.algo = None
trading.environment = None
class TestEexAlgoFalse(TestCase):
"""
Tests the change in pnl and position for a simple EEX weekly algo.
"""
@classmethod
def setUpClass(cls):
start = pd.Timestamp('2014-05-18',
tz='Europe/Berlin').tz_convert('UTC')
end = pd.Timestamp('2015-05-22',
tz='Europe/Berlin').tz_convert('UTC')
exchange = EexExchange(start=start, end=end)
env = exchange.env
day = '2015-05-20'
ident = exchange.insert_ident(day, exchange.products[0])
expiration_date = pd.Timestamp(day,
tz='Europe/Berlin').tz_convert('UTC')
asset_metadata = {0: {
'asset_type': 'future', 'symbol': ident,
'expiration_date': expiration_date, 'contract_multiplier': 168,
'end_date': expiration_date}}
env.write_data(futures_data=asset_metadata)
instant_fill = False
cls.data, cls.pnl, cls.expected_positions = DataGeneratorEex(
identifier=ident,
env=env,
instant_fill=instant_fill).create_simple()
sim_params = create_simulation_parameters(
start=cls.data.start,
end=cls.data.end)
cls.algo = TestAlgorithm(sid=0, amount=1, order_count=1,
instant_fill=instant_fill,
env=env,
sim_params=sim_params,
commission=PerShare(0),
slippage=FixedSlippage())
cls.results = cls.algo.run(cls.data)
def test_algo_pnl(self):
for dt, pnl in self.pnl.iterrows():
self.assertEqual(self.results.pnl[dt], pnl[0])
def test_algo_positions(self):
for dt, amount in self.expected_positions.iterrows():
if self.results.positions[dt]:
actual_position = \
self.results.positions[dt][0]['amount']
else:
actual_position = 0
self.assertEqual(actual_position, amount[0])
@classmethod
def tearDownClass(cls):
cls.algo = None
| apache-2.0 |
rahul-c1/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 30 | 7560 | """
Test the fastica algorithm.
"""
import itertools
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
"""
    Test Gram-Schmidt orthonormalization
"""
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
"""Test FastICA.fit_transform"""
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, 10]]:
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components, 10))
assert_equal(Xt.shape, (100, n_components))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
"""Test FastICA.inverse_transform"""
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
jlegendary/opencog | scripts/make_benchmark_graphs.py | 56 | 3139 | #!/usr/bin/env python
# Requires matplotlib for graphing
# reads *_benchmark.csv files as output by atomspace_bm and turns them into
# graphs.
import csv
import numpy as np
import matplotlib.colors as colors
#import matplotlib.finance as finance
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#import matplotlib.font_manager as font_manager
import glob
import pdb
def moving_average(x, n, type='simple'):
"""
compute an n period moving average.
type is 'simple' | 'exponential'
"""
x = np.asarray(x)
if type=='simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n] = a[n]
return a
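# Minimal usage sketch (hypothetical helper and toy data): 'simple' averages with
# equal weights, anything else uses exponentially decaying weights that favor the
# most recent points.
def _moving_average_example():
    demo = np.arange(20, dtype=float)
    return moving_average(demo, 5, type='simple'), moving_average(demo, 5, type='exponential')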
def graph_file(fn,delta_rss=True):
print "Graphing " + fn
records = csv.reader(open(fn,'rb'),delimiter=",")
sizes=[]; times=[]; times_seconds=[]; memories=[]
for row in records:
sizes.append(int(row[0]))
times.append(int(row[1]))
memories.append(int(row[2]))
times_seconds.append(float(row[3]))
left, width = 0.1, 0.8
rect1 = [left, 0.5, width, 0.4] #left, bottom, width, height
rect2 = [left, 0.1, width, 0.4]
fig = plt.figure(facecolor='white')
axescolor = '#f6f6f6' # the axies background color
ax1 = fig.add_axes(rect1, axisbg=axescolor)
ax2 = fig.add_axes(rect2, axisbg=axescolor, sharex=ax1)
ax1.plot(sizes,times_seconds,color='black')
if len(times_seconds) > 1000:
        ax1.plot(sizes,moving_average(times_seconds,len(times_seconds) / 100),color='blue')
if delta_rss:
oldmemories = list(memories)
for i in range(1,len(memories)): memories[i] = oldmemories[i] - oldmemories[i-1]
ax2.plot(sizes,memories,color='black')
for label in ax1.get_xticklabels():
label.set_visible(False)
class MyLocator(mticker.MaxNLocator):
def __init__(self, *args, **kwargs):
mticker.MaxNLocator.__init__(self, *args, **kwargs)
def __call__(self, *args, **kwargs):
return mticker.MaxNLocator.__call__(self, *args, **kwargs)
# at most 7 ticks, pruning the upper and lower so they don't overlap
# with other ticks
fmt = mticker.ScalarFormatter()
fmt.set_powerlimits((-3, 4))
ax1.yaxis.set_major_formatter(fmt)
ax2.yaxis.set_major_locator(MyLocator(7, prune='upper'))
fmt = mticker.ScalarFormatter()
fmt.set_powerlimits((-3, 4))
ax2.yaxis.set_major_formatter(fmt)
ax2.yaxis.offsetText.set_visible(False)
fig.show()
size = int(fmt.orderOfMagnitude) / 3
labels = ["B","KB","MB","GB"]
label = labels[size]
labels = ["","(10s)","(100s)"]
label += " " + labels[int(fmt.orderOfMagnitude) % 3]
ax2.set_xlabel("AtomSpace Size")
ax2.set_ylabel("RSS " + label)
ax1.set_ylabel("Time (seconds)")
ax1.set_title(fn)
fig.show()
fig.savefig(fn+".png",format="png")
files_to_graph = glob.glob("*_benchmark.csv")
for fn in files_to_graph:
graph_file(fn);
| agpl-3.0 |
mendax-grip/cfdemUtilities | zaki/plotTotalZaki.py | 2 | 2387 | #This program makes the plot for the average velocity of the particles in a the Z direction for
# a single case
# Author : Bruno Blais
# Last modified : December 3rd
#Python imports
import os
import sys
import math
import numpy
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import LogLocator, FormatStrFormatter
import pylab
#=====================
# Main plot
#=====================
fname=sys.argv[1]
#INPUT
print "R-> %s" %fname
np, u,v,w, unorm, wStd, wMax, wMin = numpy.loadtxt(fname, unpack=True)
#Stokes solutions for a single sphere unsteady solution
dt = 2e-6 # time step of the simulation
rhof = 100. # fluid density
rhos = 110. # solid density
g = -10. # gravity
dp = 0.0005 # particle diameter
mu = 0.0001 # viscosity of the fluid
b= 18 * mu / (rhos * dp**2)
vt = (rhos-rhof) * g * dp**2/18/mu * np / np # transient terminal velocity. Not so sure of the solution here
#Length of the geometry
x=0.05
y=0.05
z=0.25
#Volume of the geometry
vGeom=x*y*z
#Volume of a single particle
vPart = 4*math.pi/3 * (dp/2)**3
#Calculation of the volume fraction
phi = 1- np * vPart/vGeom
w = abs(w)
#plt.rcParams.update({'font.size': 10})
fig = plt.figure()
ax = fig.add_subplot(111) # Create plot object
ax.errorbar(numpy.log(phi),numpy.log(w),yerr=wStd,fmt='ro', label="Average velocity", markeredgewidth=1)
#ax.plot(phi,wMin,'bo', label="Minimal velocity")
#ax.plot(phi,wMax,'go', label="Maximal velocity")
plt.ylabel('Average settling velocity [m/s]')
plt.xlabel('Log Volume Fraction ($\phi$)')
plt.title('Log Average velocity of the particles as a function of the volume fraction of particles')
plt.legend(loc=9)
#plt.yscale('log')
#plt.xscale('log')
a,b = numpy.polyfit(numpy.log(phi),numpy.log(w),1)
ax.plot(numpy.log(phi),numpy.log(phi)*a+b,label="Linear regression")
print "Origin : ", b, " Slope : ", a
x1,x2,y1,y2 = plt.axis()
#plt.axis((0.1,1,y1,y2))
#Change tick sizes
#ax.tick_params('both', length=5, width=2, which='major')
#ax.tick_params('both', length=5, width=2, which='minor')
#Create 5 minor ticks between each major tick
#minorLocator=LogLocator(subs=numpy.linspace(2,10,6,endpoint=False))
#Format the labels
majorFormatter= FormatStrFormatter('%5.4f')
#Apply locator
#ax.xaxis.set_minor_locator(minorLocator)
#Modify y fontsize
#pylab.yticks(fontsize=40)
#matplotlib.rc('ytick.major', size=100)
plt.show()
| lgpl-3.0 |
kazemakase/scikit-learn | sklearn/cluster/mean_shift_.py | 106 | 14056 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For large
    datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
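# Minimal usage sketch (hypothetical toy data): estimate_bandwidth is typically
# run on a subsample of the data before mean shift; quantile sets the
# neighborhood size per point and n_samples caps the quadratic cost.
def _estimate_bandwidth_example():
    rng = np.random.RandomState(42)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5.0])
    return estimate_bandwidth(X, quantile=0.2, n_samples=60)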
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None, got %f" %
bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iter
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
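# Minimal usage sketch (hypothetical toy data) for the functional interface;
# bin_seeding=True seeds kernels on a coarse grid instead of on every point,
# which is much cheaper for large inputs.
def _mean_shift_example():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 6.0])
    cluster_centers, labels = mean_shift(X, bandwidth=2.0, bin_seeding=True)
    return cluster_centers, labels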
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f, using data"
" points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
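# In other words, get_bin_seeds snaps each point to a grid with spacing bin_size
# and keeps one seed per sufficiently occupied cell: with bin_size=1.0 the points
# [[0.1, 0.1], [0.2, -0.1], [3.9, 4.1]] collapse to the two seeds [0., 0.] and
# [4., 4.].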
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity tends
    to O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
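# Minimal end-to-end sketch (hypothetical toy data), guarded so it only runs
# when this module is executed directly.
if __name__ == "__main__":
    demo_rng = np.random.RandomState(1)
    X_demo = np.vstack([demo_rng.randn(80, 2), demo_rng.randn(80, 2) + 5.0])
    ms = MeanShift(bandwidth=2.0, bin_seeding=True).fit(X_demo)
    # One row per discovered blob; labels_ holds the cluster index per sample.
    print(ms.cluster_centers_)
    print(ms.predict(X_demo[:5]))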
| bsd-3-clause |
pravsripad/mne-python | mne/decoding/ems.py | 12 | 7624 | # Author: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
from collections import Counter
import numpy as np
from .mixin import TransformerMixin, EstimatorMixin
from .base import _set_cv
from ..io.pick import _picks_to_idx
from ..parallel import parallel_func
from ..utils import logger, verbose
from .. import pick_types, pick_info
class EMS(TransformerMixin, EstimatorMixin):
"""Transformer to compute event-matched spatial filters.
This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire
time course. No time
window needs to be specified. The result is a spatial filter at each
time point and a corresponding time course. Intuitively, the result
gives the similarity between the filter at each time point and the
data vector (sensors) at that time point.
.. note:: EMS only works for binary classification.
Attributes
----------
filters_ : ndarray, shape (n_channels, n_times)
The set of spatial filters.
classes_ : ndarray, shape (n_classes,)
The target classes.
References
----------
.. footbibliography::
"""
def __repr__(self): # noqa: D105
if hasattr(self, 'filters_'):
return '<EMS: fitted with %i filters on %i classes.>' % (
len(self.filters_), len(self.classes_))
else:
return '<EMS: not fitted.>'
def fit(self, X, y):
"""Fit the spatial filters.
        .. note:: EMS is fitted on data normalized by channel type before the
fitting of the spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The training data.
y : array of int, shape (n_epochs)
The target classes.
Returns
-------
self : instance of EMS
Returns self.
"""
classes = np.unique(y)
if len(classes) != 2:
raise ValueError('EMS only works for binary classification.')
self.classes_ = classes
filters = X[y == classes[0]].mean(0) - X[y == classes[1]].mean(0)
filters /= np.linalg.norm(filters, axis=0)[None, :]
self.filters_ = filters
return self
def transform(self, X):
"""Transform the data by the spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The input data.
Returns
-------
X : array, shape (n_epochs, n_times)
The input data transformed by the spatial filters.
"""
Xt = np.sum(X * self.filters_, axis=1)
return Xt
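# Minimal usage sketch (hypothetical random data): EMS operates on plain arrays
# of shape (n_epochs, n_channels, n_times) with a binary label vector, and
# transform returns one surrogate time course per epoch.
def _ems_class_example():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 10, 25)
    y = np.repeat([0, 1], 20)
    return EMS().fit(X, y).transform(X)  # shape (40, 25)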
@verbose
def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, cv=None,
verbose=None):
"""Compute event-matched spatial filter on epochs.
This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire
time course. No time
window needs to be specified. The result is a spatial filter at each
time point and a corresponding time course. Intuitively, the result
gives the similarity between the filter at each time point and the
data vector (sensors) at that time point.
    .. note:: EMS only works for binary classification.
    .. note:: The present function applies a leave-one-out cross-validation,
following Schurger et al's paper. However, we recommend using
a stratified k-fold cross-validation. Indeed, leave-one-out tends
to overfit and cannot be used to estimate the variance of the
prediction within a given fold.
    .. note:: Because of the leave-one-out, this function needs an equal
number of epochs in each of the two conditions.
Parameters
----------
epochs : instance of mne.Epochs
The epochs.
conditions : list of str | None, default None
If a list of strings, strings must match the epochs.event_id's key as
well as the number of conditions supported by the objective_function.
If None keys in epochs.event_id are used.
%(picks_good_data)s
%(n_jobs)s
cv : cross-validation object | str | None, default LeaveOneOut
The cross-validation scheme.
%(verbose)s
Returns
-------
surrogate_trials : ndarray, shape (n_trials // 2, n_times)
The trial surrogates.
mean_spatial_filter : ndarray, shape (n_channels, n_times)
The set of spatial filters.
conditions : ndarray, shape (n_classes,)
The conditions used. Values correspond to original event ids.
References
----------
.. footbibliography::
"""
logger.info('...computing surrogate time series. This can take some time')
# Default to leave-one-out cv
cv = 'LeaveOneOut' if cv is None else cv
picks = _picks_to_idx(epochs.info, picks)
if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
raise ValueError('The same number of epochs is required by '
'this function. Please consider '
'`epochs.equalize_event_counts`')
if conditions is None:
conditions = epochs.event_id.keys()
epochs = epochs.copy()
else:
epochs = epochs[conditions]
epochs.drop_bad()
if len(conditions) != 2:
raise ValueError('Currently this function expects exactly 2 '
'conditions but you gave me %i' %
len(conditions))
ev = epochs.events[:, 2]
# Special care to avoid path dependent mappings and orders
conditions = list(sorted(conditions))
cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
info = pick_info(epochs.info, picks)
data = epochs.get_data(picks=picks)
# Scale (z-score) the data by channel type
# XXX the z-scoring is applied outside the CV, which is not standard.
for ch_type in ['mag', 'grad', 'eeg']:
if ch_type in epochs:
# FIXME should be applied to all sort of data channels
if ch_type == 'eeg':
this_picks = pick_types(info, meg=False, eeg=True)
else:
this_picks = pick_types(info, meg=ch_type, eeg=False)
data[:, this_picks] /= np.std(data[:, this_picks])
# Setup cross-validation. Need to use _set_cv to deal with sklearn
# deprecation of cv objects.
y = epochs.events[:, 2]
_, cv_splits = _set_cv(cv, 'classifier', X=y, y=y)
parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
# FIXME this parallelization should be removed.
# 1) it's numpy computation so it's already efficient,
# 2) it duplicates the data in RAM,
# 3) the computation is already super fast.
out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
for train, test in cv_splits)
surrogate_trials, spatial_filter = zip(*out)
surrogate_trials = np.array(surrogate_trials)
spatial_filter = np.mean(spatial_filter, axis=0)
return surrogate_trials, spatial_filter, epochs.events[:, 2]
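# Minimal sketch, assuming `epochs` is an existing mne.Epochs object with two
# conditions and equalized trial counts (see the docstring above); the call
# itself is unchanged from the public API.
def _compute_ems_sketch(epochs):
    conditions = list(epochs.event_id)[:2]
    surrogates, spatial_filter, events = compute_ems(epochs, conditions=conditions)
    return surrogates, spatial_filter, events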
def _ems_diff(data0, data1):
"""Compute the default diff objective function."""
return np.mean(data0, axis=0) - np.mean(data1, axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
"""Run EMS."""
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
# compute surrogates
return np.sum(data[test[0]] * d, axis=0), d
| bsd-3-clause |
johnmgregoire/JCAPRamanDataProcess | PlateAlignViaEdge_v8.py | 1 | 17031 | import sys,os, pickle, numpy, pylab, operator, itertools
import cv2
from shutil import copy as copyfile
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
from DataParseApp import dataparseDialog
from sklearn.decomposition import NMF
projectpath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(os.path.join(projectpath,'ui'))
pythoncodepath=os.path.split(projectpath)[0]
jcapdataprocesspath=os.path.join(pythoncodepath, 'JCAPDataProcess')
sys.path.append(jcapdataprocesspath)
from VisualizeDataApp import visdataDialog
sys.path.append(os.path.join(jcapdataprocesspath,'AuxPrograms'))
from fcns_ui import *
from fcns_io import *
platemapvisprocesspath=os.path.join(pythoncodepath, 'JCAPPlatemapVisualize')
sys.path.append(platemapvisprocesspath)
from plate_image_align_Dialog import plateimagealignDialog
import numpy as np
###############UPDATE THIS TO BE THE FOLDER CONTAINING parameters.py
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True, **kwargs):
super(MainMenu, self).__init__(None)
self.parseui=dataparseDialog(self, title='Visualize ANA, EXP, RUN data', **kwargs)
self.alignui=plateimagealignDialog(self, manual_image_init_bool=False)
if execute:
self.parseui.exec_()
def doNMF(datan,n_components=4):
# from Mitsu
    #alternatively PCA ... might be faster
nmf=NMF(n_components=n_components,init='nndsvd')
data_decomp_all=nmf.fit_transform(datan)
data_components_all=nmf.components_
return data_decomp_all,data_components_all
def rgb_comp(arr2d, affine=True):
cmy_cmyk=lambda a:a[:3]*(1.-a[3])+a[3]
rgb_cmy=lambda a:1.-a
rgb_cmyk=lambda a:rgb_cmy(cmy_cmyk(a))
return numpy.array([rgb_cmyk(a) for a in arr2d])
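#In other words: each row of arr2d is treated as (c, m, y, k) weights and mapped
#to RGB via cmy = cmy*(1-k) + k followed by rgb = 1 - cmy, so larger NMF weights
#give darker pixels in the composite image built below.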
def imGen(data_decomp_all,ramaninfod,cmykindeces=[3, 2, 1, 0]):
cmykvals=copy.copy(data_decomp_all[:, cmykindeces])
cmykvals/=cmykvals.max(axis=0)[numpy.newaxis, :]
img=numpy.reshape(rgb_comp(cmykvals), (ramaninfod['xshape'], ramaninfod['yshape'], 3))
return img
def findEdges(img_gray, sigma = 0.33):
#this uses automatic thresholding from one of the cv2 tutorials
v = np.median(img_gray[img_gray>0])
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edges = cv2.Canny(np.uint8(img_gray),lower,upper)
return edges
def findContours(edges):
#the contours are now found by searching the most external convex hull
    #this way most of the not fully closed samples are detected as well
im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
iWithContour = cv2.drawContours(edges, contours, -1, (255,20,100), 5)
mapimage = np.zeros_like(edges)
#this fills the contours
for i in range(len(contours)):
cv2.drawContours(mapimage, contours, i, color=255, thickness=-1)
#this is to calculate the center of each contour
x=[]
y=[]
for c in contours:
# compute the center of the contour
M = cv2.moments(c)
try:
x.append(M['m10']/(M['m00']))
y.append(M['m01']/(M['m00']))
except:
            #this was necessary as the divisor is sometimes 0
            #yields good results but should be done with caution
x.append(M['m10']/(M['m00']+1e-23))
y.append(M['m01']/(M['m00']+1e-23))
return iWithContour, mapimage, contours, x, y
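# The centroid above is the standard image-moment result cx = M10/M00,
# cy = M01/M00 (with a tiny epsilon guarding empty contours). Minimal
# self-contained sketch on a hypothetical filled square:
def _centroid_sketch():
    square = np.zeros((20, 20), dtype=np.uint8)
    square[5:15, 5:15] = 255
    m = cv2.moments(square)
    return m['m10'] / m['m00'], m['m01'] / m['m00']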
def ramanimshow(im, **kwargs):
plt.imshow(im, origin='lower', interpolation='none', aspect=1, extent=extent, **kwargs)
#plt.clf()
#rgbimagedata=np.zeros(ramannewshape+(3,), dtype='float32')
#for i, arr in enumerate(data_decomp_all[:, :3].T):
# arr[arr!=0]=numpy.log10(arr[arr!=0])
# rgbimagedata[:, :, i]=np.array([ramanreshape(arr/arr.max())])
#ramanimshow(rgbimagedata)
#plt.show()
def save_raman_udi(visui,pathd,udi_ternary_projection_inds,plateidstr,saveall=True):
visui.openontheflyfolder(folderpath=pathd['spectrafolder'], plateidstr=plateidstr)
visui.BatchComboBox.setCurrentIndex(2)
visui.runbatchprocess()
if saveall:
savep=pathd['udibasepath']+'all.udi'
visui.get_xy_plate_info_browsersamples(saveudibool=True, ternary_el_inds_for_udi_export=udi_ternary_projection_inds, savep=savep)
numelsincompspacebeforeternaryprojection=visui.fomplotd['comps'].shape[1]
if numelsincompspacebeforeternaryprojection>3:
for i, indstup in enumerate(itertools.combinations(range(numelsincompspacebeforeternaryprojection), 3)):
excludeinds=[ind for ind in range(numelsincompspacebeforeternaryprojection) if not ind in indstup]
inds_where_excluded_els_all_zero=numpy.where(visui.fomplotd['comps'][:, excludeinds].max(axis=1)==0)[0]
if len(inds_where_excluded_els_all_zero)==0:
continue
smplist=[visui.fomplotd['sample_no'][fomplotind] for fomplotind in inds_where_excluded_els_all_zero]
visui.remallsamples()
visui.addrem_select_fomplotdinds(remove=False, smplist=smplist)
savep=''.join([pathd['udibasepath']]+[visui.ellabels[ind] for ind in indstup]+['.udi'])
visui.get_xy_plate_info_browsersamples(saveudibool=True, ternary_el_inds_for_udi_export=indstup, savep=savep)
#errorattheend
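#For a hypothetical 4-element composition space, the loop above enumerates the
#ternary index triples (0,1,2), (0,1,3), (0,2,3), (1,2,3); a sample is written
#into a given projected .udi only when the excluded element(s) are zero for that
#sample.
def _ternary_projection_indices(n_elements=4):
    return list(itertools.combinations(range(n_elements), 3))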
if __name__=='__main__':
paramsfolder=r'K:\users\hte\Raman\40374\20170630analysis'
#paramsfolder=r'K:\users\hte\Raman\33444\20170608analysis'
#if not paramsfolder is None:
sys.path.append(paramsfolder)
from parameters import *
platemappath=getplatemappath_plateid(plateidstr)
if not os.path.isdir(pathd['mainfolder']):
print 'NOT A VALID FOLDER'
if not os.path.isdir(pathd['savefolder']):
os.mkdir(pathd['savefolder'])
if not os.path.isdir(pathd['spectrafolder']):
os.mkdir(pathd['spectrafolder'])
mainapp=QApplication(sys.argv)
form=MainMenu(None, execute=False)
#form.show()
#form.setFocus()
#mainapp.exec_()
parseui=form.parseui
alignui=form.alignui
parseui.rawpathLineEdit.setText(pathd['ramanfile'])
parseui.infopathLineEdit.setText(pathd['infopck'])
parseui.getinfo(ramaninfop=pathd['infopck'], ramanfp=pathd['ramanfile'])#opens or creates
alignui.motimage_sample_marker_color=motimage_sample_marker_color
parseui.OutlierAveDoubleSpinBox.setValue(raman_spectrum_outlier_fraction_removal)
if os.path.isfile(pathd['allspectra']):
with open(pathd['allspectra'], mode='rb') as f:
fullramandataarray=numpy.load(f)
elif 1:
fullramandataarray=parseui.readfullramanarray(pathd['ramanfile'])#opens or creates
with open(pathd['allspectra'], mode='wb') as f:
numpy.save(f, fullramandataarray)
ramaninfod=parseui.ramaninfod
#parseui.exec_()
#ramaninfod['number of spectra']
#ramaninfod['xdata']
#ramaninfod['ydata']
#ramaninfod['Wavenumbers_str']
#ramaninfod['Spectrum 0 index']
ramaninfod['xdata']/=1000.
ramaninfod['ydata']/=1000.#convert to mm
ramaninfod['xshape']= len(np.unique(ramaninfod['xdata']))
ramaninfod['yshape']= len(np.unique(ramaninfod['ydata']))
ramaninfod['dx']= (ramaninfod['xdata'].max()-ramaninfod['xdata'].min())/(ramaninfod['xshape']-1)
ramaninfod['dy']= (ramaninfod['ydata'].max()-ramaninfod['ydata'].min())/(ramaninfod['yshape']-1)
nx=dx_smp/ramaninfod['dx']
ny=dy_smp/ramaninfod['dy']
ntot=nx*ny
ramanreshape=lambda arr: np.reshape(arr, (ramaninfod['xshape'], ramaninfod['yshape'])).T[::-1, ::-1]
ramannewshape=(ramaninfod['yshape'], ramaninfod['xshape'])
image_of_x=ramanreshape(ramaninfod['xdata'])
image_of_y=ramanreshape(ramaninfod['ydata'])
#extent=[ramaninfod['xdata'].max(), ramaninfod['xdata'].min(), ramaninfod['ydata'].min(), ramaninfod['ydata'].max()]
#extent=[ramaninfod['xdata'].max(), ramaninfod['xdata'].min(), ramaninfod['ydata'].max(), ramaninfod['ydata'].min()]
extent=[image_of_x[0, 0], image_of_x[-1, -1], image_of_y[0, 0], image_of_y[-1, -1]]
if os.path.isfile(pathd['nmfdata']):
with open(pathd['nmfdata'], mode='rb') as f:
tempd=pickle.load(f)
data_decomp_all,data_components_all,rgbimagedata=[tempd[k] for k in 'data_decomp_all,data_components_all,rgbimagedata'.split(',')]
else:
data_decomp_all,data_components_all = doNMF(fullramandataarray,4)
#rgbimagedata=imGen(data_decomp_all,ramaninfod)
rgbimagedata=np.zeros(ramannewshape+(3,), dtype='float32')
for i, arr in enumerate(data_decomp_all[:, :3].T):
if nmf_scaling_algorithm_for_image=='scale_by_max':
arr/=arr.max()
elif nmf_scaling_algorithm_for_image=='scale_log_by_max':
arr[arr!=0]=numpy.log10(arr[arr!=0])
arr/=arr.max()
rgbimagedata[:, :, i]=np.array([ramanreshape(arr)])
tempd={}
tempd['data_decomp_all']=data_decomp_all
tempd['data_components_all']=data_components_all
tempd['rgbimagedata']=rgbimagedata
with open(pathd['nmfdata'], mode='wb') as f:
tempd=pickle.dump(tempd, f)
if 1 and os.path.isfile(pathd['blobd']):
with open(pathd['blobd'], mode='rb') as f:
blobd=pickle.load(f)
else:
edges = np.zeros(ramannewshape, dtype='uint8')
searchforoptimalbool=isinstance(find_edges_sigma_value, list)
ltemp=find_edges_sigma_value if searchforoptimalbool else [find_edges_sigma_value]
plt.clf()
for sigmacount, sigmaval in enumerate(ltemp):
if searchforoptimalbool:
plt.subplot(2, len(find_edges_sigma_value), sigmacount+1)
plt.title('edges for sigma %.2f' %sigmaval)
for i in range(data_decomp_all.shape[1]):
if nmf_scaling_algorithm_for_edge=='scale_by_max':
datadecomptemp=data_decomp_all[:,i]/data_decomp_all[:,i].max()
elif nmf_scaling_algorithm_for_edge=='scale_log_by_max':
datadecomptemp=data_decomp_all[:,i]
datadecomptemp[datadecomptemp!=0]=numpy.log10(datadecomptemp[datadecomptemp!=0])
datadecomptemp/=datadecomptemp.max()
arr=np.uint8(ramanreshape(datadecomptemp)*254)
edgetemp=findEdges(arr, sigma=sigmaval)
# plt.imshow(edgetemp)
# plt.show()
edges[np.where(edgetemp>0)] = 244
ramanimshow(edges)
if searchforoptimalbool:
plt.subplot(2, len(find_edges_sigma_value), len(find_edges_sigma_value)+sigmacount+1)
plt.title('mapfill for sigma %.2f' %sigmaval)
else:
plt.savefig(pathd['edges'])
plt.clf()
im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
image_of_inds=ramanreshape(numpy.arange(ramaninfod['number of spectra']))
mapfill = np.zeros(ramannewshape, dtype='uint8')
blobd={}
l_mask=[cv2.drawContours(np.zeros(ramannewshape, dtype='uint8'), contours, i, color=1, thickness=-1) for i in range(len(contours))]
l_imageinds=[numpy.where(maski==1) for maski in l_mask]
l_xycen=np.array([[image_of_x[imageindsi].mean(), image_of_y[imageindsi].mean()] for imageindsi in l_imageinds])
indstomerge=sorted([(count2+count+1, count) for count, xy0 in enumerate(l_xycen[:-1]) for count2, xy1 in enumerate(l_xycen[count+1:]) if ((xy0-xy1)**2).sum()<(dx_smp**2+dy_smp**2)/5.])[::-1]
#indstomerge has highest index first so merge going down
for indhigh, indlow in indstomerge:
# imageinds=l_imageinds.pop(indhigh)
# mask=l_mask.pop(indhigh)
imageinds=l_imageinds[indhigh]
mask=l_mask[indhigh]
l_mask[indlow][imageinds]=1#update only the masks and then update everythign else afterwards
l_imageinds=[numpy.where(maskj==1) for maskj in l_mask]
l_xycen=np.array([[image_of_x[imageindsj].mean(), image_of_y[imageindsj].mean()] for imageindsj in l_imageinds])
for imageinds, mask in zip(l_imageinds, l_mask):
indsinblob=sorted(list(image_of_inds[imageinds]))
relx=(image_of_x[imageinds].max()-image_of_x[imageinds].min())/dx_smp
rely=(image_of_y[imageinds].max()-image_of_y[imageinds].min())/dy_smp
if relx<0.5 or relx>1.4 or rely<0.5 or rely>1.4 or len(indsinblob)<ntot*0.5 or len(indsinblob)>ntot*1.5:
print 'skipped blob that was %.2f, %.2f of expected size with %d pixels' %(relx, rely, len(indsinblob))
continue
if numpy.any(mapfill[imageinds]==1):
print 'overlapping blobs detected'
xc=image_of_x[imageinds].mean()
yc=image_of_y[imageinds].mean()
mapfill[imageinds]=1
blobd[(xc, yc)]=indsinblob
ramanimshow(mapfill)
if searchforoptimalbool:
plt.show()
else:
plt.savefig(pathd['mapfill'])
if show_help_messages:
messageDialog(form, 'The auto detected and cleaned up blobs will be shown.\nThis is an image using the Raman motor coordinates').exec_()
plt.show()
with open(pathd['blobd'], mode='wb') as f:
pickle.dump(blobd, f)
if force_new_alignment or not os.path.isfile(pathd['map']):
alignui.knownblobsdict=blobd
alignui.openAddFile(p=platemappath)
alignui.image=rgbimagedata
alignui.motimage_extent=extent #left,right,bottom,top in mm
alignui.reloadimagewithextent()
#alignui.plotw_motimage.axes.imshow(alignui.image, origin='lower', interpolation='none', aspect=1, extent=alignui.motimage_extent)
xarr, yarr=np.array(blobd.keys()).T
alignui.plotw_motimage.axes.plot(xarr, yarr, 'wx', ms=4)
alignui.plotw_motimage.fig.canvas.draw()
if show_help_messages:
messageDialog(form, 'NMF analysis done and now plotting NMF image\nwith identified samples marked +. User can choose sample_no and \nright click to add calibration points.\nDo this for at least 1 sample marked with +.').exec_()
alignui.exec_()
alignui.sampleLineEdit.setText(','.join(['%d' %smp for smp in sample_list]))
alignui.addValuesSample()
if show_help_messages:
messageDialog(form, 'sample_no for export have been added. Check that \nthere are no NaN and if there are manually add calibration points\nas necessary and then remove+re-add the NaN samples.').exec_()
alignui.exec_()
alignui.plotw_motimage.fig.savefig(pathd['alignedsamples'])
with open(pathd['alignedsamplestxt'], mode='w') as f:
f.write(str(alignui.browser.toPlainText()))
alignui.openpckinfo(p=pathd['infopck'])
alignui.infox/=1000.
alignui.infoy/=1000.
alignui.perform_genmapfile(p=pathd['map'], **default_sample_blob_dict)
mapfill2=np.zeros(ramaninfod['number of spectra'], dtype='uint8')
for smp, inds in alignui.smp_inds_list__map:
mapfill2[inds]=2 if smp>0 else 1
mapfill2=ramanreshape(mapfill2)
plt.clf()
ramanimshow(mapfill2, vmin=0, vmax=2, cmap='gnuplot')
plt.savefig(pathd['samplepixels'])
if show_help_messages:
messageDialog(form, 'The NMF-identified samples use custom blob shapes and\nthe rest of the requested samples use default sample shape, resulting\nin the following map of pixels that will be exported.').exec_()
plt.show()
parseui.savepathLineEdit.setText(pathd['spectrafolder'])
parseui.match(copypath=pathd['map'])
parseui.extract()
parseui.saveave()
#parseui.readresultsfolder()
if show_help_messages:
messageDialog(form, 'The .rmn files have now been saved, so you can use\nthis next dialog to visualize data or close it to generate\nthe .udi files and open in JCAPDataProcess Visualizer').exec_()
parseui.exec_()
#only initialize visdataDialog so only created when necessary
visui=visdataDialog(form, title='Visualize ANA, EXP, RUN data')
saveudi(visui,pathd,udi_ternary_projection_inds,plateidstr)
if show_help_messages:
messageDialog(form, 'udi files now saved and JCAPDataProcess\nVisualizer will be opened for your use.').exec_()
visui.exec_()
if show_help_messages:
messageDialog(form, 'There is nothing more to do and continuing will raise an error.').exec_()
| bsd-3-clause |
djgagne/scikit-learn | sklearn/utils/multiclass.py | 83 | 12343 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
          because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
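# Illustrative sketch (editorial addition, not part of the original module):
# how an estimator's ``partial_fit`` would typically use the helper above.
# ``MyEstimator`` and ``_initialize`` are hypothetical names, used only to make
# the calling convention concrete.
#
#     class MyEstimator(object):
#         def partial_fit(self, X, y, classes=None):
#             if _check_partial_fit_first_call(self, classes):
#                 # first call: ``self.classes_`` was just set from ``classes``
#                 self._initialize(n_classes=len(self.classes_))
#             # ...update the model with this mini-batch of X, y...
#             return self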
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
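# Illustrative example (editorial addition, not part of the original module):
#
#     >>> y = np.array([[1, 0],
#     ...               [2, 0],
#     ...               [1, 1]])
#     >>> classes, n_classes, class_prior = class_distribution(y)
#     # classes     -> [array([1, 2]), array([0, 1])]
#     # n_classes   -> [2, 2]
#     # class_prior -> [array([ 0.667,  0.333]), array([ 0.667,  0.333])]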
| bsd-3-clause |
StanczakDominik/NumericalSchrodinger | SchrodingerEigenvectorsAnimated.py | 1 | 2104 | # Schrodinger 1D equation solved by finding eigenvectors of the hamiltonian
import numpy as np
import scipy
import scipy.integrate
import scipy.linalg
import scipy.misc
import scipy.special
import pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation
l = 10
N = 1024
def v(x):
return m * w * x * x / 2
m = 1
w = 1
hbar = 1
X, dx = np.linspace(-l, l, N, retstep=True)
H = np.diag(v(X))
for i in range(N):
H[i, i] += hbar * hbar / m / dx / dx
if i > 0:
H[i - 1, i] -= 0.5 * hbar * hbar / m / dx / dx
if i < N - 1:
H[i + 1, i] -= 0.5 * hbar * hbar / m / dx / dx
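# The loop above discretizes H = -hbar^2/(2m) d^2/dx^2 + V(x) with a second-order
# central difference: -psi'' ~ (2*psi[i] - psi[i-1] - psi[i+1]) / dx**2, which gives
# hbar^2/(m*dx**2) on the diagonal and -hbar^2/(2*m*dx**2) on the first off-diagonals.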
E, psi_E = scipy.linalg.eigh(H)
E, psi_E = E[:100], psi_E[:, :100]
for n in range(100):
psi = psi_E[:, n]
norm = np.sqrt(scipy.integrate.simps(psi ** 2, X))
psi /= norm
def psi_t_parts(psi_input, energy, t_input):
psi_complex = psi_input * np.exp(-1j * energy * t_input / hbar)
return np.real(psi_complex), np.imag(psi_complex)
N_lines=10
psi_list = psi_E[:, :N_lines]
E_list = E[:N_lines]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
lines=[ax.plot(X, np.zeros_like(X), np.zeros_like(X))[0] for n in range(N_lines)]
def init():
for line in lines:
line.set_data([], [])
line.set_3d_properties([])
return lines
frame_number=5000
def animate(i, lines, dummy):
t=4*np.pi*i/frame_number
for index in range(N_lines):
line=lines[index]
psi = psi_list[:, index]
psi_r, psi_i = psi_t_parts(psi, E_list[index], t)
line.set_data(X,psi_r)
line.set_3d_properties(psi_i)
return lines
ax.set_xlabel("$x$ position")
ax.set_ylabel("Real part")
ax.set_zlabel("Imaginary part")
ax.set_xlim3d(-l, l)
ax.set_ylim3d(-1,1)
ax.set_zlim3d(-1,1)
ax.grid()
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=frame_number, fargs=(lines, "dummy"), interval=10, blit=True)
#saves mp4 file
#mywriter = animation.MencoderWriter()
#anim.save('wavefunction_animation.mp4', writer=mywriter, extra_args=['-vcodec', 'libx264'])
plt.show() | mit |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/scipy/interpolate/_fitpack_impl.py | 10 | 46541 | """
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.
See
http://www.cs.kuleuven.ac.be/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/index.html
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
TODO: Make interfaces to the following fitpack functions:
For univariate splines: cocosp, concon, fourco, insert
For bivariate splines: profil, regrid, parsur, surev
"""
from __future__ import division, print_function, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from . import _fitpack
from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
empty, iinfo, intc, asarray)
# Try to replace _fitpack interface with
# f2py-generated version
from . import dfitpack
def _intc_overflow(x, msg=None):
"""Cast the value to an intc and raise an OverflowError if the value
cannot fit.
"""
if x > iinfo(intc).max:
if msg is None:
msg = '%r cannot fit into an intc' % x
raise OverflowError(msg)
return intc(x)
_iermess = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree k.\n"
"fp gives the upper bound fp0 for the smoothing factor s", None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: data (x,y) size is too small or smoothing parameter"
"\ns is too small (fp>s).", ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
10: ["Error on input data", ValueError],
'unknown': ["An error occurred", TypeError]
}
_iermess2 = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree kx and ky."
"\nfp gives the upper bound fp0 for the smoothing factor s", None],
-3: ["Warning. The coefficients of the spline have been computed as the\n"
"minimal norm least-squares solution of a rank deficient system.",
None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: nxest or nyest too small or s is too small. (fp>s)",
ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable causes: s too small or badly chosen eps.\n"
"(abs(fp-s)/s>0.001)", ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
4: ["No more knots can be added because the number of B-spline\n"
"coefficients already exceeds the number of data points m.\n"
"Probable causes: either s or m too small. (fp>s)", ValueError],
5: ["No more knots can be added because the additional knot would\n"
"coincide with an old one. Probable cause: s too small or too large\n"
"a weight to an inaccurate data point. (fp>s)", ValueError],
10: ["Error on input data", ValueError],
11: ["rwrk2 too small, i.e. there is not enough workspace for computing\n"
"the minimal least-squares solution of a rank deficient system of\n"
"linear equations.", ValueError],
'unknown': ["An error occurred", TypeError]
}
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-dimensional curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
        A value of nest=m+k+1 is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
if task <= 0:
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
x = atleast_1d(x)
idim, m = x.shape
if per:
for i in range(idim):
if x[i][0] != x[i][-1]:
if quiet < 2:
warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
(i, m, i)))
x[i][-1] = x[i][0]
if not 0 < idim < 11:
raise TypeError('0 < idim < 11 must hold')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
ipar = (u is not None)
if ipar:
_parcur_cache['u'] = u
if ub is None:
_parcur_cache['ub'] = u[0]
else:
_parcur_cache['ub'] = ub
if ue is None:
_parcur_cache['ue'] = u[-1]
else:
_parcur_cache['ue'] = ue
else:
_parcur_cache['u'] = zeros(m, float)
if not (1 <= k <= 5):
raise TypeError('1 <= k= %d <=5 must hold' % k)
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
raise TypeError('Mismatch of input dimensions')
if s is None:
s = m - sqrt(2*m)
if t is None and task == -1:
raise TypeError('Knots must be given for task=-1')
if t is not None:
_parcur_cache['t'] = atleast_1d(t)
n = len(_parcur_cache['t'])
if task == -1 and n < 2*k + 2:
raise TypeError('There must be at least 2*k+2 knots for task=-1')
if m <= k:
raise TypeError('m > k must hold')
if nest is None:
nest = m + 2*k
if (task >= 0 and s == 0) or (nest < 0):
if per:
nest = m + 2*k
else:
nest = m + k + 1
nest = max(nest, 2*k + 3)
u = _parcur_cache['u']
ub = _parcur_cache['ub']
ue = _parcur_cache['ue']
t = _parcur_cache['t']
wrk = _parcur_cache['wrk']
iwrk = _parcur_cache['iwrk']
t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
task, ipar, s, t, nest, wrk, iwrk, per)
_parcur_cache['u'] = o['u']
_parcur_cache['ub'] = o['ub']
_parcur_cache['ue'] = o['ue']
_parcur_cache['t'] = t
_parcur_cache['wrk'] = o['wrk']
_parcur_cache['iwrk'] = o['iwrk']
ier = o['ier']
fp = o['fp']
n = len(t)
u = o['u']
c.shape = idim, n - k - 1
tcku = [t, list(c), k], u
if ier <= 0 and not quiet:
warnings.warn(RuntimeWarning(_iermess[ier][0] +
"\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s)))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tcku, fp, ier, _iermess[ier][0]
except KeyError:
return tcku, fp, ier, _iermess['unknown'][0]
else:
return tcku
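# Illustrative usage sketch (editorial addition, not part of the original
# module): fit a parametric spline through points on a circle and evaluate it
# with `splev`.
#
#     >>> theta = np.linspace(0, 2*np.pi, 40)
#     >>> tck, u = splprep([np.cos(theta), np.sin(theta)], s=0)
#     >>> unew = np.linspace(0, 1, 200)
#     >>> xnew, ynew = splev(unew, tck)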
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc)}
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The order of the spline fit. It is recommended to use cubic splines.
Even order splines should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored and used internally)
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
Notes
-----
See splev for evaluation of the spline and its derivatives.
The user is responsible for assuring that the values of *x* are unique.
Otherwise, *splrep* will not return sensible results.
See Also
--------
UnivariateSpline, BivariateSpline
splprep, splev, sproot, spalde, splint
bisplrep, bisplev
Notes
-----
See splev for evaluation of the spline and its derivatives. Uses the
FORTRAN routine curfit from FITPACK.
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, tck)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
if task <= 0:
_curfit_cache = {}
x, y = map(atleast_1d, [x, y])
m = len(x)
if w is None:
w = ones(m, float)
if s is None:
s = 0.0
else:
w = atleast_1d(w)
if s is None:
s = m - sqrt(2*m)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if (m != len(y)) or (m != len(w)):
raise TypeError('Lengths of the first three arguments (x,y,w) must '
'be equal')
if not (1 <= k <= 5):
raise TypeError('Given degree of the spline (k=%d) is not supported. '
'(1<=k<=5)' % k)
if m <= k:
raise TypeError('m > k must hold')
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if t is not None:
task = -1
if task == -1:
if t is None:
raise TypeError('Knots must be given for task=-1')
numknots = len(t)
_curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
_curfit_cache['t'][k+1:-k-1] = t
nest = len(_curfit_cache['t'])
elif task == 0:
if per:
nest = max(m + 2*k, 2*k + 3)
else:
nest = max(m + k + 1, 2*k + 3)
t = empty((nest,), float)
_curfit_cache['t'] = t
if task <= 0:
if per:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
else:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
_curfit_cache['iwrk'] = empty((nest,), intc)
try:
t = _curfit_cache['t']
wrk = _curfit_cache['wrk']
iwrk = _curfit_cache['iwrk']
except KeyError:
raise TypeError("must call with task=1 only after"
" call with task=0,-1")
if not per:
n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
xb, xe, k, s)
else:
n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
tck = (t[:n], c[:n], k)
if ier <= 0 and not quiet:
_mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess[ier][0]
except KeyError:
return tck, fp, ier, _iermess['unknown'][0]
else:
return tck
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : tuple
A sequence of length 3 returned by `splrep` or `splprep` containing
the knots, coefficients, and degree of the spline.
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in ``x``. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in N-dimensional space.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k, der=der:
splev(x, [t, c, k], der, ext), c))
else:
if not (0 <= der <= k):
raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
if ext not in (0, 1, 2, 3):
raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
x = asarray(x)
shape = x.shape
x = atleast_1d(x).ravel()
y, ier = _fitpack._spl_(x, der, t, c, k, ext)
if ier == 10:
raise ValueError("Invalid input data")
if ier == 1:
raise ValueError("Found x value not in the domain")
if ier:
raise TypeError("An error occurred")
return y.reshape(shape)
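# Illustrative usage sketch (editorial addition, not part of the original
# module): evaluate a fitted spline and its first derivative.
#
#     >>> x = np.linspace(0, 2*np.pi, 50)
#     >>> tck = splrep(x, np.sin(x))
#     >>> splev(np.pi/2, tck)           # ~ 1.0, i.e. sin(pi/2)
#     >>> splev(np.pi/2, tck, der=1)    # ~ 0.0, i.e. cos(pi/2)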
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline.
Given the knots and coefficients of a B-spline, evaluate the definite
integral of the smoothing polynomial between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
Notes
-----
splint silently assumes that the spline function is zero outside the data
interval (a, b).
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return list(map(lambda c, a=a, b=b, t=t, k=k:
splint(a, b, [t, c, k]), c))
else:
aint, wrk = _fitpack._splint(t, c, k, a, b)
if full_output:
return aint, wrk
else:
return aint
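# Illustrative usage sketch (editorial addition, not part of the original
# module): the definite integral of sin(x) over [0, pi] is 2.
#
#     >>> x = np.linspace(0, np.pi, 100)
#     >>> tck = splrep(x, np.sin(x))
#     >>> splint(0, np.pi, tck)         # ~ 2.0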
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
if k != 3:
raise ValueError("sproot works only for cubic (k=3) splines")
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return list(map(lambda c, t=t, k=k, mest=mest:
sproot([t, c, k], mest), c))
else:
if len(t) < 8:
raise TypeError("The number of knots %d>=8" % len(t))
z, ier = _fitpack._sproot(t, c, k, mest)
if ier == 10:
raise TypeError("Invalid input data. "
"t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
if ier == 0:
return z
if ier == 1:
warnings.warn(RuntimeWarning("The number of zeros exceeds mest"))
return z
raise TypeError("Unknown error")
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
UnivariateSpline, BivariateSpline
References
----------
.. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k:
spalde(x, [t, c, k]), c))
else:
x = atleast_1d(x)
if len(x) > 1:
return list(map(lambda x, tck=tck: spalde(x, tck), x))
d, ier = _fitpack._spalde(t, c, k, x[0])
if ier == 0:
return d
if ier == 10:
raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
raise TypeError("Unknown error")
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
# full_output=0,nest=None,per=0,quiet=1):
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
'wrk': array([], float), 'iwrk': array([], intc)}
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
full_output=0, nxest=None, nyest=None, quiet=1):
"""
Find a bivariate B-spline representation of a surface.
Given a set of data points (x[i], y[i], z[i]) representing a surface
z=f(x,y), compute a B-spline representation of the surface. Based on
the routine SURFIT from FITPACK.
Parameters
----------
x, y, z : ndarray
Rank-1 arrays of data points.
w : ndarray, optional
Rank-1 array of weights. By default ``w=np.ones(len(x))``.
xb, xe : float, optional
End points of approximation interval in `x`.
By default ``xb = x.min(), xe=x.max()``.
yb, ye : float, optional
End points of approximation interval in `y`.
By default ``yb=y.min(), ye = y.max()``.
kx, ky : int, optional
The degrees of the spline (1 <= kx, ky <= 5).
Third order (kx=ky=3) is recommended.
task : int, optional
If task=0, find knots in x and y and coefficients for a given
smoothing factor, s.
If task=1, find knots and coefficients for another value of the
smoothing factor, s. bisplrep must have been previously called
with task=0 or task=1.
If task=-1, find coefficients for a given set of knots tx, ty.
s : float, optional
A non-negative smoothing factor. If weights correspond
to the inverse of the standard-deviation of the errors in z,
then a good s-value should be found in the range
``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
eps : float, optional
A threshold for determining the effective rank of an
over-determined linear system of equations (0 < eps < 1).
`eps` is not likely to need changing.
tx, ty : ndarray, optional
Rank-1 arrays of the knots of the spline for task=-1
full_output : int, optional
Non-zero to return optional outputs.
nxest, nyest : int, optional
Over-estimates of the total number of knots. If None then
``nxest = max(kx+sqrt(m/2),2*kx+3)``,
``nyest = max(ky+sqrt(m/2),2*ky+3)``.
quiet : int, optional
Non-zero to suppress printing of messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : array_like
A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
coefficients (c) of the bivariate B-spline representation of the
surface along with the degree of the spline.
fp : ndarray
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated if
ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplev` to evaluate the value of the B-spline given its tck
representation.
References
----------
.. [1] Dierckx P.:An algorithm for surface fitting with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P.:An algorithm for surface fitting with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
m = len(x)
if not (m == len(y) == len(z)):
raise TypeError('len(x)==len(y)==len(z) must hold.')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if xb is None:
xb = x.min()
if xe is None:
xe = x.max()
if yb is None:
yb = y.min()
if ye is None:
ye = y.max()
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if s is None:
s = m - sqrt(2*m)
if tx is None and task == -1:
raise TypeError('Knots_x must be given for task=-1')
if tx is not None:
_surfit_cache['tx'] = atleast_1d(tx)
nx = len(_surfit_cache['tx'])
if ty is None and task == -1:
raise TypeError('Knots_y must be given for task=-1')
if ty is not None:
_surfit_cache['ty'] = atleast_1d(ty)
ny = len(_surfit_cache['ty'])
if task == -1 and nx < 2*kx+2:
raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
if task == -1 and ny < 2*ky+2:
        raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
'supported. (1<=k<=5)' % (kx, ky))
if m < (kx + 1)*(ky + 1):
raise TypeError('m >= (kx+1)(ky+1) must hold')
if nxest is None:
nxest = int(kx + sqrt(m/2))
if nyest is None:
nyest = int(ky + sqrt(m/2))
nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
if task >= 0 and s == 0:
nxest = int(kx + sqrt(3*m))
nyest = int(ky + sqrt(3*m))
if task == -1:
_surfit_cache['tx'] = atleast_1d(tx)
_surfit_cache['ty'] = atleast_1d(ty)
tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
wrk = _surfit_cache['wrk']
u = nxest - kx - 1
v = nyest - ky - 1
km = max(kx, ky) + 1
ne = max(nxest, nyest)
bx, by = kx*v + ky + 1, ky*u + kx + 1
b1, b2 = bx, bx + v - ky
if bx > by:
b1, b2 = by, by + u - kx
msg = "Too many data points to interpolate"
lwrk1 = _intc_overflow(u*v*(2 + b1 + b2) +
2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
msg=msg)
lwrk2 = _intc_overflow(u*v*(b2 + 1) + b2, msg=msg)
tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
task, s, eps, tx, ty, nxest, nyest,
wrk, lwrk1, lwrk2)
_curfit_cache['tx'] = tx
_curfit_cache['ty'] = ty
_curfit_cache['wrk'] = o['wrk']
ier, fp = o['ier'], o['fp']
tck = [tx, ty, c, kx, ky]
ierm = min(11, max(-3, ier))
if ierm <= 0 and not quiet:
_mess = (_iermess2[ierm][0] +
"\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ierm > 0 and not full_output:
if ier in [1, 2, 3, 4, 5]:
_mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
else:
try:
raise _iermess2[ierm][1](_iermess2[ierm][0])
except KeyError:
raise _iermess2['unknown'][1](_iermess2['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess2[ierm][0]
except KeyError:
return tck, fp, ier, _iermess2['unknown'][0]
else:
return tck
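# Illustrative usage sketch (editorial addition, not part of the original
# module): fit a smooth surface to scattered samples of z = x*y and evaluate
# it on a grid with `bisplev`.
#
#     >>> rng = np.random.RandomState(0)
#     >>> x = rng.uniform(-1, 1, 200)
#     >>> y = rng.uniform(-1, 1, 200)
#     >>> tck = bisplrep(x, y, x * y, s=0.1)
#     >>> xi = yi = np.linspace(-0.9, 0.9, 5)
#     >>> zi = bisplev(xi, yi, tck)     # (5, 5) array, approximately xi[:, None] * yi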
def bisplev(x, y, tck, dx=0, dy=0):
"""
Evaluate a bivariate B-spline and its derivatives.
Return a rank-2 array of spline function values (or spline derivative
values) at points given by the cross-product of the rank-1 arrays `x` and
`y`. In special cases, return an array or just a float if either `x` or
`y` or both are floats. Based on BISPEV from FITPACK.
Parameters
----------
x, y : ndarray
Rank-1 arrays specifying the domain over which to evaluate the
spline or its derivative.
tck : tuple
A sequence of length 5 returned by `bisplrep` containing the knot
locations, the coefficients, and the degree of the spline:
[tx, ty, c, kx, ky].
dx, dy : int, optional
The orders of the partial derivatives in `x` and `y` respectively.
Returns
-------
vals : ndarray
The B-spline or its derivative evaluated over the set formed by
the cross-product of `x` and `y`.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplrep` to generate the `tck` representation.
References
----------
.. [1] Dierckx P. : An algorithm for surface fitting
with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P. : An algorithm for surface fitting
with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P. : Curve and surface fitting with splines,
Monographs on Numerical Analysis, Oxford University Press, 1993.
"""
tx, ty, c, kx, ky = tck
if not (0 <= dx < kx):
raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
if not (0 <= dy < ky):
raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
x, y = map(atleast_1d, [x, y])
if (len(x.shape) != 1) or (len(y.shape) != 1):
raise ValueError("First two entries should be rank-1 arrays.")
z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
z.shape = len(x), len(y)
if len(z) > 1:
return z
if len(z[0]) > 1:
return z[0]
return z[0][0]
def dblint(xa, xb, ya, yb, tck):
"""Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
tck : list [tx, ty, c, kx, ky]
A sequence of length 5 returned by bisplrep containing the knot
locations tx, ty, the coefficients c, and the degrees kx, ky
of the spline.
Returns
-------
integ : float
The value of the resulting integral.
"""
tx, ty, c, kx, ky = tck
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : tuple
A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
the vector of knots, the B-spline coefficients,
and the degree of the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the new spline.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
Notes
-----
Based on algorithms from [1]_ and [2]_.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
cc = []
for c_vals in c:
tt, cc_val, kk = insert(x, [t, c_vals, k], m)
cc.append(cc_val)
return (tt, cc, kk)
else:
tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
return (tt, cc, k)
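# Illustrative usage sketch (editorial addition, not part of the original
# module): inserting a knot changes the representation but not the curve.
#
#     >>> x = np.linspace(0, 10, 30)
#     >>> tck = splrep(x, np.sin(x))
#     >>> tck2 = insert(5.0, tck)
#     >>> np.allclose(splev(x, tck), splev(x, tck2))   # True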
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
tck_der : tuple of (t2, c2, k2)
Spline of order k2=k-n representing the derivative
of the input spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
if n < 0:
return splantider(tck, -n)
t, c, k = tck
if n > k:
raise ValueError(("Order of derivative (n = %r) must be <= "
"order of spline (k = %r)") % (n, tck[2]))
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + ((None,)*len(c.shape[1:]))
with np.errstate(invalid='raise', divide='raise'):
try:
for j in range(n):
# See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
# Compute the denominator in the differentiation formula.
                # (and append trailing dims, if necessary)
dt = t[k+1:-1] - t[1:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = (c[1:-1-k] - c[:-2-k]) * k / dt
# Pad coefficient array to same size as knots (FITPACK
# convention)
c = np.r_[c, np.zeros((k,) + c.shape[1:])]
# Adjust knots
t = t[1:-1]
k -= 1
except FloatingPointError:
raise ValueError(("The spline has internal repeated knots "
"and is not differentiable %d times") % n)
return t, c, k
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
tck_ader : tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
See Also
--------
splder, splev, spalde
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if n < 0:
return splder(tck, -n)
t, c, k = tck
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + (None,)*len(c.shape[1:])
for j in range(n):
# This is the inverse set of operations to splder.
# Compute the multiplier in the antiderivative formula.
dt = t[k+1:] - t[:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
c = np.r_[np.zeros((1,) + c.shape[1:]),
c,
[c[-1]] * (k+2)]
# New knots
t = np.r_[t[0], t, t[-1]]
k += 1
return t, c, k
| mit |
khkaminska/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
    yy_down = yy - np.sqrt(1 + a ** 2) * margin
    yy_up = yy + np.sqrt(1 + a ** 2) * margin
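    # For the canonical SVM scaling |w . x + b| = 1 at the support vectors, the
    # perpendicular distance from the decision boundary to each margin line is
    # 1 / ||w||; the vertical offset of a line with slope `a` at that distance
    # is margin * sqrt(1 + a**2), as used above.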
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
Hiyorimi/scikit-image | skimage/io/manage_plugins.py | 14 | 10495 | """Handle image reading, writing and plotting plugins.
To improve performance, plugins are only loaded as needed. As a result, there
can be multiple states for a given plugin:
available: Defined in an *ini file located in `skimage.io._plugins`.
See also `skimage.io.available_plugins`.
partial definition: Specified in an *ini file, but not defined in the
corresponding plugin module. This will raise an error when loaded.
available but not on this system: Defined in `skimage.io._plugins`, but
a dependent library (e.g. Qt, PIL) is not available on your system.
This will raise an error when loaded.
loaded: The real availability is determined when it's explicitly loaded,
either because it's one of the default plugins, or because it's
loaded explicitly by the user.
"""
import sys
if sys.version.startswith('3'):
from configparser import ConfigParser # Python 3
else:
from ConfigParser import ConfigParser # Python 2
import os.path
from glob import glob
from .collection import imread_collection_wrapper
__all__ = ['use_plugin', 'call_plugin', 'plugin_info', 'plugin_order',
'reset_plugins', 'find_available_plugins', 'available_plugins']
# The plugin store will save a list of *loaded* io functions for each io type
# (e.g. 'imread', 'imsave', etc.). Plugins are loaded as requested.
plugin_store = None
# Dictionary mapping plugin names to a list of functions they provide.
plugin_provides = {}
# The module names for the plugins in `skimage.io._plugins`.
plugin_module_name = {}
# Meta-data about plugins provided by *.ini files.
plugin_meta_data = {}
# For each plugin type, default to the first available plugin as defined by
# the following preferences.
preferred_plugins = {
# Default plugins for all types (overridden by specific types below).
'all': ['pil', 'matplotlib', 'qt', 'freeimage'],
'imshow': ['matplotlib'],
'imshow_collection': ['matplotlib']
}
def _clear_plugins():
"""Clear the plugin state to the default, i.e., where no plugins are loaded
"""
global plugin_store
plugin_store = {'imread': [],
'imsave': [],
'imshow': [],
'imread_collection': [],
'imshow_collection': [],
'_app_show': []}
_clear_plugins()
def _load_preferred_plugins():
# Load preferred plugin for each io function.
io_types = ['imsave', 'imshow', 'imread_collection', 'imshow_collection',
'imread']
for p_type in io_types:
_set_plugin(p_type, preferred_plugins['all'])
plugin_types = (p for p in preferred_plugins.keys() if p != 'all')
for p_type in plugin_types:
_set_plugin(p_type, preferred_plugins[p_type])
def _set_plugin(plugin_type, plugin_list):
for plugin in plugin_list:
if plugin not in available_plugins:
continue
try:
use_plugin(plugin, kind=plugin_type)
break
except (ImportError, RuntimeError, OSError):
pass
def reset_plugins():
_clear_plugins()
_load_preferred_plugins()
def _parse_config_file(filename):
"""Return plugin name and meta-data dict from plugin config file."""
parser = ConfigParser()
parser.read(filename)
name = parser.sections()[0]
meta_data = {}
for opt in parser.options(name):
meta_data[opt] = parser.get(name, opt)
return name, meta_data
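# A plugin ``.ini`` file is a small single-section config file; a hypothetical
# example:
#
#     [matplotlib]
#     description = Display or save images using Matplotlib
#     provides = imshow, imread, imshow_collection, _app_show
#
# for which _parse_config_file returns
# ('matplotlib', {'description': '...', 'provides': 'imshow, imread, ...'}).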
def _scan_plugins():
"""Scan the plugins directory for .ini files and parse them
to gather plugin meta-data.
"""
pd = os.path.dirname(__file__)
config_files = glob(os.path.join(pd, '_plugins', '*.ini'))
for filename in config_files:
name, meta_data = _parse_config_file(filename)
plugin_meta_data[name] = meta_data
provides = [s.strip() for s in meta_data['provides'].split(',')]
valid_provides = [p for p in provides if p in plugin_store]
for p in provides:
if not p in plugin_store:
print("Plugin `%s` wants to provide non-existent `%s`."
" Ignoring." % (name, p))
# Add plugins that provide 'imread' as provider of 'imread_collection'.
need_to_add_collection = ('imread_collection' not in valid_provides and
'imread' in valid_provides)
if need_to_add_collection:
valid_provides.append('imread_collection')
plugin_provides[name] = valid_provides
plugin_module_name[name] = os.path.basename(filename)[:-4]
_scan_plugins()
def find_available_plugins(loaded=False):
"""List available plugins.
Parameters
----------
loaded : bool
If True, show only those plugins currently loaded. By default,
all plugins are shown.
Returns
-------
p : dict
Dictionary with plugin names as keys and exposed functions as
values.
"""
active_plugins = set()
for plugin_func in plugin_store.values():
for plugin, func in plugin_func:
active_plugins.add(plugin)
d = {}
for plugin in plugin_provides:
if not loaded or plugin in active_plugins:
d[plugin] = [f for f in plugin_provides[plugin]
if not f.startswith('_')]
return d
available_plugins = find_available_plugins()
def call_plugin(kind, *args, **kwargs):
"""Find the appropriate plugin of 'kind' and execute it.
Parameters
----------
kind : {'imshow', 'imsave', 'imread', 'imread_collection'}
Function to look up.
plugin : str, optional
Plugin to load. Defaults to None, in which case the first
matching plugin is used.
*args, **kwargs : arguments and keyword arguments
Passed to the plugin function.
"""
if not kind in plugin_store:
raise ValueError('Invalid function (%s) requested.' % kind)
plugin_funcs = plugin_store[kind]
if len(plugin_funcs) == 0:
msg = ("No suitable plugin registered for %s.\n\n"
"You may load I/O plugins with the `skimage.io.use_plugin` "
"command. A list of all available plugins are shown in the "
"`skimage.io` docstring.")
raise RuntimeError(msg % kind)
plugin = kwargs.pop('plugin', None)
if plugin is None:
_, func = plugin_funcs[0]
else:
_load(plugin)
try:
func = [f for (p, f) in plugin_funcs if p == plugin][0]
except IndexError:
raise RuntimeError('Could not find the plugin "%s" for %s.' %
(plugin, kind))
return func(*args, **kwargs)
def use_plugin(name, kind=None):
"""Set the default plugin for a specified operation. The plugin
will be loaded if it hasn't been already.
Parameters
----------
name : str
Name of plugin.
kind : {'imsave', 'imread', 'imshow', 'imread_collection', 'imshow_collection'}, optional
Set the plugin for this function. By default,
the plugin is set for all functions.
See Also
--------
available_plugins : List of available plugins
Examples
--------
To use Matplotlib as the default image reader, you would write:
>>> from skimage import io
>>> io.use_plugin('matplotlib', 'imread')
To see a list of available plugins run ``io.available_plugins``. Note that
this lists plugins that are defined, but the full list may not be usable
if your system does not have the required libraries installed.
"""
if kind is None:
kind = plugin_store.keys()
else:
if not kind in plugin_provides[name]:
raise RuntimeError("Plugin %s does not support `%s`." %
(name, kind))
if kind == 'imshow':
kind = [kind, '_app_show']
else:
kind = [kind]
_load(name)
for k in kind:
if not k in plugin_store:
raise RuntimeError("'%s' is not a known plugin function." % k)
funcs = plugin_store[k]
# Shuffle the plugins so that the requested plugin stands first
# in line
funcs = [(n, f) for (n, f) in funcs if n == name] + \
[(n, f) for (n, f) in funcs if n != name]
plugin_store[k] = funcs
def _inject_imread_collection_if_needed(module):
"""Add `imread_collection` to module if not already present."""
if not hasattr(module, 'imread_collection') and hasattr(module, 'imread'):
imread = getattr(module, 'imread')
func = imread_collection_wrapper(imread)
setattr(module, 'imread_collection', func)
def _load(plugin):
"""Load the given plugin.
Parameters
----------
plugin : str
Name of plugin to load.
See Also
--------
plugins : List of available plugins
"""
if plugin in find_available_plugins(loaded=True):
return
if not plugin in plugin_module_name:
raise ValueError("Plugin %s not found." % plugin)
else:
modname = plugin_module_name[plugin]
plugin_module = __import__('skimage.io._plugins.' + modname,
fromlist=[modname])
provides = plugin_provides[plugin]
for p in provides:
if p == 'imread_collection':
_inject_imread_collection_if_needed(plugin_module)
elif not hasattr(plugin_module, p):
print("Plugin %s does not provide %s as advertised. Ignoring." %
(plugin, p))
continue
store = plugin_store[p]
func = getattr(plugin_module, p)
if not (plugin, func) in store:
store.append((plugin, func))
def plugin_info(plugin):
"""Return plugin meta-data.
Parameters
----------
plugin : str
Name of plugin.
Returns
-------
m : dict
Meta data as specified in plugin ``.ini``.
"""
try:
return plugin_meta_data[plugin]
except KeyError:
raise ValueError('No information on plugin "%s"' % plugin)
def plugin_order():
"""Return the currently preferred plugin order.
Returns
-------
p : dict
Dictionary of preferred plugin order, with function name as key and
plugins (in order of preference) as value.
"""
p = {}
for func in plugin_store:
p[func] = [plugin_name for (plugin_name, f) in plugin_store[func]]
return p
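# --- illustrative usage sketch (not part of the module); assumes the
# matplotlib plugin is installed ---
#
#     >>> from skimage import io
#     >>> io.use_plugin('matplotlib', 'imread')
#     >>> io.plugin_order()['imread'][0]
#     'matplotlib'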
| bsd-3-clause |
jowr/le-logger | webapp/database.py | 1 | 3406 | from sqlalchemy import Column, Float, Integer, String, DateTime, ForeignKey
from sqlalchemy import create_engine, and_, func
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, reconstructor
import datetime, random
import numpy as np
import pandas as pd
def DUMMY_DATA_RND(max_value, points):
rng = range(int(max_value))
return [random.choice(rng) for r in range(int(points))]
def DUMMY_DATA_SRT(max_value, points):
return sorted(DUMMY_DATA_RND(max_value, points))
Base = declarative_base()
class Campaign(Base):
__tablename__ = 'campaigns'
id = Column(Integer, primary_key=True)
name = Column(String(250))
desc = Column(String(1000))
class DataSet(Base):
__tablename__ = 'datasets'
id = Column(Integer, primary_key=True)
name = Column(String(250))
time_series = Column(postgresql.ARRAY(DateTime))
temp_series = Column(postgresql.ARRAY(Float))
humi_series = Column(postgresql.ARRAY(Float))
campaign_id = Column(Integer, ForeignKey('campaigns.id'))
# campaign = relationship("Campaign")#, back_populates="datasets")
@reconstructor
def init_on_load(self):
self.time_series = np.asanyarray(self.time_series)
self.temp_series = np.asanyarray(self.temp_series)
self.humi_series = np.asanyarray(self.humi_series)
def as_data_frame(self):
data_frame = pd.DataFrame(columns=['timestamp', 'temperature', 'humidity'])
data_frame['timestamp'] = self.time_series
data_frame['temperature'] = self.temp_series
data_frame['humidity'] = self.humi_series
#data_frame['weekday'] = data_frame['datetime'].dt.dayofweek
#data_frame['time'] = data_frame['datetime'].dt.time
#data_frame['hour'] = data_frame.time.dt.hour
#hours_filter = (data_frame.time.dt.hour >= 15) & (data_frame.time.dt.hour <= 21)
return data_frame
def set_dummy_data(self, max_value=100, points=10):
self.id = 0
self.name = "Dummy name {0}".format(DUMMY_DATA_RND(1e5, 1)[0])
self.time_series = np.asanyarray(DUMMY_DATA_SRT(max_value, points))
self.temp_series = np.asanyarray(DUMMY_DATA_SRT(max_value, points))
self.humi_series = np.asanyarray(DUMMY_DATA_SRT(max_value, points))
#Campaign.datasets = relationship("DataSet", order_by=DataSet.id, back_populates="campaign")
def create_models_engine(PGDB_URI, echo=False):
engine = create_engine(PGDB_URI, connect_args={'sslmode':'require'}, echo=echo)
Base.metadata.create_all(engine)
return engine
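# --- illustrative usage sketch (not part of the module); the URI and campaign
# name are placeholders ---
def example_query(pgdb_uri, campaign_name):
    engine = create_models_engine(pgdb_uri)
    session = sessionmaker(bind=engine)()
    campaign, datasets = get_campaign_and_data(session, campaign_name)
    # concatenating assumes the campaign has at least one dataset
    return pd.concat([ds.as_data_frame() for ds in datasets])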
#DBSession = sessionmaker(bind=engine)
#session = DBSession()
#testcases = [{"numbers": [25, 33, 42, 55], "name": "David"}, {"numbers": [11, 33, 7, 19 ], "name": "Salazar"}, {"numbers": [32, 6, 20, 23 ], "name": "Belinda"}, {"numbers": [19, 20, 27, 8 ], "name": "Casey"}, {"numbers": [25, 31, 10, 40 ], "name": "Kathie"}, {"numbers": [25, 20, 40, 39 ], "name": "Dianne"}, {"numbers": [1, 20, 18, 38 ], "name": "Cortez"} ]
#for t in testcases:
# session.add(TestUser(name=t['name'], numbers=t['numbers']))
#session.commit()
def get_campaign_and_data(session, campaign_name):
ca = session.query(Campaign).filter(Campaign.name == campaign_name).one()
ds_s = session.query(DataSet).filter(DataSet.campaign_id == ca.id).all()
return ca, ds_s | gpl-3.0 |
imamol555/Machine-Learning | knn_math_iris.py | 1 | 1355 | from sklearn.datasets import load_iris
iris = load_iris()
from scipy.spatial import distance
def euc(a,b):
return distance.euclidean(a,b)
class MyKNN():
def fit(self,X_train, y_train):
self.X_train = X_train
self.y_train = y_train
def predict(self,X_test):
predictions = []
for row in X_test:
label = self.closest(row)
predictions.append(label)
return predictions
def closest(self,row):
best_dist = euc(row,self.X_train[0])
best_index = 0
for i in range(1,len(self.X_train)):
dist = euc(row,self.X_train[i])
if(dist < best_dist):
best_dist = dist
best_index = i
return self.y_train[best_index]
X = iris.data
y = iris.target
#split the data into two halves- train and test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5)
# create a model
clf = MyKNN()
#from sklearn.neighbors import KNeighborsClassifier
#clf = KNeighborsClassifier() # accuracy 0.9466
clf.fit(X_train,y_train)
#predict for test data
predictions = clf.predict(X_test)
#calculate the accuracy
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test,predictions))
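# --- illustrative extension (not part of the original script) ---
# MyKNN above is effectively k=1 nearest neighbour; a k>1 majority-vote
# variant could look like this:
from collections import Counter

def knn_predict(X_train, y_train, row, k=3):
    # sort training points by distance to `row` and vote among the k nearest
    dists = sorted((euc(row, x), y) for x, y in zip(X_train, y_train))
    return Counter(label for _, label in dists[:k]).most_common(1)[0][0]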
| mit |
Vimos/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 73 | 2264 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
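# --- illustrative note (not part of the solution) ---
# The char analyzer emits overlapping 1- to 3-grams; for example
#     TfidfVectorizer(ngram_range=(1, 3), analyzer='char').build_analyzer()(u'abc')
# returns [u'a', u'b', u'c', u'ab', u'bc', u'abc'].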
| bsd-3-clause |
hesseltuinhof/mxnet | example/reinforcement-learning/ddpg/strategies.py | 15 | 1705 | import numpy as np
class BaseStrategy(object):
"""
Base class of exploration strategy.
"""
def get_action(self, obs, policy):
raise NotImplementedError
def reset(self):
pass
class OUStrategy(BaseStrategy):
"""
Ornstein-Uhlenbeck process: dxt = theta * (mu - xt) * dt + sigma * dWt
where Wt denotes the Wiener process.
"""
def __init__(self, env_spec, mu=0, theta=0.15, sigma=0.3):
self.mu = mu
self.theta = theta
self.sigma = sigma
self.action_space = env_spec.action_space
self.state = np.ones(self.action_space.flat_dim) * self.mu
def evolve_state(self):
x = self.state
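        # Euler-Maruyama step of the SDE in the class docstring with dt = 1
        # and a unit-variance Gaussian increment standing in for dWt.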
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state
def reset(self):
self.state = np.ones(self.action_space.flat_dim) * self.mu
def get_action(self, obs, policy):
# get_action accepts a 2D tensor with one row
obs = obs.reshape((1, -1))
action = policy.get_action(obs)
increment = self.evolve_state()
return np.clip(action + increment,
self.action_space.low,
self.action_space.high)
if __name__ == "__main__":
class Env1(object):
def __init__(self):
self.action_space = Env2()
class Env2(object):
def __init__(self):
self.flat_dim = 2
env_spec = Env1()
test = OUStrategy(env_spec)
states = []
for i in range(1000):
states.append(test.evolve_state()[0])
import matplotlib.pyplot as plt
plt.plot(states)
plt.show()
| apache-2.0 |
sephalon/pylibtiff | libtiff/script_options.py | 13 | 4443 |
__all__ = ['set_formatter', 'set_info_options', 'set_convert_options']
import os
from optparse import OptionGroup, NO_DEFAULT
from optparse import TitledHelpFormatter
try:
import wx
have_wx = True
except ImportError:
have_wx = False
class MyHelpFormatter(TitledHelpFormatter):
def format_option(self, option):
old_help = option.help
default = option.default
if isinstance (default, str) and ' ' in default:
default = repr (default)
if option.help is None:
option.help = 'Specify a %s.' % (option.type)
if option.type=='choice':
choices = []
for choice in option.choices:
if choice==option.default:
if ' ' in choice:
choice = repr(choice)
choice = '['+choice+']'
else:
if ' ' in choice:
choice = repr(choice)
choices.append (choice)
option.help = '%s Choices: %s.'% (option.help, ', '.join(choices))
else:
if default != NO_DEFAULT:
if option.action=='store_false':
option.help = '%s Default: %s.'% (option.help, not default)
else:
option.help = '%s Default: %s.'% (option.help, default)
result = TitledHelpFormatter.format_option (self, option)
option.help = old_help
return result
help_formatter = MyHelpFormatter()
def set_formatter(parser):
"""Set customized help formatter.
"""
parser.formatter = help_formatter
def set_convert_options(parser):
set_formatter(parser)
if os.name == 'posix':
try:
import matplotlib
matplotlib.use('GTkAgg')
parser.run_methods = ['subcommand']
except ImportError:
pass
parser.set_usage ('%prog [options] -i INPUTPATH [-o OUTPUTPATH]')
parser.set_description('Convert INPUTPATH to OUTPUTPATH.')
parser.add_option ('--input-path', '-i',
type = 'file' if have_wx else str, metavar='INPUTPATH',
help = 'Specify INPUTPATH.'
)
parser.add_option ('--output-path', '-o',
type = 'file' if have_wx else str, metavar='OUTPUTPATH',
help = 'Specify OUTPUTPATH.'
)
parser.add_option ('--compression',
type = 'choice', default='none',
choices = ['none', 'lzw'],
help = 'Specify compression.'
)
parser.add_option ('--slice',
type = 'string',
help = 'Specify slice using form "<zstart>:<zend>,<ystart>:<yend>,<xstart>:<xend>"'
)
def set_info_options(parser):
set_formatter(parser)
if os.name == 'posix':
try:
import matplotlib
matplotlib.use('GTkAgg')
parser.run_methods = ['subcommand']
except ImportError:
pass
parser.set_usage ('%prog [options] -i INPUTPATH')
parser.set_description('Show INPUTPATHs information.')
parser.add_option ('--input-path', '-i',
type = 'file' if have_wx else str, metavar='INPUTPATH',
help = 'Specify INPUTPATH.'
)
parser.add_option ('--memory-usage',
action = 'store_true', default=False,
help = 'Show TIFF file memory usage.'
)
parser.add_option ('--no-memory-usage', dest='memory_usage',
action = 'store_false',
help = 'See --memory-usage.'
)
parser.add_option ('--ifd',
action = 'store_true', default=False,
help = 'Show all TIFF file image file directory. By default, only the first IFD is shown.'
)
parser.add_option ('--no-ifd', dest='ifd',
action = 'store_false', help='See --ifd.')
parser.add_option ('--human',
action = 'store_true', default=False,
help = 'Show human readable values'
)
parser.add_option ('--no-human', dest='human',
action = 'store_false', help='See --human.')
| bsd-3-clause |
waterponey/scikit-learn | sklearn/feature_extraction/hashing.py | 74 | 6153 | # Author: Lars Buitinck
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
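# --- illustrative note (not part of the class) ---
# With input_type="string" each sample is an iterable of tokens and every
# token gets an implied value of 1, e.g.:
#
#     >>> h = FeatureHasher(n_features=8, input_type="string")
#     >>> h.transform([["dog", "cat", "dog"], ["run"]]).shape
#     (2, 8)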
| bsd-3-clause |
beepee14/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This dataset consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
measurements, stored in a 150x4 numpy.ndarray.
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The plot below uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions,
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
jseabold/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 13 | 10007 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
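    # Worked example: n_features=11, n_features_to_select=3, step=2 gives
    # formula1 = 1 + (11 + 2 - 3 - 1) // 2 = 5 and
    # formula2 = 1 + ceil((11 - 3) / 2.) = 5, so the two agree.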
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
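# --- illustrative note (not part of the example) ---
# After the fit, the selected hyper-parameters are available as, e.g.:
#     estimator.best_params_  # {'logistic__C': ..., 'pca__n_components': ...}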
| bsd-3-clause |
westurner/dotfiles | scripts/setup_scipy_deb.py | 1 | 2526 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
"""
deb_scipy : symlink numpy and scipy in from debian packages
"""
import os
import sys
import logging
def apt_install_numpy_scipy():
cmd = ('sudo','apt-get','install',
'python-numpy',
'python-scipy',
           'python-dateutil',
'python-tz')
subprocess.call(cmd)
def apt_install_matplotlib():
cmd = ('sudo', 'apt-get', 'install',
'python-matplotlib')
subprocess.call(cmd)
# creates:
# /usr/lib/pyshared/python2.7/matplotlib (.so files)
# /usr/share/pyshared/matplotlib (.py files)
# /usr/share/pyshared/mpl_toolkits
# /usr/share/pyshared/pylab.py
# TODO: determine how to symlink these into a virtualenv
def pip_install_matplotlib(eggpath='matplotlib'):
cmd = ('pip','install',eggpath)
subprocess.call(cmd)
def deb_scipy(venv=None, aptget=False):
"""
install numpy and scipy
"""
if aptget:
apt_install_numpy_scipy()
if venv is None:
venv = os.environ['VIRTUAL_ENV']
sysver = '%d.%d' % (sys.version_info[:2])
env = dict(
VIRTUAL_ENV=venv,
sys_shared="/usr/share/pyshared",
sys_pkgs="/usr/lib/python%s/dist-packages" % sysver,
site_pkgs=os.path.join(venv,
'lib/python%s/site-packages' % sysver),
)
pkgs = ['numpy','scipy','dateutil','pytz']
for pkg in pkgs:
src=os.path.join(env['sys_pkgs'],pkg)
dst=os.path.join(env['site_pkgs'],pkg)
logging.info("symlinking from %r to %r" % (src,dst))
os.remove(dst)
        # *nix only: create a symlink; os.link would create a hard link,
        # which fails for package directories
        os.symlink(src, dst)
def main():
import optparse
import logging
prs = optparse.OptionParser(usage="./%prog : args")
prs.add_option('-v', '--verbose',
dest='verbose',
action='store_true',)
prs.add_option('-q', '--quiet',
dest='quiet',
action='store_true',)
prs.add_option('-t', '--test',
dest='run_tests',
action='store_true',)
(opts, args) = prs.parse_args()
if not opts.quiet:
logging.basicConfig()
if opts.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if opts.run_tests:
import sys
sys.argv = [sys.argv[0]] + args
import unittest
exit(unittest.main())
deb_scipy()
pip_install_matplotlib()
if __name__ == "__main__":
main()
| bsd-3-clause |
aponom84/MetrizedSmallWorld | hist.py | 1 | 1556 | # -*- coding: utf-8 -*-
import csv
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np
#fldName = 'greedyWalkPathLenght'
#inFileName = 'commonFeatures_erdesh.csv'
inFileName = 'commonFeatures_sp_erdesh.csv'
fldName = 'sp'
outFileName = fldName + "_distr.csv"
numbers=[]
#with open('commonFeatures_erdesh.csv', 'rb') as csvfile:
with open(inFileName) as csvfile:
#spreader = csv.reader(csvfile, delimiter=';', quotechar='|')
reader = csv.DictReader(csvfile, delimiter=';', quotechar='|');
for row in reader:
numbers.append(int( row[fldName]))
print len(numbers)
print numbers[0:20]
myMaxNumber = np.amax(numbers)
print "myMaxNumber is %i" % myMaxNumber
#bins = array.array('i',(i for i in range(0,myMaxNumber)))
bins = [i for i in xrange(myMaxNumber+1)]
n, bins, patches = plt.hist(numbers, bins, normed=1, facecolor='green', alpha=0.5)
print n
with open(outFileName, 'wb') as csvWriterFile:
fieldnames = ['lenght', 'probability']
writer = csv.DictWriter(csvWriterFile, fieldnames=fieldnames,delimiter=';')
writer.writeheader()
for x,y in zip(bins, n):
writer.writerow({'lenght': x, 'probability': str(y).replace(".",",")})
#print "x: %i y: %.5f" % (x,y)
# add a 'best fit' line
#mu=5;
#sigma = 3;
#y = mlab.normpdf(bins, mu, sigma)
#plt.plot(bins, y, 'r--')
#plt.xlabel("Smart")
plt.xlabel("Lenght")
plt.ylabel('Probability')
plt.title('Histogram of %s' % fldName)
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
#plt.show() | lgpl-3.0 |
udrg/rpg_svo | svo_analysis/src/svo_analysis/hand_eye_calib.py | 17 | 7489 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 15:47:55 2013
@author: cforster
"""
import os
import yaml
import numpy as np
import svo_analysis.tum_benchmark_tools.associate as associate
import vikit_py.align_trajectory as align_trajectory
import vikit_py.transformations as transformations
import matplotlib.pyplot as plt
# user config
display = True
dataset_dir = '/home/cforster/catkin_ws/src/rpg_svo/svo_analysis/results/rpg_circle_1'
n_measurements = 400
n_align_sim3 = 600
delta = 50
# load dataset parameters
params = yaml.load(open(os.path.join(dataset_dir, 'dataset_params.yaml')))
# set trajectory groundtruth and estimate file
traj_groundtruth = os.path.join(dataset_dir, 'groundtruth.txt')
traj_estimate = os.path.join(dataset_dir,'traj_estimate.txt')
# load data
data_gt = associate.read_file_list(traj_groundtruth)
data_es = associate.read_file_list(traj_estimate)
# select matches
offset = -params['cam_delay']
print ('offset = '+str(offset))
matches = associate.associate(data_gt, data_es, offset, 0.02)
#matches = matches[500:]
p_gt = np.array([[float(value) for value in data_gt[a][0:3]] for a,b in matches])
q_gt = np.array([[float(value) for value in data_gt[a][3:7]] for a,b in matches])
p_es = np.array([[float(value) for value in data_es[b][0:3]] for a,b in matches])
q_es = np.array([[float(value) for value in data_es[b][3:7]] for a,b in matches])
# --------------------------------------------------------------------------------
# align Sim3 to get scale
scale,rot,trans = align_trajectory.align_sim3(p_gt[0:n_align_sim3,:], p_es[0:n_align_sim3,:])
#model_aligned = s * R * model + t
#alignment_error = model_aligned - data
#t_error = np.sqrt(np.sum(np.multiply(alignment_error,alignment_error),0)).A[0]
p_es_aligned = np.transpose(scale*np.dot(rot,np.transpose(p_es)))+trans
p_es = scale*p_es
print 's='+str(scale)
print 't='+str(trans)
print 'R='+str(rot)
# plot sim3 aligned trajectory
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax.plot(p_es_aligned[:,0], p_es_aligned[:,1], 'r-', label='estimate', alpha=0.2)
ax.plot(p_gt[:,0], p_gt[:,1], 'b-', label='groundtruth', alpha=0.2)
ax.plot(p_es_aligned[:n_align_sim3,0], p_es_aligned[:n_align_sim3,1], 'g-', label='aligned', linewidth=2)
ax.plot(p_gt[:n_align_sim3,0], p_gt[:n_align_sim3,1], 'm-', label='aligned', linewidth=2)
ax.legend()
for (x1,y1,z1),(x2,y2,z2) in zip(p_es_aligned[:n_align_sim3:10],p_gt[:n_align_sim3:10]):
ax.plot([x1,x2],[y1,y2],'-',color="red")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(p_gt[:,0], 'r-')
ax.plot(p_gt[:,1], 'g-')
ax.plot(p_gt[:,2], 'b-')
ax.plot(p_es_aligned[:,0], 'r--')
ax.plot(p_es_aligned[:,1], 'g--')
ax.plot(p_es_aligned[:,2], 'b--')
# --------------------------------------------------------------------------------
# hand-eye-calib
# select random measurements
I = np.array(np.random.rand(n_measurements,1)*(np.shape(matches)[0]-delta), dtype=int)[:,0]
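# Hand-eye calibration estimates the fixed rigid transform (R, b) between the
# estimator frame and the groundtruth frame from pairs of relative motions
# (the classical AX = XB formulation), using the randomly sampled index pairs.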
R,b = align_trajectory.hand_eye_calib(q_gt, q_es, p_gt, p_es, I, delta, True)
print 'quat = ' + str(transformations.quaternion_from_matrix(transformations.convert_3x3_to_4x4(R)))
print 'b = ' + str(b)
rpy_es = np.zeros([q_es.shape[0]-1, 3])
rpy_gt = np.zeros([q_gt.shape[0]-1, 3])
t_gt = np.zeros([q_es.shape[0]-1,3])
t_es = np.zeros([q_es.shape[0]-1,3])
for i in range(delta,np.shape(q_es)[0]):
A1 = transformations.quaternion_matrix(q_es[i-delta,:])[:3,:3]
A2 = transformations.quaternion_matrix(q_es[i,:])[:3,:3]
A = np.dot(A1.transpose(), A2)
B1 = transformations.quaternion_matrix(q_gt[i-delta,:])[:3,:3]
B2 = transformations.quaternion_matrix(q_gt[i,:])[:3,:3]
B = np.dot(B1.transpose(), B2)
B_es = np.dot(np.transpose(R), np.dot(A, R))
rpy_gt[i-delta,:] = transformations.euler_from_matrix(B, 'rzyx')
rpy_es[i-delta,:] = transformations.euler_from_matrix(B_es, 'rzyx')
t_B = np.dot(np.transpose(B1),(p_gt[i,:]-p_gt[i-delta,:]))
t_A = np.dot(np.transpose(A1),(p_es[i,:]-p_es[i-delta,:]))
t_gt[i-delta,:] = t_B
t_es[i-delta,:] = np.dot(np.transpose(R), np.dot(A,b[:,0]) + t_A - b[:,0])
alignment_error = (t_gt-t_es)
error = np.sqrt(np.sum(np.multiply(alignment_error,alignment_error),1))
I_accurate = np.argwhere(error < np.percentile(error, 90))[:,0]
if display:
plt.figure()
plt.plot(rpy_es[:,0], 'r-', label='es roll')
plt.plot(rpy_es[:,1], 'g-', label='es pitch')
plt.plot(rpy_es[:,2], 'b-', label='es yaw')
plt.plot(rpy_gt[:,0], 'r--', label='gt roll')
plt.plot(rpy_gt[:,1], 'g--', label='gt pitch')
plt.plot(rpy_gt[:,2], 'b--', label='gt yaw')
plt.legend()
plt.figure()
plt.plot(t_gt[:,0], 'r-', label='gt x')
plt.plot(t_gt[:,1], 'g-', label='gt y')
plt.plot(t_gt[:,2], 'b-', label='gt z')
plt.plot(t_es[:,0], 'r--', label='es x')
plt.plot(t_es[:,1], 'g--', label='es y')
plt.plot(t_es[:,2], 'b--', label='es z')
plt.legend()
plt.figure()
plt.plot(error,'-g')
print 'e_rms = ' + str(np.sqrt(np.dot(error,error) / len(error)))
print 'e_mean = ' + str(np.mean(error))
print 'e_median = ' + str(np.median(error))
print 'e_std = ' + str(np.std(error))
# now sample again from the filtered list:
N = 10
display = False
n_acc_meas = I_accurate.size
n_measurements = 500
#for i in range(5):
# print '-------------------------------------'
# i0 = np.array(rand(n_measurements,1)*np.shape(I_accurate)[0], dtype=int)[:,0]
# i1 = np.minimum(i0+N, n_acc_meas-1)
# I = np.empty(i0.size*2, dtype=int)
# I[0::2] = I_accurate[i0]
# I[1::2] = I_accurate[i1]
# R,b = handEyeCalib(q_gt[I,:], q_es[I,:], p_gt[I,:], p_es[I,:], True)
# print 'quat = ' + str(ru.dcm2quat(R))
# print 'b = ' + str(b)
# rpy_es = np.zeros([q_es.shape[0]-1, 3])
# rpy_gt = np.zeros([q_gt.shape[0]-1, 3])
# t_gt = np.zeros([q_es.shape[0]-1,3])
# t_es = np.zeros([q_es.shape[0]-1,3])
#
# delta = 10
# for i in range(delta,np.shape(q_es)[0]):
# A1 = ru.quat2dcm(q_es[i-delta,:])
# A2 = ru.quat2dcm(q_es[i,:])
# A = np.dot(A1.transpose(), A2)
# B1 = ru.quat2dcm(q_gt[i-delta,:])
# B2 = ru.quat2dcm(q_gt[i,:])
# B = np.dot(B1.transpose(), B2)
# B_es = np.dot(np.transpose(R), np.dot(A, R))
# rpy_gt[i-delta,:] = ru.dcm2rpy(B)
# rpy_es[i-delta,:] = ru.dcm2rpy(B_es)
# t_B = np.dot(np.transpose(B1),(p_gt[i,:]-p_gt[i-delta,:]))
# t_A = np.dot(np.transpose(A1),(p_es[i,:]-p_es[i-delta,:]))
# t_gt[i-delta,:] = t_B
# t_es[i-delta,:] = np.dot(np.transpose(R), np.dot(A,b[:,0]) + t_A - b[:,0])
# alignment_error = (t_gt-t_es)
# error = np.sqrt(np.sum(np.multiply(alignment_error,alignment_error),1))
#
# if display:
# plt.figure()
# plt.plot(rpy_es[:,0], 'r-', label='es roll')
# plt.plot(rpy_es[:,1], 'g-', label='es pitch')
# plt.plot(rpy_es[:,2], 'b-', label='es yaw')
# plt.plot(rpy_gt[:,0], 'r--', label='gt roll')
# plt.plot(rpy_gt[:,1], 'g--', label='gt pitch')
# plt.plot(rpy_gt[:,2], 'b--', label='gt yaw')
# plt.legend()
# plt.figure()
# plt.plot(t_gt[:,0], 'r-', label='es x')
# plt.plot(t_gt[:,1], 'g-', label='es y')
# plt.plot(t_gt[:,2], 'b-', label='es z')
# plt.plot(t_es[:,0], 'r--', label='gt x')
# plt.plot(t_es[:,1], 'g--', label='gt y')
# plt.plot(t_es[:,2], 'b--', label='gt z')
# plt.legend()
# plt.figure()
# plt.plot(error,'-g')
#
# print 'e_rms = ' + str(np.sqrt(np.dot(error,error) / len(error)))
# print 'e_mean = ' + str(np.mean(error))
# print 'e_median = ' + str(np.median(error))
# print 'e_std = ' + str(np.std(error))
| gpl-3.0 |
cpcloud/blaze | blaze/tests/test_interactive.py | 2 | 12275 | import datetime
import sys
from types import MethodType
from datashape import dshape
import pandas as pd
import pandas.util.testing as tm
import pytest
import numpy as np
from odo import into, append
from odo.backends.csv import CSV
from blaze import discover, transform
from blaze.compatibility import pickle
from blaze.expr import symbol
from blaze.interactive import Data, compute, concrete_head, expr_repr, to_html
from blaze.utils import tmpfile, example
data = (('Alice', 100),
('Bob', 200))
L = [[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]]
t = Data(data, fields=['name', 'amount'])
x = np.ones((2, 2))
def test_table_raises_on_inconsistent_inputs():
with pytest.raises(ValueError):
t = Data(data, schema='{name: string, amount: float32}',
dshape=dshape("{name: string, amount: float32}"))
def test_resources():
assert t._resources() == {t: t.data}
def test_resources_fail():
t = symbol('t', 'var * {x: int, y: int}')
d = t[t['x'] > 100]
with pytest.raises(ValueError):
compute(d)
def test_compute_on_Data_gives_back_data():
assert compute(Data([1, 2, 3])) == [1, 2, 3]
def test_len():
assert len(t) == 2
assert len(t.name) == 2
def test_compute():
assert list(compute(t['amount'] + 1)) == [101, 201]
def test_create_with_schema():
t = Data(data, schema='{name: string, amount: float32}')
assert t.schema == dshape('{name: string, amount: float32}')
def test_create_with_raw_data():
t = Data(data, fields=['name', 'amount'])
assert t.schema == dshape('{name: string, amount: int64}')
assert t.name
assert t.data == data
def test_repr():
result = expr_repr(t['name'])
print(result)
assert isinstance(result, str)
assert 'Alice' in result
assert 'Bob' in result
assert '...' not in result
result = expr_repr(t['amount'] + 1)
print(result)
assert '101' in result
t2 = Data(tuple((i, i**2) for i in range(100)), fields=['x', 'y'])
assert t2.dshape == dshape('100 * {x: int64, y: int64}')
result = expr_repr(t2)
print(result)
assert len(result.split('\n')) < 20
assert '...' in result
def test_str_does_not_repr():
# see GH issue #1240.
d = Data([('aa', 1), ('b', 2)], name="ZZZ",
dshape='2 * {a: string, b: int64}')
expr = transform(d, c=d.a.strlen() + d.b)
assert str(
expr) == "Merge(_child=ZZZ, children=(ZZZ, label(strlen(_child=ZZZ.a) + ZZZ.b, 'c')))"
def test_repr_of_scalar():
assert repr(t.amount.sum()) == '300'
def test_mutable_backed_repr():
mutable_backed_table = Data([[0]], fields=['col1'])
repr(mutable_backed_table)
def test_dataframe_backed_repr():
df = pd.DataFrame(data=[0], columns=['col1'])
dataframe_backed_table = Data(df)
repr(dataframe_backed_table)
def test_dataframe_backed_repr_complex():
df = pd.DataFrame([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
columns=['id', 'name', 'balance'])
t = Data(df)
repr(t[t['balance'] < 0])
def test_repr_html_on_no_resources_symbol():
t = symbol('t', '5 * {id: int, name: string, balance: int}')
assert to_html(t) == 't'
def test_expr_repr_empty():
s = repr(t[t.amount > 1e9])
assert isinstance(s, str)
assert 'amount' in s
def test_to_html():
s = to_html(t)
assert s
assert 'Alice' in s
assert '<table' in s
assert to_html(1) == '1'
assert to_html(t.count()) == '2'
def test_to_html_on_arrays():
s = to_html(Data(np.ones((2, 2))))
assert '1' in s
assert 'br>' in s
def test_repr_html():
assert '<table' in t._repr_html_()
assert '<table' in t.name._repr_html_()
def test_into():
assert into(list, t) == into(list, data)
def test_serialization():
import pickle
t2 = pickle.loads(pickle.dumps(t))
assert t.schema == t2.schema
assert t._name == t2._name
def test_table_resource():
with tmpfile('csv') as filename:
ds = dshape('var * {a: int, b: int}')
csv = CSV(filename)
append(csv, [[1, 2], [10, 20]], dshape=ds)
t = Data(filename)
assert isinstance(t.data, CSV)
assert into(list, compute(t)) == into(list, csv)
def test_concretehead_failure():
t = symbol('t', 'var * {x:int, y:int}')
d = t[t['x'] > 100]
with pytest.raises(ValueError):
concrete_head(d)
def test_into_np_ndarray_column():
t = Data(L, fields=['id', 'name', 'balance'])
expr = t[t.balance < 0].name
colarray = into(np.ndarray, expr)
assert len(list(compute(expr))) == len(colarray)
def test_into_nd_array_selection():
t = Data(L, fields=['id', 'name', 'balance'])
expr = t[t['balance'] < 0]
selarray = into(np.ndarray, expr)
assert len(list(compute(expr))) == len(selarray)
def test_into_nd_array_column_failure():
tble = Data(L, fields=['id', 'name', 'balance'])
expr = tble[tble['balance'] < 0]
colarray = into(np.ndarray, expr)
assert len(list(compute(expr))) == len(colarray)
def test_Data_attribute_repr():
t = Data(CSV(example('accounts-datetimes.csv')))
result = t.when.day
expected = pd.DataFrame({'when_day': [1, 2, 3, 4, 5]})
assert repr(result) == repr(expected)
def test_can_trivially_create_csv_Data():
Data(example('iris.csv'))
# in context
with Data(example('iris.csv')) as d:
assert d is not None
def test_can_trivially_create_csv_Data_with_unicode():
if sys.version[0] == '2':
assert isinstance(Data(example(u'iris.csv')).data, CSV)
def test_can_trivially_create_sqlite_table():
pytest.importorskip('sqlalchemy')
Data('sqlite:///'+example('iris.db')+'::iris')
# in context
with Data('sqlite:///'+example('iris.db')+'::iris') as d:
assert d is not None
@pytest.mark.xfail(sys.platform != 'darwin', reason="h5py/pytables mismatch")
@pytest.mark.skipif(sys.version_info[:2] == (3, 4) and sys.platform == 'win32',
reason='PyTables + Windows + Python 3.4 crashes')
def test_can_trivially_create_pytables():
pytest.importorskip('tables')
with Data(example('accounts.h5')+'::/accounts') as d:
assert d is not None
def test_data_passes_kwargs_to_resource():
assert Data(example('iris.csv'), encoding='ascii').data.encoding == 'ascii'
def test_data_on_iterator_refies_data():
data = [1, 2, 3]
d = Data(iter(data))
assert into(list, d) == data
assert into(list, d) == data
# in context
with Data(iter(data)) as d:
assert d is not None
def test_Data_on_json_is_concrete():
d = Data(example('accounts-streaming.json'))
assert compute(d.amount.sum()) == 100 - 200 + 300 + 400 - 500
assert compute(d.amount.sum()) == 100 - 200 + 300 + 400 - 500
def test_repr_on_nd_array_doesnt_err():
d = Data(np.ones((2, 2, 2)))
repr(d + 1)
def test_generator_reprs_concretely():
x = [1, 2, 3, 4, 5, 6]
d = Data(x)
expr = d[d > 2] + 1
assert '4' in repr(expr)
def test_incompatible_types():
d = Data(pd.DataFrame(L, columns=['id', 'name', 'amount']))
with pytest.raises(ValueError):
d.id == 'foo'
result = compute(d.id == 3)
expected = pd.Series([False, False, True, False, False], name='id')
tm.assert_series_equal(result, expected)
def test___array__():
x = np.ones(4)
d = Data(x)
assert (np.array(d + 1) == x + 1).all()
d = Data(x[:2])
x[2:] = d + 1
assert x.tolist() == [1, 1, 2, 2]
def test_python_scalar_protocols():
d = Data(1)
assert int(d + 1) == 2
assert float(d + 1.0) == 2.0
assert bool(d > 0) is True
assert complex(d + 1.0j) == 1 + 1.0j
def test_iter():
x = np.ones(4)
d = Data(x)
assert list(d + 1) == [2, 2, 2, 2]
@pytest.mark.xfail(
reason="DataFrame constructor doesn't yet support __array__"
)
def test_DataFrame():
x = np.array([(1, 2), (1., 2.)], dtype=[('a', 'i4'), ('b', 'f4')])
d = Data(x)
assert isinstance(pd.DataFrame(d), pd.DataFrame)
def test_head_compute():
data = tm.makeMixedDataFrame()
t = symbol('t', discover(data))
db = into('sqlite:///:memory:::t', data, dshape=t.dshape)
n = 2
d = Data(db)
# skip the header and the ... at the end of the repr
expr = d.head(n)
s = repr(expr)
assert '...' not in s
result = s.split('\n')[1:]
assert len(result) == n
def test_scalar_sql_compute():
t = into('sqlite:///:memory:::t', data,
dshape=dshape('var * {name: string, amount: int}'))
d = Data(t)
assert repr(d.amount.sum()) == '300'
def test_no_name_for_simple_data():
d = Data([1, 2, 3])
assert repr(d) == ' \n0 1\n1 2\n2 3'
assert not d._name
d = Data(1)
assert not d._name
assert repr(d) == '1'
def test_coerce_date_and_datetime():
x = datetime.datetime.now().date()
d = Data(x)
assert repr(d) == repr(x)
x = pd.Timestamp.now()
d = Data(x)
assert repr(d) == repr(x)
x = np.nan
d = Data(x, dshape='datetime')
assert repr(d) == repr(pd.NaT)
x = float('nan')
d = Data(x, dshape='datetime')
assert repr(d) == repr(pd.NaT)
def test_highly_nested_repr():
data = [[0, [[1, 2], [3]], 'abc']]
d = Data(data)
assert 'abc' in repr(d.head())
def test_asarray_fails_on_different_column_names():
vs = {'first': [2., 5., 3.],
'second': [4., 1., 4.],
'third': [6., 4., 3.]}
df = pd.DataFrame(vs)
with pytest.raises(ValueError):
Data(df, fields=list('abc'))
def test_functions_as_bound_methods():
"""
Test that all functions on an InteractiveSymbol are instance methods
of that object.
"""
# Filter out __class__ and friends that are special, these can be
# callables without being instance methods.
callable_attrs = filter(
callable,
(getattr(t, a, None) for a in dir(t) if not a.startswith('__')),
)
for attr in callable_attrs:
assert isinstance(attr, MethodType)
# Make sure this is bound to the correct object.
assert attr.__self__ is t
def test_all_string_infer_header():
data = """x,tl,z
Be careful driving.,hy,en
Be careful.,hy,en
Can you translate this for me?,hy,en
Chicago is very different from Boston.,hy,en
Don't worry.,hy,en"""
with tmpfile('.csv') as fn:
with open(fn, 'w') as f:
f.write(data)
data = Data(fn, has_header=True)
assert data.data.has_header
assert data.fields == ['x', 'tl', 'z']
def test_csv_with_trailing_commas():
with tmpfile('.csv') as fn:
with open(fn, 'wt') as f:
# note the trailing space in the header
f.write('a,b,c, \n1, 2, 3, ')
csv = CSV(fn)
assert repr(Data(fn))
assert discover(csv).measure.names == [
'a', 'b', 'c', ''
]
with tmpfile('.csv') as fn:
with open(fn, 'wt') as f:
f.write('a,b,c,\n1, 2, 3, ') # NO trailing space in the header
csv = CSV(fn)
assert repr(Data(fn))
assert discover(csv).measure.names == [
'a', 'b', 'c', 'Unnamed: 3'
]
def test_pickle_roundtrip():
ds = Data(1)
assert ds.isidentical(pickle.loads(pickle.dumps(ds)))
assert (ds + 1).isidentical(pickle.loads(pickle.dumps(ds + 1)))
es = Data(np.array([1, 2, 3]))
assert es.isidentical(pickle.loads(pickle.dumps(es)))
assert (es + 1).isidentical(pickle.loads(pickle.dumps(es + 1)))
def test_nameless_data():
data = [('a', 1)]
assert repr(data) in repr(Data(data))
def test_partially_bound_expr():
df = pd.DataFrame([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
columns=['id', 'name', 'balance'])
data = Data(df, name='data')
a = symbol('a', 'int')
expr = data.name[data.balance > a]
assert repr(expr) == 'data[data.balance > a].name'
| bsd-3-clause |
dice-project/DICE-Monitoring | src/pyUtil.py | 4 | 32334 | '''
Copyright 2015, Institute e-Austria, Timisoara, Romania
http://www.ieat.ro/
Developers:
* Gabriel Iuhasz, [email protected]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import socket
import sys
import signal
import subprocess
from datetime import datetime
import time
import requests
import os
import jinja2
from flask import jsonify
from app import *
from dbModel import *
from greenletThreads import *
from urlparse import urlparse
import pandas as pd
import psutil
def portScan(addrs, ports):
'''
Check if a range of ports are open or not
'''
t1 = datetime.now()
for address in addrs:
for port in ports:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockTest = sock.connect_ex((address, int(port)))
if sockTest == 0:
app.logger.info('[%s] : [INFO] Port %s on %s Open',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(port), str(address))
print "Port %s \t on %s Open" % (port, address)
sock.close()
except KeyboardInterrupt:
print "User Intrerupt detected!"
print "Closing ...."
app.logger.info('[%s] : [INFO] User Intrerupt detected. Exiting',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
sys.exit()
except socket.gaierror:
print 'Hostname not resolved. Exiting'
app.logger.warning('[%s] : [WARN] Hostname unresolved. Exiting',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
sys.exit()
except socket.error:
print 'Could not connect to server'
app.logger.warning('[%s] : [WARN] Could not connect to server',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
sys.exit()
#stop time
t2 = datetime.now()
#total time
total = t2 - t1
print 'Scanning Complete in: ', total
app.logger.info('[%s] : [INFO] Scanning Complete in: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), total)
def checkPID(pid):
"""
Check For the existence of a unix pid.
Sending signal 0 to a pid will raise an OSError exception if the pid is not running, and do nothing otherwise.
"""
if pid == 0: #If PID newly created return False
return False
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
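# Illustrative usage sketch for checkPID (not part of the original module); assumes a Unix host,
# where signalling our own PID always succeeds and PID 0 is treated as "not running":
#   >>> checkPID(os.getpid())
#   True
#   >>> checkPID(0)
#   False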
def startLocalProcess(command):
    '''
    Starts a process in the background and returns its PID.
    command -> passed to subprocess.Popen with shell=True, so it can be a single
               shell string such as 'yes > /dev/null'
    Returns integer: PID
    '''
process = subprocess.Popen(command, shell=True)
return process.pid
def checkUnique(nodeList):
    '''
    Checks a list of dictionaries for duplicate values; returns the keys holding unique
    values, plus the duplicated values and the keys (node IPs) that share them.
    '''
seen = {}
result = set()
sameCredentials = []
ipNode = []
for d in nodeList:
for k, v in d.iteritems():
if v in seen:
ipNode.append(k)
sameCredentials.append(v)
result.discard(seen[v])
else:
seen[v] = k
result.add(k)
return list(result), sameCredentials, ipNode
#print {v['nPass']:v for v in test}.values()
#print checkUnique(test)
class AgentResourceConstructor():
uriRoot = '/agent/v1'
uriRoot2 = '/agent/v2'
chck = '/check'
clctd = '/collectd'
logsf = '/lsf'
jmxr = '/jmx'
confr = '/conf'
logsr = '/logs'
deployr = '/deploy'
noder = '/node'
startr = '/start'
stopr = '/stop'
slogs = '/bdp/storm/logs'
shutDown = '/shutdown'
def __init__(self, IPList, Port):
self.IPList = IPList
self.Port = Port
def check(self):
resourceList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
AgentResourceConstructor.chck)
resourceList.append(resource)
return resourceList
def collectd(self):
cList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
AgentResourceConstructor.clctd)
cList.append(resource)
return cList
def lsf(self):
lList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
AgentResourceConstructor.logsf)
lList.append(resource)
return lList
def jmx(self):
jList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
AgentResourceConstructor.jmxr)
jList.append(resource)
return jList
def deploy(self):
dList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
AgentResourceConstructor.deployr)
dList.append(resource)
return dList
def node(self):
nList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
AgentResourceConstructor.noder)
nList.append(resource)
return nList
def start(self):
sList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
AgentResourceConstructor.startr)
sList.append(resource)
return sList
def stop(self):
stList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
AgentResourceConstructor.stopr)
stList.append(resource)
return stList
def startSelective(self, comp):
ssList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s/%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
AgentResourceConstructor.startr, comp)
ssList.append(resource)
return ssList
def stopSelective(self, comp):
stsList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s/%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
AgentResourceConstructor.stopr, comp)
stsList.append(resource)
return stsList
def logs(self, comp):
logList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s/%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
AgentResourceConstructor.logsr, comp)
logList.append(resource)
return logList
def conf(self, comp):
confList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s/%s' %(ip, self.Port, AgentResourceConstructor.uriRoot,
                                            AgentResourceConstructor.confr, comp)
confList.append(resource)
return confList
def stormLogs(self):
logList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s' %(ip, self.Port, AgentResourceConstructor.uriRoot2, AgentResourceConstructor.slogs)
logList.append(resource)
return logList
def shutdownAgent(self):
shutdownList = []
for ip in self.IPList:
resource = 'http://%s:%s%s%s' %(ip, self.Port, AgentResourceConstructor.uriRoot, AgentResourceConstructor.shutDown)
shutdownList.append(resource)
return shutdownList
def dbBackup(db, source, destination, version=1):
'''
:param db: -> database
:param source: -> original name
:param destination: -> new name
:return:
'''
vdest = destination + str(version)
if os.path.isfile(source) is True:
if os.path.isfile(vdest) is True:
return dbBackup(db, source, destination, version + 1)
os.rename(source, destination)
def detectStormTopology(ip, port=8080):
'''
:param ip: IP of the Storm REST API
:param port: Port of the Storm REST API
:return: topology name
'''
url = 'http://%s:%s/api/v1/topology/summary' %(ip, port)
try:
r = requests.get(url, timeout=DMON_TIMEOUT)
except requests.exceptions.Timeout:
app.logger.error('[%s] : [ERROR] Cannot connect to %s timedout',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(url))
raise
except requests.exceptions.ConnectionError:
app.logger.error('[%s] : [ERROR] Cannot connect to %s error',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(url))
raise
topologySummary = r.json()
app.logger.info('[%s] : [INFO] Topologies detected at %s are: %s', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(ip), str(topologySummary))
return topologySummary.get('topologies')[0]['id']
def validateIPv4(s):
'''
:param s: -> IP as string
:return:
'''
pieces = s.split('.')
if len(pieces) != 4:
return False
try:
return all(0 <= int(p) < 256 for p in pieces)
except ValueError:
return False
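# Hedged usage sketch for validateIPv4 (illustrative only, not from the original source):
#   >>> validateIPv4('192.168.0.1')
#   True
#   >>> validateIPv4('300.1.1.1')    # octet out of range
#   False
#   >>> validateIPv4('10.0.0')       # only three pieces
#   False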
def checkStormSpoutsBolts(ip, port, topology):
'''
:param ip: IP of the Storm REST API
    :param port: Port of the Storm REST API
:param topology: Topology ID
:return:
'''
ipTest = validateIPv4(ip)
if not ipTest:
return 0, 0
if not port.isdigit():
return 0, 0
url = 'http://%s:%s/api/v1/topology/%s' %(ip, port, topology)
try:
r = requests.get(url, timeout=DMON_TIMEOUT)
except requests.exceptions.Timeout:
app.logger.error('[%s] : [ERROR] Cannot connect to %s timedout',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(url))
return 0, 0
except requests.exceptions.ConnectionError:
app.logger.error('[%s] : [ERROR] Cannot connect to %s error',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(url))
return 0, 0
if r.status_code != 200:
return 0, 0
return len(r.json()['bolts']), len(r.json()['spouts'])
def configureComponent(settingsDict, tmpPath, filePath): #TODO modify /v1/overlord/aux/<auxComp>/config using this function
'''
:param settingsDict: dictionary containing the template information
:param tmpPath: path to template file
:param filePath: path to save config file
:return:
'''
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
try:
template = templateEnv.get_template(tmpPath)
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot find %s, with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), filePath,
type(inst), inst.args)
return 0
confInfo = template.render(settingsDict)
confFile = open(filePath, "w+")
confFile.write(confInfo)
confFile.close()
try:
subprocess.Popen('echo >> ' + filePath, shell=True) # TODO fix this
# subprocess.call(["echo", ">>", filePath])
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot find %s, with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), filePath,
type(inst), inst.args)
return 0
return 1
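# Illustrative sketch of how configureComponent is expected to be called; the template path,
# output path and keys below are hypothetical examples, not files shipped with DMON:
#   >>> settings = {'es_cluster_name': 'dmon', 'es_node_name': 'node-1'}
#   >>> configureComponent(settings,
#   ...                    '/etc/dmon/templates/elasticsearch.tmp',
#   ...                    '/etc/elasticsearch/elasticsearch.yml')
#   1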
class DetectBDService():
def checkRegistered(self, service):
'''
:param service: Name of BD service role to check for
:param dbNodes: Query for all database nodes
:return:
'''
return 'Check registered %s information' %service
def detectYarnHS(self): #TODO: Document detected at first detection, unchanged if server still responds and updated if it has to be redetected and no longer matches stored values
        '''
        Detect the Yarn History Server among the registered nodes (uses the global dbNodes model)
        :return: JSON response describing the detected, unchanged or updated history server
        '''
qNode = dbNodes.query.all()
qDBS = dbBDService.query.first()
if qDBS is not None:
yhUrl = 'http://%s:%s/ws/v1/history/mapreduce/jobs'%(qDBS.yarnHEnd, qDBS.yarnHPort)
try:
yarnResp = requests.get(yhUrl, timeout=DMON_TIMEOUT)
yarnData = yarnResp.json()
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot connect to yarn history service with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
type(inst), inst.args)
yarnData = 0
if yarnData:
rspYarn = {}
rspYarn['Jobs'] = yarnData['jobs']
rspYarn['NodeIP'] = qDBS.yarnHEnd
rspYarn['NodePort'] = qDBS.yarnHPort
rspYarn['Status'] = 'Unchanged'
response = jsonify(rspYarn)
response.status_code = 200
return response
if qNode is None:
response = jsonify({'Status': 'No registered nodes'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No nodes found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
yarnNodes = []
for n in qNode:
if "yarn" in n.nRoles:
yarnNodes.append(n.nodeIP)
if not yarnNodes:
response = jsonify({'Status': 'Yarn role not found'})
response.status_code = 404
app.logger.warning('[%s] : [WARNING] No nodes have yarn role',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
resList = []
for n in yarnNodes:
url = 'http://%s:%s/ws/v1/history/mapreduce/jobs' %(n, '19888')
resList.append(url)
app.logger.info('[%s] : [INFO] Resource list for yarn history server discovery -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), resList)
dmonYarn = GreenletRequests(resList)
nodeRes = dmonYarn.parallelGet()
yarnJobs = {}
        for i in nodeRes: #TODO: not handled if more than one node responds as a history server
nodeIP = urlparse(i['Node'])
data = i['Data']
if data !='n/a':
try:
yarnJobs['Jobs'] = data['jobs']['job']
except Exception as inst:
app.logger.warning('[%s] : [WARN] Cannot read job list, with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
response = jsonify({'Status': 'Cannot read job list'})
response.status_code = 500
return response
yarnJobs['NodeIP'] = nodeIP.hostname
yarnJobs['NodePort'] = nodeIP.port
yarnJobs['Status'] = 'Detected'
if not yarnJobs:
response = jsonify({'Status': 'No Yarn history Server detected'})
response.status_code = 404
app.logger.error('[%s] : [ERROR] No Yarn History Server detected',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if qDBS is None:
upBDS = dbBDService(yarnHPort=yarnJobs['NodePort'], yarnHEnd=yarnJobs['NodeIP'])
db.session.add(upBDS)
db.session.commit()
            app.logger.info('[%s] : [INFO] Registered Yarn History server at %s and port %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), yarnJobs['NodeIP'],
yarnJobs['NodePort'])
else:
qDBS.yarnHEnd = yarnJobs['NodeIP']
qDBS.yarnHPort = yarnJobs['NodePort']
yarnJobs['Status'] = 'Updated'
app.logger.info('[%s] : [INFO] Updated Yarn History server at %s and port %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), yarnJobs['NodeIP'],
yarnJobs['NodePort'])
response = jsonify(yarnJobs)
if yarnJobs['Status'] == 'Updated':
response.status_code = 201
else:
response.status_code = 200
return response
def detectStormRS(self, dbNodes):
'''
:param dbNodes: Query for all database nodes
:return:
'''
return 'Detect Storm Rest Service'
def detectSparkHS(self, dbNodes):
'''
:param dbNodes: Query for all database nodes
:return:
'''
qNode = dbNodes.query.all()
qDBS = dbBDService.query.first()
if qDBS is not None:
yhUrl = 'http://%s:%s/api/v1/applications'%(qDBS.sparkHEnd, qDBS.sparkHPort)
try:
sparkResp = requests.get(yhUrl, timeout=DMON_TIMEOUT)
ysparkData = sparkResp.json()
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot connect to spark history service with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
type(inst), inst.args)
ysparkData = 0
if ysparkData:
rspSpark = {}
rspSpark['Jobs'] = ysparkData['jobs']
rspSpark['NodeIP'] = qDBS.sparkHEnd
rspSpark['NodePort'] = qDBS.sparkHPort
rspSpark['Status'] = 'Unchanged'
response = jsonify(rspSpark)
response.status_code = 200
return response
if qNode is None:
response = jsonify({'Status': 'No registered nodes'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No nodes found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
sparkNodes = []
for n in qNode:
if "spark" in n.nRoles:
sparkNodes.append(n.nodeIP)
if not sparkNodes:
response = jsonify({'Status': 'Spark role not found'})
response.status_code = 404
app.logger.warning('[%s] : [WARNING] No nodes have spark role',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
resList = []
for n in sparkNodes:
url = 'http://%s:%s/api/v1/applications' %(n, '19888')
resList.append(url)
app.logger.info('[%s] : [INFO] Resource list for spark history server discovery -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), resList)
dmonSpark = GreenletRequests(resList)
nodeRes = dmonSpark.parallelGet()
sparkJobs = {}
        for i in nodeRes: #TODO: not handled if more than one node responds as a history server
nodeIP = urlparse(i['Node'])
data = i['Data']
if data !='n/a':
try:
sparkJobs['Jobs'] = data
except Exception as inst:
app.logger.warning('[%s] : [WARN] Cannot read job list, with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
sparkJobs['NodeIP'] = nodeIP.hostname
sparkJobs['NodePort'] = nodeIP.port
sparkJobs['Status'] = 'Detected'
if qDBS is None:
upBDS = dbBDService(sparkHPort=sparkJobs['NodePort'], sparkHEnd=sparkJobs['NodeIP'])
db.session.add(upBDS)
db.session.commit()
            app.logger.info('[%s] : [INFO] Registered Spark History server at %s and port %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), sparkJobs['NodeIP'],
sparkJobs['NodePort'])
else:
            qDBS.sparkHEnd = sparkJobs['NodeIP']
            qDBS.sparkHPort = sparkJobs['NodePort']
sparkJobs['Status'] = 'Updated'
app.logger.info('[%s] : [INFO] Updated Spark History server at %s and port %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), sparkJobs['NodeIP'],
sparkJobs['NodePort'])
response = jsonify(sparkJobs)
if sparkJobs['Status'] == 'Updated':
response.status_code = 201
else:
response.status_code = 200
return response
def detectServiceRA(self, service):
return 'Generic detection of services'
def checkCoreState(esPidf, lsPidf, kbPidf): #TODO: works only for local deployment, change for distributed
'''
    :param esPidf: Elasticsearch PID file location
:param lsPidf: Logstash PID file location
:param kbPidf: Kibana PID file location
'''
qESCore = dbESCore.query.first()
qLSCore = dbSCore.query.first()
qKBCore = dbKBCore.query.first()
if not os.path.isfile(esPidf):
if qESCore is None:
app.logger.warning('[%s] : [WARN] No ES Core service registered to DMon',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
            app.logger.info('[%s] : [INFO] PID file not found, setting pid to 0 for local ES Core',
                            datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
qESCore.ESCorePID = 0
else:
with open(esPidf) as esPid:
vpid = esPid.read()
app.logger.info('[%s] : [INFO] ES PID read type is %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(vpid))
try:
esStatus = checkPID(int(vpid))
except ValueError:
app.logger.warning('[%s] : [WARN] ES PID read type is %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(vpid))
esStatus = False
if esStatus:
if qESCore is None:
app.logger.warning('[%s] : [WARN] No ES Core service registered to DMon',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
                    app.logger.info('[%s] : [INFO] PID file found for ES Core service with value %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(vpid))
qESCore.ESCorePID = int(vpid)
else:
if qESCore is None:
app.logger.warning('[%s] : [WARN] No ES Core service registered to DMon',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
                    app.logger.info('[%s] : [INFO] PID file found for ES Core service, but no service running at pid %s. Setting value to 0',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(vpid))
qESCore.ESCorePID = 0
if not os.path.isfile(lsPidf):
if qLSCore is None:
app.logger.warning('[%s] : [WARN] No LS Core service registered to DMon',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
app.logger.info('[%s] : [INFO] PID file not found, setting pid to 0 for local LS Core',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
qLSCore.LSCorePID = 0
else:
with open(lsPidf) as lsPid:
wpid = lsPid.read()
try:
lsStatus = checkPID(int(wpid))
except ValueError:
app.logger.warning('[%s] : [WARN] LS PID read type is %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(wpid))
lsStatus = False
if lsStatus:
if qLSCore is None:
app.logger.warning('[%s] : [WARN] No LS Core service registered to DMon',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
app.logger.info('[%s] : [INFO] PID file found for LS Core service, with value %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(wpid))
qLSCore.LSCorePID = int(wpid)
else:
if qLSCore is None:
app.logger.warning('[%s] : [WARN] No LS Core service registered to DMon',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
                    app.logger.info('[%s] : [INFO] PID file found for LS Core service, but no service running at pid %s. Setting value to 0',
                                    datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
                                    str(wpid))
qLSCore.LSCorePID = 0
if not os.path.isfile(kbPidf):
if qKBCore is None:
app.logger.warning('[%s] : [WARN] No KB Core service registered to DMon',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
            app.logger.info('[%s] : [INFO] PID file not found, setting pid to 0 for local KB Core',
                            datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
qKBCore.KBCorePID = 0
else:
with open(kbPidf) as kbPid:
qpid = kbPid.read()
try:
kbStatus = checkPID(int(qpid))
except ValueError:
app.logger.warning('[%s] : [WARN] KB PID read type is %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(qpid))
kbStatus = False
if kbStatus:
if qKBCore is None:
app.logger.warning('[%s] : [WARN] No KB Core service registered to DMon',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
app.logger.info('[%s] : [INFO] PID file found for KB Core service, with value %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qpid))
qKBCore.KBCorePID = int(qpid)
else:
if qKBCore is None:
app.logger.warning('[%s] : [WARN] No KB Core service registered to DMon',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
                    app.logger.info('[%s] : [INFO] PID file found for KB Core service, but no service running at pid %s. Setting value to 0',
                                    datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
                                    str(qpid))
qKBCore.KBCorePID = 0
def str2Bool(st):
'''
:param st: -> string to test
:return: -> if true then returns 1 else 0
'''
if type(st) is bool:
return st
if st in ['True', 'true', '1']:
return 1
elif st in ['False', 'false', '0']:
return 0
else:
return 0
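# Minimal usage sketch for str2Bool (illustrative only):
#   >>> str2Bool('True')
#   1
#   >>> str2Bool('false')
#   0
#   >>> str2Bool('anything else')   # unknown strings fall through to 0
#   0
#   >>> str2Bool(True)              # booleans are returned unchanged
#   True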
def csvheaders2colNames(csvfile, adname):
    '''
    :param csvfile: -> input data loaded as a pandas DataFrame (anything else returns 0)
    :param adname: -> string appended to each column name
    :return: colNames -> dict mapping original column names (except 'key') to '<name>_<adname>'
    '''
colNames = {}
if isinstance(csvfile, pd.DataFrame):
for e in csvfile.columns.values:
if e == 'key':
pass
else:
colNames[e] = '%s_%s' % (e, adname)
else:
return 0
return colNames
def check_proc(pidfile, wait=15):
'''
:param pidfile: -> location of pid
:return: -> return pid
'''
tick = 0
time.sleep(wait)
while not os.path.exists(pidfile):
time.sleep(1)
tick += 1
if tick > wait:
return 0
stats_pid = open(pidfile)
try:
pid = int(stats_pid.read())
except ValueError:
return 0
return pid
def check_proc_recursive(pidfile, wait=15):
'''
:param pidfile: -> location of pid
:return: -> return pid
'''
tick = 0
time.sleep(wait)
while not os.path.exists(pidfile):
time.sleep(1)
tick += 1
if tick > wait:
return 0
return recursivePidRead(pidfile, 1)
def recursivePidRead(pidfile, iteration):
stats_pid = open(pidfile)
try:
pid = int(stats_pid.read())
except ValueError:
time.sleep(1)
if iteration > 5:
return 0
else:
            return recursivePidRead(pidfile, iteration+1)
return pid
def sysMemoryCheck(needStr):
'''
:param needStr: heap size string setting of the format "512m"
:return: returns True or False depending if check is successful or not and returns the final heap size
'''
mem = psutil.virtual_memory().total
need = int(needStr[:-1])
unit = needStr[-1]
if unit == 'm':
hmem = mem / 1024 / 1024
elif unit == 'g':
hmem = mem / 1024 / 1024 / 1024
else:
app.logger.error('[%s] : [ERROR] Unknown heap size format %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), needStr)
hmem = mem / 1024 / 1024
return False, "%s%s" % (str(hmem / 2), 'm')
if need > hmem:
return False, "%s%s" % (str(hmem / 2), unit)
else:
return True, needStr
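# Hedged example of sysMemoryCheck behaviour, assuming psutil reports exactly 8 GiB of total memory:
#   >>> sysMemoryCheck('512m')    # 512 MB heap fits -> setting is kept
#   (True, '512m')
#   >>> sysMemoryCheck('16g')     # more than available -> half of the available memory is proposed
#   (False, '4g')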
if __name__ == '__main__':
# db.create_all()
# test = DetectBDService()
# what = test.detectYarnHS()
# print what
test = AgentResourceConstructor(['85.120.206.45', '85.120.206.47', '85.120.206.48', '85.120.206.49'], '5222')
listLogs = test.stormLogs()
print listLogs
# t = test.check()
# c = test.collectd()
# l = test.lsf()
# j = test.jmx()
# d = test.deploy()
# n = test.node()
# s = test.start()
# st = test.stop()
# ss = test.startSelective('lsf')
# sts = test.stopSelective('collectd')
# log = test.logs('lsf')
# conf = test.conf('collectd')
# print t
# print c
# print l
# print j
# print d
# print n
# print s
# print st
# print ss
# print sts
# print log
# print conf | apache-2.0 |
mdeff/ntds_2016 | project/reports/compressed_sensing/utils.py | 1 | 44865 | import scipy.misc
import os
import shutil
import zipfile
import numpy as np
from itertools import product
import pywt
import matplotlib.pyplot as plt
import mpl_toolkits.axes_grid1
import warnings
import tensorflow as tf
def is_array_str(obj):
"""
Check if obj is a list of strings or a tuple of strings or a set of strings
:param obj: an object
:return: flag: True or False
"""
# TODO: modify the use of is_array_str(obj) in the code to is_array_of(obj, classinfo)
flag = False
if isinstance(obj, str):
pass
elif all(isinstance(item, str) for item in obj):
flag = True
return flag
def is_array_of(obj, classinfo):
"""
Check if obj is a list of classinfo or a tuple of classinfo or a set of classinfo
:param obj: an object
:param classinfo: type of class (or subclass). See isinstance() build in function for more info
:return: flag: True or False
"""
flag = False
if isinstance(obj, classinfo):
pass
elif all(isinstance(item, classinfo) for item in obj):
flag = True
return flag
def check_and_convert_to_list_str(obj):
"""
Check if obj is a string or an array like of strings and return a list of strings
:param obj: and object
:return: list_str: a list of strings
"""
if isinstance(obj, str):
list_str = [obj] # put in a list to avoid iterating on characters
elif is_array_str(obj):
list_str = []
for item in obj:
list_str.append(item)
else:
raise TypeError('Input must be a string or an array like of strings.')
return list_str
def load_images(path, file_ext='.png'):
"""
Load images in grayscale from the path
:param path: path to folder
:param file_ext: a string or a list of strings (even an array like of strings)
:return: image_list, image_name_list
"""
# Check file_ext type
file_ext = check_and_convert_to_list_str(file_ext)
image_list = []
image_name_list = []
for file in os.listdir(path):
file_name, ext = os.path.splitext(file)
if ext.lower() not in file_ext:
continue
# Import image and convert it to 8-bit pixels, black and white (using mode='L')
image_list.append(scipy.misc.imread(os.path.join(path, file), mode='L'))
image_name_list.append(file_name)
return image_list, image_name_list
def extract_zip_archive(zip_file_path, extract_path, file_ext=''):
"""
Extract zip archive. If file_ext is specified, only extracts files with specified extension
:param zip_file_path: path to zip archive
:param extract_path: path to export folder
:param file_ext: a string or a list of strings (even an array like of strings)
:return:
"""
# Check file_ext type
file_ext = check_and_convert_to_list_str(file_ext)
# Check if export_path already contains the files with a valid extension
valid_files_in_extract_path = [os.path.join(root, name)
for root, dirs, files in os.walk(extract_path)
for name in files
if name.endswith(tuple(file_ext))]
with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
files_to_extract = [name for name in zip_ref.namelist()
if name.endswith(tuple(file_ext))
and os.path.join(extract_path, name) not in valid_files_in_extract_path]
# Only extracts files if not already extracted
# TODO: load directly the images without extracting them
for file in files_to_extract:
print(file)
zip_ref.extract(file, path=extract_path)
return
def export_image_list(image_list, image_name_list, export_name_list=True, path='', file_ext='.png'):
"""
Export images export_name_list of (image_list, image_name_list) in path as image_name.ext
:param image_list: list of array
:param image_name_list: list of strings
:param export_name_list: True, False, None, string or list of strings
:param path: path to export folder
:param file_ext: file extension
:return:
"""
# Check if file_ext is a string
if not isinstance(file_ext, str):
raise TypeError('File extension must be a string')
# Check if image_name_list is list of string or simple string
image_name_list = check_and_convert_to_list_str(image_name_list)
# Check export_name_list type
# if is True, i.e. will export all images
# Otherwise check if export_name_list is list of strings or simple string
if isinstance(export_name_list, bool):
if export_name_list: # True case
export_name_list = image_name_list
else: # False case
export_name_list = ['']
elif export_name_list is None:
export_name_list = ['']
else:
export_name_list = check_and_convert_to_list_str(export_name_list)
# Check if folder already exists
if os.path.exists(path): # never True if path = ''
# Check if folder content is exactly the same as what will be exported
if not sorted(os.listdir(path)) == [item + file_ext for item in sorted(export_name_list)]:
shutil.rmtree(path)
print('Folder {} has been removed'.format(path))
else:
return
# Check if folder doesn't exist and if path not empty to create the folder
if not os.path.exists(path) and path:
os.makedirs(path)
print('Folder {} has been created'.format(path))
# Save images
for i, image_name in enumerate(image_name_list):
if image_name not in export_name_list:
continue
scipy.misc.imsave(os.path.join(path, image_name + file_ext), image_list[i])
print('Saved {} {} as {}'.format(image_name, image_list[i].shape, os.path.join(path, image_name + file_ext)))
return
def get_data_paths(dataset_name):
"""
Generate and return data paths
:param dataset_name: string
:return: data_paths: dict
"""
if not isinstance(dataset_name, str):
raise TypeError('Data set name must be a string')
keys = ['sources_base', 'source', 'source_archive', 'dataset', 'orig', 'train', 'test']
data_paths = dict.fromkeys(keys)
data_paths['sources_base'] = os.path.join('datasets', 'sources')
data_paths['source'] = os.path.join(data_paths['sources_base'], dataset_name)
data_paths['source_archive'] = data_paths['source'] + '.zip'
data_paths['dataset'] = os.path.join('datasets', dataset_name)
data_paths['orig'] = os.path.join(data_paths['dataset'], 'orig')
data_paths['train'] = os.path.join(data_paths['dataset'], 'train')
data_paths['test'] = os.path.join(data_paths['dataset'], 'test')
return data_paths
def generate_original_images(dataset_name):
"""
Generate original images
:param dataset_name: name of the dataset such that dataset.zip exists
:return:
"""
# TODO: download from the web so it doesn't have to be hosted on github
# Parameters
data_sources_main_path = os.path.join('datasets', 'sources')
data_source_path = os.path.join(data_sources_main_path, dataset_name)
data_source_zip_path = data_source_path + '.zip'
valid_ext = ['.jpg', '.tif', '.tiff', '.png', '.bmp']
export_path = os.path.join('datasets', dataset_name, 'orig')
# Unzip archive
extract_zip_archive(data_source_zip_path, data_sources_main_path, file_ext=valid_ext)
# Loading valid image in grayscale
image_list, image_name_list = load_images(data_source_path, file_ext=valid_ext)
# Export original images
export_image_list(image_list, image_name_list, path=export_path, file_ext='.png')
return
def export_set_from_orig(dataset_name, set_name, name_list):
"""
Export a set from the original set based on the name list provided
:param dataset_name: string, name of the dataset such that dataset.zip exists
:param set_name: string, name of the set (yet only 'train' and 'test')
:param name_list: image name list to extract from the 'orig' set
:return:
"""
# Get paths
data_paths = get_data_paths(dataset_name)
# Load original images
orig_image_list, orig_name_list = load_images(data_paths['orig'], file_ext='.png')
export_image_list(orig_image_list, orig_name_list, export_name_list=name_list,
path=data_paths[set_name], file_ext='.png')
return
def generate_train_images(dataset_name, name_list=None):
"""
Generate training image set from original set
:param dataset_name: string, name of the dataset such that dataset.zip exists
:param name_list: (optional) image name list to extract from the 'orig' set
:return:
"""
# TODO: generalize for different datasets
if name_list is None:
name_list = ['airplane', 'arctichare', 'baboon', 'barbara', 'boat', 'cameraman', 'cat', 'goldhill', 'zelda']
export_set_from_orig(dataset_name, set_name='train', name_list=name_list)
return
def generate_test_images(dataset_name, name_list=None):
"""
Generate testing image set from original set
:param dataset_name: string, name of the dataset such that dataset.zip exists
:param name_list: (optional) image name list to extract from the 'orig' set
:return:
"""
# TODO: generalize for different datasets
if name_list is None:
name_list = ['fruits', 'frymire', 'girl', 'monarch', 'mountain', 'peppers', 'pool', 'sails', 'tulips', 'watch']
export_set_from_orig(dataset_name, set_name='test', name_list=name_list)
return
def extract_2d_patches_old_as_list(image, patch_size):
"""
Extract non-overlapping patches of size patch_height x patch_width
:param image: array, shape = (image_height, image_width)
:param patch_size: tuple of ints (patch_height, patch_width)
:return: patches: list of patches
"""
image_size = np.asarray(image.shape) # convert to numpy array to allow array computations
patch_size = np.asarray(patch_size) # convert to numpy array to allow array computations
if patch_size[0] > image_size[0]:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if patch_size[1] > image_size[1]:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
    # Patches number: flooring drops border pixels when image_size is not an integer multiple of patch_size
patches_number = np.floor(image_size / patch_size).astype(int)
patches = []
# Cartesian iteration using itertools.product()
# Equivalent to the nested for loop
# for r in range(patches_number[0]):
# for c in range(patches_number[1]):
for r, c in product(range(patches_number[0]), range(patches_number[1])):
rr = r * patch_size[0]
cc = c * patch_size[1]
patches.append(image[rr:rr + patch_size[0], cc:cc + patch_size[1]])
return patches
def extract_2d_patches(image, patch_size):
"""
Extract non-overlapping patches of size patch_height x patch_width
:param image: array, shape = (image_height, image_width)
:param patch_size: tuple of ints (patch_height, patch_width)
:return: patches: array, shape = (patch_height, patch_width, patches_number)
"""
image_size = np.asarray(image.shape) # convert to numpy array to allow array computations
patch_size = np.asarray(patch_size) # convert to numpy array to allow array computations
if patch_size[0] > image_size[0]:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if patch_size[1] > image_size[1]:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
    # Patches number: flooring drops border pixels when image_size is not an integer multiple of patch_size
patches_number = np.floor(image_size / patch_size).astype(int)
# patches = np.zeros([np.prod(patches_number), patch_size[0], patch_size[1]])
patches = np.zeros([patch_size[0], patch_size[1], np.prod(patches_number)])
# Cartesian iteration using itertools.product()
# Equivalent to the nested for loop
# for r in range(patches_number[0]):
# for c in range(patches_number[1]):
for k, (r, c) in zip(range(np.prod(patches_number)), product(range(patches_number[0]), range(patches_number[1]))):
rr = r * patch_size[0]
cc = c * patch_size[1]
# patches[k, :, :] += image[rr:rr + patch_size[0], cc:cc + patch_size[1]]
patches[:, :, k] += image[rr:rr + patch_size[0], cc:cc + patch_size[1]] # TODO: use [..., k]
return patches
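# Illustrative sketch (assumes a square grayscale image, e.g. 512x512):
#   >>> image = np.zeros((512, 512))
#   >>> patches = extract_2d_patches(image, (8, 8))
#   >>> patches.shape               # 64 x 64 = 4096 non-overlapping 8x8 patches
#   (8, 8, 4096)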
def reconstruct_from_2d_patches_old_as_list(patches, image_size):
"""
Reconstruct image from patches of size patch_height x patch_width
:param patches: list of patches
:param image_size: tuple of ints (image_height, image_width)
:return: rec_image: array of shape (rec_image_height, rec_image_width)
"""
image_size = np.asarray(image_size) # convert to numpy array to allow array computations
patch_size = np.asarray(patches[0].shape) # convert to numpy array to allow array computations
if patch_size[0] > image_size[0]:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if patch_size[1] > image_size[1]:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
    # Patches number: flooring drops border pixels when image_size is not an integer multiple of patch_size
patches_number = np.floor(image_size / patch_size).astype(int)
rec_image_size = patches_number * patch_size
rec_image = np.zeros(rec_image_size)
# Cartesian iteration using itertools.product()
for patch, (r, c) in zip(patches, product(range(patches_number[0]), range(patches_number[1]))):
rr = r * patch_size[0]
cc = c * patch_size[1]
rec_image[rr:rr + patch_size[0], cc:cc + patch_size[1]] += patch
return rec_image
def reconstruct_from_2d_patches(patches, image_size):
"""
Reconstruct image from patches of size patch_height x patch_width
:param patches: array, shape = (patch_height, patch_width, patches_number)
:param image_size: tuple of ints (image_height, image_width)
:return: rec_image: array of shape (rec_image_height, rec_image_width)
"""
image_size = np.asarray(image_size) # convert to numpy array to allow array computations
# patch_size = np.asarray(patches[0].shape) # convert to numpy array to allow array computations
patch_size = np.asarray(patches[:, :, 0].shape) # convert to numpy array to allow array computations
if patch_size[0] > image_size[0]:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if patch_size[1] > image_size[1]:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
    # Patches number: flooring drops border pixels when image_size is not an integer multiple of patch_size
patches_number = np.floor(image_size / patch_size).astype(int)
rec_image_size = patches_number * patch_size
rec_image = np.zeros(rec_image_size)
# Cartesian iteration using itertools.product()
for k, (r, c) in zip(range(np.prod(patches_number)), product(range(patches_number[0]), range(patches_number[1]))):
rr = r * patch_size[0]
cc = c * patch_size[1]
# rec_image[rr:rr + patch_size[0], cc:cc + patch_size[1]] += patches[k, :, :]
rec_image[rr:rr + patch_size[0], cc:cc + patch_size[1]] += patches[:, :, k] # TODO: use [..., k]
return rec_image
def reshape_patch_in_vec(patches):
"""
:param patches: array, shape = (patch_height, patch_width, patches_number)
:return: vec_patches: array, shape = (patch_height * patch_width, patches_number)
"""
# Check if only a single patch (i.e. ndim = 2) or multiple patches (i.e. ndim = 3)
if patches.ndim == 2:
vec_patches = patches.reshape((patches.shape[0]*patches.shape[1]))
elif patches.ndim == 3:
vec_patches = patches.reshape((patches.shape[0]*patches.shape[1], patches.shape[-1]))
else:
raise TypeError('Patches cannot have more than 3 dimensions (i.e. only grayscale for now)')
return vec_patches
def reshape_vec_in_patch(vec_patches, patch_size):
"""
:param vec_patches: array, shape = (patch_height * patch_width, patches_number)
:param patch_size: tuple of ints (patch_height, patch_width)
:return patches: array, shape = (patch_height, patch_width, patches_number)
"""
# Check if vec_patches is 1D (i.e. only one patch) or 2D (i.e. multiple patches)
if vec_patches.ndim == 1:
patches = vec_patches.reshape((patch_size[0], patch_size[1]))
elif vec_patches.ndim == 2:
patches = vec_patches.reshape((patch_size[0], patch_size[1], vec_patches.shape[-1]))
else:
raise TypeError('Vectorized patches array cannot be more than 2D')
return patches
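# Round-trip sketch between patch and vector form (illustrative only):
#   >>> patches = np.random.rand(8, 8, 10)
#   >>> vec = reshape_patch_in_vec(patches)        # shape (64, 10)
#   >>> back = reshape_vec_in_patch(vec, (8, 8))   # shape (8, 8, 10)
#   >>> np.allclose(patches, back)
#   True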
def generate_vec_set(image_list, patch_size):
"""
Generate vectorized set of image based on patch_size
:param image_list: list of array
:param patch_size: tuple of ints (patch_height, patch_width)
:return: vec_set: array, shape = (patch_height * patch_width, n_patches)
"""
patch_list = []
for _, image in enumerate(image_list):
patch_list.append(extract_2d_patches(image, patch_size))
patches = np.concatenate(patch_list, axis=-1)
vec_set = reshape_patch_in_vec(patches)
return vec_set
def generate_cross_validation_sets(full_set, fold_number=5, fold_combination=1):
"""
Generate cross validations sets (i.e train and validation sets) w.r.t. a total fold number and the fold combination
:param full_set: array, shape = (set_dim, set_size)
:param fold_number: positive int
:param fold_combination: int
:return: train_set, val_set
"""
if not isinstance(fold_combination, int):
raise TypeError('Fold combination must be an integer')
if not isinstance(fold_number, int):
raise TypeError('Fold number must be an integer')
if fold_number < 1:
        raise ValueError('Fold number must be a positive integer')
if fold_combination > fold_number:
raise ValueError('Fold combination must be smaller or equal to fold number')
if not isinstance(full_set, np.ndarray):
raise TypeError('Full set must be a numpy array')
    if full_set.ndim != 2:
raise TypeError('Full set must be a 2 dimensional array')
patch_number = full_set.shape[1]
fold_len = int(patch_number / fold_number) # int -> floor
val_set_start = (fold_combination - 1) * fold_len
val_set_range = range(val_set_start, val_set_start + fold_len)
train_set_list = [idx for idx in range(fold_number * fold_len) if idx not in val_set_range]
    val_set = full_set[:, val_set_range]
    train_set = full_set[:, train_set_list]
return train_set, val_set
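# Hedged usage sketch (assumes the full set holds 100 vectorized patches of dimension 64):
#   >>> full_set = np.random.rand(64, 100)
#   >>> train_set, val_set = generate_cross_validation_sets(full_set, fold_number=5, fold_combination=1)
#   >>> train_set.shape, val_set.shape    # 4 folds for training, 1 fold held out for validation
#   ((64, 80), (64, 20))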
def create_gaussian_rip_matrix(size=None, seed=None):
"""
Create a Gaussian matrix satisfying the Restricted Isometry Property (RIP).
See: H. Rauhut - Compressive Sensing and Structured Random Matrices
:param size: int or tuple of ints, optional. Default is None
:param seed: int or array_like, optional
:return: matrix: array, shape = (m, n)
"""
m, n = size
mean = 0.0
stdev = 1 / np.sqrt(m)
prng = np.random.RandomState(seed=seed)
matrix = prng.normal(loc=mean, scale=stdev, size=size)
return matrix
def create_bernoulli_rip_matrix(size=None, seed=None):
"""
Create a Bernoulli matrix satisfying the Restricted Isometry Property (RIP).
See: H. Rauhut - Compressive Sensing and Structured Random Matrices
:param size: int or tuple of ints, optional. Default is None
:param seed: int or array_like, optional
:return: matrix: array, shape = (m, n)
"""
m, n = size
prng = np.random.RandomState(seed=seed)
matrix = prng.randint(low=0, high=2, size=size).astype('float') # gen 0, +1 sequence
# astype('float') required to use the true divide (/=) which follows
matrix *= 2
matrix -= 1
matrix /= np.sqrt(m)
return matrix
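# Illustrative check (not from the original code): every entry of the Bernoulli RIP matrix is
# +-1/sqrt(m), so each column has unit Euclidean norm.
#   >>> phi = create_bernoulli_rip_matrix(size=(16, 64), seed=0)
#   >>> np.allclose(np.linalg.norm(phi, axis=0), 1.0)
#   True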
def create_measurement_model(mm_type, patch_size, compression_percent):
"""
Create measurement model depending on
:param mm_type: string defining the measurement model type
:param patch_size: tuple of ints (patch_height, patch_width)
:param compression_percent: int
:return: measurement_model: array, shape = (m, n)
"""
# TODO: check if seed should be in a file rather than hardcoded
seed = 1234567890
patch_height, patch_width = patch_size
n = patch_height * patch_width
m = round((1 - compression_percent / 100) * n)
if mm_type.lower() == 'gaussian-rip':
measurement_model = create_gaussian_rip_matrix(size=(m, n), seed=seed)
elif mm_type.lower() == 'bernoulli-rip':
measurement_model = create_bernoulli_rip_matrix(size=(m, n), seed=seed)
else:
raise NameError('Undefined measurement model type')
return measurement_model
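# Illustrative sketch: 75% compression on 8x8 patches keeps m = 16 of the n = 64 measurements.
#   >>> phi = create_measurement_model('gaussian-rip', (8, 8), compression_percent=75)
#   >>> phi.shape
#   (16, 64)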
def generate_transform_dict(patch_size, name, **kwargs):
"""
Create a transform dictionary based on the name and various parameters (kwargs)
:param patch_size: tuple of ints (patch_height, patch_width)
:param name: string
:param kwargs:
:return: transform_dict: dictionary
"""
# TODO: check how to properly document **kwargs
transform_dict = dict()
transform_dict['name'] = name.lower()
transform_dict['patch_size'] = patch_size
if transform_dict['name'] == 'dirac':
transform_dict['level'] = kwargs.get('level')
        if transform_dict['level'] != 0:
warnings.warn('Level of \'dirac\' transform automatically set to 0')
transform_dict['level'] = 0
elif transform_dict['name'] == 'wavelet':
transform_dict['name'] = name
        # Wavelet type (default = 'db4')
transform_dict['wavelet_type'] = kwargs.get('wavelet', 'db4')
# TODO: check if good idea to add the wavelet object from pywt in the transform dict
transform_dict['wavelet'] = pywt.Wavelet(transform_dict['wavelet_type'])
        # Wavelet decomposition level (default = 1)
transform_dict['level'] = kwargs.get('level', 1)
if not isinstance(transform_dict['level'], int):
raise TypeError('Must be an int')
# Check decomposition level if not above max level
check_wavelet_level(size=min(patch_size), dec_len=transform_dict['wavelet'].dec_len,
level=transform_dict['level'])
if transform_dict['level'] < 0:
raise ValueError(
"Level value of %d is too low . Minimum level is 0." % transform_dict['level'])
else:
max_level = pywt.dwt_max_level(min(patch_size), transform_dict['wavelet'].dec_len)
if transform_dict['level'] > max_level:
raise ValueError(
"Level value of %d is too high. Maximum allowed is %d." % (
transform_dict['level'], max_level))
# Wavelet boundaries mode (default = 'symmetric')
transform_dict['mode'] = kwargs.get('mode', 'symmetric')
if not isinstance(transform_dict['mode'], str):
raise TypeError('Must be a string')
else:
raise NotImplementedError('Only supports \'dirac\' or \'wavelet\'')
# Compute transform coefficient number
transform_dict['coeff_number'] = get_transform_coeff_number(transform_dict)
return transform_dict
def check_wavelet_level(size, dec_len, level):
# (see pywt._multilevel._check_level())
if level < 0:
raise ValueError('Level value of {} is too low . Minimum level is 0.'.format(level))
else:
max_level = pywt.dwt_max_level(size, dec_len)
if level > max_level:
raise ValueError('Level value of {} is too high. Maximum allowed is {}.'.format(level, max_level))
return
def generate_transform_list(patch_size, name_list, type_list, level_list, mode_list):
"""
Generate transform list based on name, type, level and mode lists
:param patch_size: tuple of ints (patch_height, patch_width)
:param name_list: list, len = transform_number
:param type_list: list, len = transform_number
:param level_list: list, len = transform_number
:param mode_list: list, len = transform_number
:return: transform_list: list of transform dict, len = transform_number
"""
# Check that input are of type list and are of the same size
input_lists = [name_list, type_list, level_list, mode_list]
input_len = []
input_type_flag = []
for lst in input_lists:
input_len.append(len(lst))
input_type_flag.append(isinstance(lst, list))
    if input_type_flag.count(True) != len(input_type_flag):
        raise TypeError('Name, type, level and mode must be lists')
    if input_len.count(input_len[0]) != len(input_len):
        raise ValueError('Name, type, level and mode lists must have the same length')
transform_list = []
for nm, wt, lvl, mode in zip(name_list, type_list, level_list, mode_list):
transform_list.append(generate_transform_dict(patch_size, name=nm, wavelet=wt, level=lvl, mode=mode))
return transform_list
def get_transform_coeff_number(transform_dict):
"""
Get transform coefficient number
:param transform_dict: transform dictionary
:return: coeff_number: int
"""
coeff_number = None
# Dirac transform
if transform_dict['name'] == 'dirac':
coeff_number = np.prod(transform_dict['patch_size'])
# Wavelet transform
elif transform_dict['name'] == 'wavelet':
# Wavelet mode: symmetric
# TODO: check documentation which claims to have the same modes as Matlab:
# [link](http://pywavelets.readthedocs.io/en/latest/ref/signal-extension-modes.html)
if transform_dict['mode'] == 'symmetric':
lvl_patch_size = np.asarray(transform_dict['patch_size'], dtype=float)
coeff_number = 0
lvl_coeff_number = lvl_patch_size # for the level=0 case
for lvl in range(transform_dict['level']):
# TODO: make sure that the level patch size used has to be "floored"
lvl_patch_size = np.floor(0.5 * (lvl_patch_size + float(transform_dict['wavelet'].dec_len)))
lvl_coeff_number = lvl_patch_size - 1 # bookkeeping_mat can be deduced here
# print('level coeff number:', lvl_coeff_number)
coeff_number += 3 * np.prod(lvl_coeff_number).astype(int)
# Last (approximated) level, i.e. cAn which has the same size as (cHn, cVn, cDn)
coeff_number += np.prod(lvl_coeff_number).astype(int)
# Wavelet mode: periodization
elif transform_dict['mode'] == 'periodization':
coeff_number = np.prod(transform_dict['patch_size'])
else:
            raise NotImplementedError('Only supports \'symmetric\' and \'periodization\'')
else:
raise NotImplementedError('Only supports \'dirac\' and \'wavelet\' transform')
return coeff_number
def wavelet_decomposition(patch_vec, transform_dict):
"""
Compute 2D wavelet decomposition of a vectorized patch with respect to the transform parameters (transform_dict)
See Matlab wavedec2 documentation for more information
:param patch_vec: array, shape = (patch_height * patch_width,)
:param transform_dict: transform dictionary
:return: coeffs_vec, bookkeeping_mat: vectorized wavelet coefficients and bookkeeping matrix
"""
patch_mat = reshape_vec_in_patch(patch_vec, transform_dict['patch_size'])
# coeffs are in the shape [cAn, (cHn, cVn, cDn), ..., (cH1, cV1, cD1)] with n the level of the decomposition
coeffs = pywt.wavedec2(patch_mat, wavelet=transform_dict['wavelet'], mode=transform_dict['mode'],
level=transform_dict['level'])
# Vectorize coeffs and compute the corresponding bookkeeping matrix S (see wavedec2 Matlab documentation)
# Initialization
bookkeeping_mat = np.zeros((transform_dict['level'] + 2, 2), dtype=int)
# Approximated level n, i.e. cAn
cAn = coeffs[0]
bookkeeping_mat[0, :] = cAn.shape
coeffs_vec = cAn.reshape(np.prod(cAn.shape))
# From level n to 1, i.e. (cHn, cVn, cDn) -> (cH1, cV1, cD1)
for i, c_lvl in enumerate(coeffs[1:]):
cHn, cVn, cDn = c_lvl
bookkeeping_mat[i + 1, :] = cHn.shape # cHn, cVn and cDn have the same shape
# TODO: check if the concatenation could be safely avoided by pre-computing the final number of coefficients
# Check utils.get_transform_coeff_number()
coeffs_vec = np.concatenate((coeffs_vec, cHn.reshape(np.prod(cHn.shape)))) # tf.concat
coeffs_vec = np.concatenate((coeffs_vec, cVn.reshape(np.prod(cVn.shape))))
coeffs_vec = np.concatenate((coeffs_vec, cDn.reshape(np.prod(cDn.shape))))
# Data shape
bookkeeping_mat[-1, :] = patch_mat.shape
return coeffs_vec, bookkeeping_mat
def wavelet_reconstruction(coeffs_vec, bookkeeping_mat, transform_dict):
"""
Compute 2D wavelet reconstruction of a vectorized set of wavelet coefficients and its corresponding bookkeeping
matrix and the transform parameters (transform_dict)
See Matlab waverec2 documentation for more information
:param coeffs_vec: vectorized wavelet coefficients
:param bookkeeping_mat: bookkeeping matrix
:param transform_dict: transform dictionary
:return: patch_vec: array, shape = (patch_height * patch_width,)
"""
# Recover the coeffs in the shape [cAn, (cHn, cVn, cDn), ..., (cH1, cV1, cD1)] with n the level of the decomposition
coeffs = []
# Approximated level n, i.e. cAn
s_lvl = bookkeeping_mat[0, :]
start_index = 0
coeffs.append(coeffs_vec[start_index: start_index + np.prod(s_lvl)].reshape(s_lvl))
start_index += np.prod(s_lvl)
# From level n to 1, i.e. (cHn, cVn, cDn) -> (cH1, cV1, cD1)
for s_lvl in bookkeeping_mat[1:-1, :]:
cHn = coeffs_vec[start_index: start_index + np.prod(s_lvl)].reshape(s_lvl)
start_index += np.prod(s_lvl)
cVn = coeffs_vec[start_index: start_index + np.prod(s_lvl)].reshape(s_lvl)
start_index += np.prod(s_lvl)
cDn = coeffs_vec[start_index: start_index + np.prod(s_lvl)].reshape(s_lvl)
start_index += np.prod(s_lvl)
coeffs.append((cHn, cVn, cDn))
patch_vec = reshape_patch_in_vec(pywt.waverec2(coeffs, wavelet=transform_dict['wavelet'],
mode=transform_dict['mode']))
return patch_vec
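# Hedged round-trip sketch for the decomposition/reconstruction pair, assuming a 16x16 patch
# and the default 'db4'/'symmetric' wavelet transform defined above:
#   >>> transform = generate_transform_dict((16, 16), 'wavelet', level=1)
#   >>> patch_vec = np.random.rand(256)
#   >>> coeffs, bk = wavelet_decomposition(patch_vec, transform)
#   >>> rec_vec = wavelet_reconstruction(coeffs, bk, transform)
#   >>> np.allclose(patch_vec, rec_vec)
#   True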
def multiple_transform_decomposition(patch_vec, transform_list):
"""
Perform the decomposition of a patch in a concatenation of transforms
:param patch_vec: array, shape = (patch_height * patch_width,)
:param transform_list: list of transform dict
:return: decomposition_coeff, bookkeeping_mat: list of arrays
see Matlab wavedec2 documentation for more information)
"""
# Check if transform_list is a list of dict
if not is_array_of(transform_list, dict):
raise ValueError('Transform list must be a list of dict')
# Each transform must have the same patch_size
patch_size_list = [tl['patch_size'] for tl in transform_list]
    if patch_size_list.count(patch_size_list[0]) != len(transform_list):
        raise ValueError('Incoherent patch size in the concatenation of transforms. '
'Each transform must have the same patch size')
# TODO: Check if patch_vec is a numpy array or tf??
# Since multiple transforms are performed, it has to be scaled
scale_factor = np.sqrt(len(transform_list))
decomposition_coeff = []
bookkeeping_mat = []
for transform in transform_list:
if transform['name'].lower() == 'dirac':
decomposition_coeff.append(patch_vec/scale_factor)
bookkeeping_mat.append(np.array((transform['patch_size'], transform['patch_size']))) # twice to fit Matlab definition
elif transform['name'].lower() == 'wavelet':
cv, bk = wavelet_decomposition(patch_vec/scale_factor, transform)
decomposition_coeff.append(cv)
bookkeeping_mat.append(bk)
else:
raise NotImplementedError('Only supports \'dirac\' and \'wavelet\' transform')
return decomposition_coeff, bookkeeping_mat
def multiple_transform_reconstruction(decomposition_coeff, bookkeeping_mat, transform_list):
"""
Perform the reconstruction of patch by a concatenation of transforms
:param decomposition_coeff: list of array
:param bookkeeping_mat: list of array
:param transform_list: list of transform dict
:return: patch_vec: array, shape = (patch_height * patch_width,)
"""
# TODO: tf
if not is_array_of(decomposition_coeff, np.ndarray):
raise ValueError('Decomposition coefficient list must be a list of np.ndarray')
if not is_array_of(bookkeeping_mat, np.ndarray):
raise ValueError('Bookkeeping matrix list must be a list of np.ndarray')
# Check if transform_list is a list of dict
if not is_array_of(transform_list, dict):
raise ValueError('Transform list must be a list of dict')
# Each transform must have the same patch_size
patch_size_list = [tl['patch_size'] for tl in transform_list]
    if patch_size_list.count(patch_size_list[0]) != len(transform_list):
raise ValueError('Incoherent patch size in the concatenation of transforms. '
'Each transform must have the same patch size')
patch_size = transform_list[0]['patch_size']
patch_vec = np.zeros((np.prod(patch_size)))
scale_factor = np.sqrt(len(transform_list))
for cv, bk, transform in zip(decomposition_coeff, bookkeeping_mat, transform_list):
if transform['name'].lower() == 'dirac':
patch_vec += cv / scale_factor
elif transform['name'].lower() == 'wavelet':
patch_vec += wavelet_reconstruction(cv, bk, transform) / scale_factor
else:
raise NotImplementedError('Only supports \'dirac\' and \'wavelet\' transform')
return patch_vec
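# Hedged end-to-end sketch of a dirac + wavelet concatenation (illustrative only, 16x16 patches):
#   >>> transforms = generate_transform_list((16, 16), ['dirac', 'wavelet'], [None, 'db4'],
#   ...                                      [0, 1], ['symmetric', 'symmetric'])
#   >>> patch_vec = np.random.rand(256)
#   >>> coeffs, bks = multiple_transform_decomposition(patch_vec, transforms)
#   >>> rec_vec = multiple_transform_reconstruction(coeffs, bks, transforms)
#   >>> np.allclose(patch_vec, rec_vec)
#   True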
def plot_image_set(image_list, name_list, fig=None, sub_plt_n_w=4):
"""
Plot an image set given as a list
:param image_list: list of images
:param name_list: list of names
:param fig: figure obj
    :param sub_plt_n_w: int, number of subplots spanning the width
:return:
"""
# TODO: align images 'top'
if fig is None:
fig = plt.figure()
sub_plt_n_h = int(np.ceil(len(image_list) / sub_plt_n_w))
ax = []
for i, (im, im_name) in enumerate(zip(image_list, name_list)):
ax.append(fig.add_subplot(sub_plt_n_h, sub_plt_n_w, i + 1))
ax[i].imshow(im, cmap='gray', vmin=im.min(), vmax=im.max())
ax[i].set_axis_off()
ax[i].set_title('{}\n({}, {})'.format(im_name, im.shape[0], im.shape[1]), fontsize=10)
def plot_image_with_cbar(image, title=None, cmap='gray', vmin=None, vmax=None, ax=None):
"""
    Plot an image with its colorbar
:param image: array, shape = (image_height, image_width)
:param title: option title
:param cmap: optional cmap
:param vmin: optional vmin
:param vmax: optional vmax
:param ax: optional axis
:return:
"""
if ax is None:
ax = plt.gca()
im = ax.imshow(image, cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_axis_off()
if title is not None:
ax.set_title('{}'.format(title), fontsize=12)
divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
def plot_decomposition_coeffs(coeffs_vec, title=None, ax=None, theta=None, theta_same_col=False):
"""
Plot decomposition coefficients and the corresponding threshold (theta) if given
:param coeffs_vec: array, shape = (patch_height * patch_width,)
:param title: optional title
:param ax: optional axis
:param theta: optional threshold
:param theta_same_col: optional color flag
:return:
"""
if ax is None:
ax = plt.gca()
base_line, = ax.plot(coeffs_vec)
if title is not None:
ax.set_title('{}'.format(title), fontsize=12)
if theta is not None:
        # If theta is a scalar, i.e. same threshold applied to each coefficient, a vector of theta is created
theta_plt = theta*np.ones(coeffs_vec.shape)
if theta_same_col:
ax.plot(theta_plt, '--', color=base_line.get_color())
else:
ax.plot(theta_plt, '--')
def convert_transform_dict_to_tf(transform_dict):
"""
Transform dictionary conversion to tensorflow
:param transform_dict:
:return: tf_transform_dict
"""
tf_transform_dict = dict()
tf_transform_dict['name'] = tf.constant(transform_dict['name'], tf.string)
tf_transform_dict['patch_size'] = transform_dict['patch_size']
# tf_transform_dict['tf_patch_size'] = tf.TensorShape(dims=transform_dict['patch_size'])
tf_transform_dict['coeff_number'] = tf.constant(transform_dict['coeff_number'], tf.int64)
tf_transform_dict['level'] = tf.constant(transform_dict['level'], tf.int32)
if transform_dict['name'] == 'dirac':
pass
elif transform_dict['name'] == 'wavelet':
tf_transform_dict['mode'] = tf.constant(transform_dict['mode'], tf.string)
tf_transform_dict['wavelet_type'] = tf.constant(transform_dict['wavelet_type'], tf.string)
return tf_transform_dict
def convert_transform_list_to_tf(transform_list):
"""
List of transform dictionary conversion to tensorflow
:param transform_list:
:return: tf_transform_list
"""
tf_transform_list = [convert_transform_dict_to_tf(transform_dict) for transform_dict in transform_list]
return tf_transform_list
def tf_pywt_wavelet_decomposition(patch_vec, patch_size, name, wavelet_type, level, mode):
"""
:param patch_vec:
:param patch_size:
:param name:
:param wavelet_type:
:param level:
:param mode:
:return:
"""
# TODO: docstring
# Convert input values for pywt
wavelet_type = wavelet_type.decode('utf-8')
mode = mode.decode('utf-8')
level = int(level)
patch_size = tuple(patch_size)
name = name.decode('utf-8')
# print('wavelet_type: {}, {}'.format(wavelet_type, type(wavelet_type)))
# print('mode: {}, {}'.format(mode, type(mode)))
# print('level: {}, {}'.format(level, type(level)))
# print('patch_vec: {}, {}'.format(patch_vec, type(patch_vec)))
# print('patch_size: {}, {}'.format(patch_size, type(patch_size)))
# print('name: {}, {}'.format(name, type(name)))
# Rebuild transform_dict from unpacked inputs
transform_dict = generate_transform_dict(patch_size, name, wavelet=wavelet_type, level=level, mode=mode)
# print(transform_dict)
# Decomposition
coeffs_vec, bookkeeping_mat = wavelet_decomposition(patch_vec, transform_dict)
return coeffs_vec.astype(np.float32), bookkeeping_mat.astype(np.int32)
def tf_pywt_wavelet_reconstruction(coeffs_vec, bookkeeping_mat, patch_size, name, wavelet_type, level, mode):
"""
:param coeffs_vec:
:param bookkeeping_mat:
:param patch_size:
:param name:
:param wavelet_type:
:param level:
:param mode:
:return:
"""
# TODO: docstring
# Convert input values for pywt
# print(coeffs_vec, type(coeffs_vec))
# print(bookkeeping_mat, type(bookkeeping_mat))
wavelet_type = wavelet_type.decode('utf-8')
mode = mode.decode('utf-8')
level = int(level)
patch_size = tuple(patch_size)
name = name.decode('utf-8')
# print('wavelet_type: {}, {}'.format(wavelet_type, type(wavelet_type)))
# print('mode: {}, {}'.format(mode, type(mode)))
# print('level: {}, {}'.format(level, type(level)))
# print('patch_vec: {}, {}'.format(patch_vec, type(patch_vec)))
# print('patch_size: {}, {}'.format(patch_size, type(patch_size)))
# print('name: {}, {}'.format(name, type(name)))
# Rebuild transform_dict from unpacked inputs
transform_dict = generate_transform_dict(patch_size, name, wavelet=wavelet_type, level=level, mode=mode)
# Reconstruction
patch_vec = wavelet_reconstruction(coeffs_vec, bookkeeping_mat, transform_dict)
return patch_vec.astype(np.float32)
def tf_wavelet_decomposition(tf_patch_vec, tf_transform_dict, flag_pywt=True):
# ONLY POSSIBLE YET USING THE PYWT INTERFACE
# TODO: wavelet decomposition within tf
if not flag_pywt:
raise NotImplementedError('Only possible using the PyWavelet interface')
tf_coeffs_vec, tf_bookkeeping_mat = \
tf.py_func(tf_pywt_wavelet_decomposition, [tf_patch_vec,
tf_transform_dict['patch_size'],
tf_transform_dict['name'],
tf_transform_dict['wavelet_type'],
tf_transform_dict['level'],
tf_transform_dict['mode']],
Tout=[tf.float32, tf.int32])
return tf_coeffs_vec, tf_bookkeeping_mat
def tf_wavelet_reconstruction(tf_coeffs_vec, tf_bookkeeping_mat, tf_transform_dict, flag_pywt=True):
# ONLY POSSIBLE YET USING THE PYWT INTERFACE
# TODO: wavelet decomposition within tf
if not flag_pywt:
raise NotImplementedError('Only possible using the PyWavelet interface')
tf_patch_vec = tf.py_func(tf_pywt_wavelet_reconstruction, [tf_coeffs_vec, tf_bookkeeping_mat,
tf_transform_dict['patch_size'],
tf_transform_dict['name'],
tf_transform_dict['wavelet_type'],
tf_transform_dict['level'],
tf_transform_dict['mode']],
Tout=tf.float32)
return tf_patch_vec
def tf_multiple_transform_decomposition(tf_patch_vec, tf_transform_list):
# TODO: some checks as the non-tf version
# TODO: condition doesn't work with tf.cond()
# # If True, i.e. 'dirac'
# def f1():
# tf_cv = tf_patch_vec / scale_factor
# tf_bk = tf.constant(transform['patch_size'])
# return tf_cv, tf_bk
#
# # If False, i.e. 'wavelet'
# def f2():
# tf_cv, tf_bk = tf_wavelet_decomposition(tf_patch_vec / scale_factor, transform)
# return tf_cv, tf_bk
# Since multiple transforms are performed, it has to be scaled
scale_factor = tf.sqrt(float(len(tf_transform_list)))
tf_decomposition_coeff = []
tf_bookkeeping_mat = []
for transform in tf_transform_list:
# TODO: use tf.case which seems more safe and can handle other possibility
# tf_cv, tf_bk = tf.cond(tf.equal(transform['name'], tf.constant('dirac', tf.string)), f1, f2)
# Shape is unknown since it comes from a python interface for the moment
tf_cv, tf_bk = tf_wavelet_decomposition(tf_patch_vec/scale_factor, transform)
tf_decomposition_coeff.append(tf_cv)
tf_bookkeeping_mat.append(tf_bk)
return tf_decomposition_coeff, tf_bookkeeping_mat
def tf_multiple_transform_reconstruction(tf_decomposition_coeff, tf_bookkeeping_mat, tf_transform_list):
# TODO: some checks as the non-tf version
# TODO: condition doesn't work with tf.cond()
# Same patch sizes for each transform
patch_size = tf_transform_list[0]['patch_size']
tf_patch_vec = tf.zeros((np.prod(patch_size)), dtype=tf.float32)
scale_factor = tf.sqrt(float(len(tf_transform_list)))
for cv, bk, transform in zip(tf_decomposition_coeff, tf_bookkeeping_mat, tf_transform_list):
# TODO: condition doesn't work with tf.cond(), transform['name'] should be checked!! only wavelet now
tf_patch_vec += tf_wavelet_reconstruction(cv, bk, transform) / scale_factor
return tf_patch_vec
def tf_soft_thresholding(coeff, theta):
return tf.mul(tf.sign(coeff), tf.maximum(tf.constant(0, dtype=tf.float32), tf.sub(tf.abs(coeff), theta))) | mit |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/stats/math.py | 25 | 3253 | # pylint: disable-msg=E1103
# pylint: disable-msg=W0212
from __future__ import division
from pandas.compat import range
import numpy as np
import numpy.linalg as linalg
def rank(X, cond=1.0e-12):
"""
Return the rank of a matrix X based on its generalized inverse,
not the SVD.
"""
X = np.asarray(X)
if len(X.shape) == 2:
import scipy.linalg as SL
D = SL.svdvals(X)
result = np.add.reduce(np.greater(D / D.max(), cond))
return int(result.astype(np.int32))
else:
return int(not np.alltrue(np.equal(X, 0.)))
def solve(a, b):
"""Returns the solution of A X = B."""
try:
return linalg.solve(a, b)
except linalg.LinAlgError:
return np.dot(linalg.pinv(a), b)
def inv(a):
"""Returns the inverse of A."""
try:
return np.linalg.inv(a)
except linalg.LinAlgError:
return np.linalg.pinv(a)
def is_psd(m):
eigvals = linalg.eigvals(m)
return np.isreal(eigvals).all() and (eigvals >= 0).all()
def newey_west(m, max_lags, nobs, df, nw_overlap=False):
"""
Compute Newey-West adjusted covariance matrix, taking into account
specified number of leads / lags
Parameters
----------
m : (N x K)
max_lags : int
nobs : int
Number of observations in model
df : int
Degrees of freedom in explanatory variables
nw_overlap : boolean, default False
Assume data is overlapping
Returns
-------
ndarray (K x K)
Reference
---------
Newey, W. K. & West, K. D. (1987) A Simple, Positive
Semi-definite, Heteroskedasticity and Autocorrelation Consistent
Covariance Matrix, Econometrica, vol. 55(3), 703-708
"""
Xeps = np.dot(m.T, m)
for lag in range(1, max_lags + 1):
auto_cov = np.dot(m[:-lag].T, m[lag:])
weight = lag / (max_lags + 1)
if nw_overlap:
weight = 0
bb = auto_cov + auto_cov.T
dd = (1 - weight) * bb
Xeps += dd
Xeps *= nobs / (nobs - df)
if nw_overlap and not is_psd(Xeps):
new_max_lags = int(np.ceil(max_lags * 1.5))
# print('nw_overlap is True and newey_west generated a non positive '
# 'semidefinite matrix, so using newey_west with max_lags of %d.'
# % new_max_lags)
return newey_west(m, new_max_lags, nobs, df)
return Xeps
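#Example usage (illustrative sketch): compute a Newey-West adjusted covariance
#matrix for a (nobs x K) array of residual-weighted regressors; the random
#values and lag choice below are placeholders.
#    m = np.random.randn(100, 3)
#    cov = newey_west(m, max_lags=4, nobs=100, df=3)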
def calc_F(R, r, beta, var_beta, nobs, df):
"""
Computes the standard F-test statistic for linear restriction
hypothesis testing
Parameters
----------
R: ndarray (N x N)
Restriction matrix
r: ndarray (N x 1)
Restriction vector
beta: ndarray (N x 1)
Estimated model coefficients
var_beta: ndarray (N x N)
Variance covariance matrix of regressors
nobs: int
Number of observations in model
df: int
Model degrees of freedom
Returns
-------
F value, (q, df_resid), p value
"""
from scipy.stats import f
hyp = np.dot(R, beta.reshape(len(beta), 1)) - r
RSR = np.dot(R, np.dot(var_beta, R.T))
q = len(r)
F = np.dot(hyp.T, np.dot(inv(RSR), hyp)).squeeze() / q
p_value = 1 - f.cdf(F, q, nobs - df)
return F, (q, nobs - df), p_value
| gpl-2.0 |
dshean/pygeotools | pygeotools/lib/geolib.py | 1 | 85261 | #! /usr/bin/env python
"""
Geospatial functions for rasters, vectors.
"""
#Need to make sure all geom have spatial reference included
import sys
import os
import requests
import numpy as np
from osgeo import gdal, ogr, osr
#Enable GDAL exceptions
gdal.UseExceptions()
#Below are many spatial reference system definitions
#Define WGS84 srs
#mpd = 111319.9
wgs_srs = osr.SpatialReference()
wgs_srs.SetWellKnownGeogCS('WGS84')
wgs_proj = '+proj=longlat +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +no_defs '
#Define ECEF srs
ecef_srs=osr.SpatialReference()
ecef_srs.ImportFromEPSG(4978)
#Define ITRF2008 srs
itrf_srs=osr.SpatialReference()
itrf_srs.ImportFromEPSG(5332)
#TOPEX ellipsoid
tp_srs = osr.SpatialReference()
#tp_proj = '+proj=latlong +a=6378136.300000 +rf=298.25700000 +no_defs'
tp_proj = '+proj=latlong +a=6378136.300000 +b=6356751.600563 +towgs84=0,0,0,0,0,0,0 +no_defs'
tp_srs.ImportFromProj4(tp_proj)
#Vertical CS setup
#See: http://lists.osgeo.org/pipermail/gdal-dev/2011-August/029856.html
#https://github.com/OSGeo/proj.4/wiki/VerticalDatums
#Note: must have gtx grid files in /usr/local/share/proj
#Should add a check for these
#cd /usr/local/share/proj
#wget http://download.osgeo.org/proj/vdatum/egm96_15/egm96_15.gtx
#wget http://download.osgeo.org/proj/vdatum/egm08_25/egm08_25.gtx
#NAD83 (ellipsoid) to NAVD88 (orthometric)
#wget http://download.osgeo.org/proj/vdatum/usa_geoid/g.tar.gz
#tar -xzvf g.tar.gz
#rm g.tar.gz
#wget http://download.osgeo.org/proj/vdatum/usa_geoid2012.zip
#unzip usa_geoid2012.zip
#rm usa_geoid2012.zip
egm96_srs=osr.SpatialReference()
egm96_srs.ImportFromProj4("+proj=longlat +datum=WGS84 +no_defs +geoidgrids=egm96_15.gtx")
#Define EGM2008 srs
egm08_srs=osr.SpatialReference()
egm08_srs.ImportFromProj4("+proj=longlat +datum=WGS84 +no_defs +geoidgrids=egm08_25.gtx")
#Define NAD83/NAVD88 srs for CONUS
navd88_conus_srs=osr.SpatialReference()
navd88_conus_srs.ImportFromProj4("+proj=longlat +datum=NAD83 +no_defs +geoidgrids=g2012a_conus.gtx")
#Define NAD83/NAVD88 srs for Alaska
navd88_alaska_srs=osr.SpatialReference()
navd88_alaska_srs.ImportFromProj4("+proj=longlat +datum=NAD83 +no_defs +geoidgrids=g2012a_alaska.gtx")
#Define N Polar Stereographic srs
nps_srs=osr.SpatialReference()
#Note: this doesn't stick!
#nps_srs.ImportFromEPSG(3413)
nps_proj = '+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs '
nps_srs.ImportFromProj4(nps_proj)
nps_egm08_srs=osr.SpatialReference()
nps_egm08_srs.ImportFromProj4('+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +geoidgrids=egm08_25.gtx +no_defs')
#Define S Polar Stereographic srs
sps_srs=osr.SpatialReference()
#Note: this doesn't stick!
#sps_srs.ImportFromEPSG(3031)
sps_proj = '+proj=stere +lat_0=-90 +lat_ts=-71 +lon_0=0 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs '
sps_srs.ImportFromProj4(sps_proj)
sps_egm08_srs=osr.SpatialReference()
sps_egm08_srs.ImportFromProj4('+proj=stere +lat_0=-90 +lat_ts=-71 +lon_0=0 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +geoidgrids=egm08_25.gtx +no_defs')
aea_grs80_srs=osr.SpatialReference()
#aea_grs80_proj='+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs '
aea_grs80_proj='+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs '
aea_grs80_srs.ImportFromEPSG(3338)
aea_navd88_srs=osr.SpatialReference()
#aea_navd88_proj='+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs +geoidgrids=g2012a_alaska.gtx'
aea_navd88_proj='+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs +towgs84=0,0,0,0,0,0,0 +geoidgrids=g2012a_conus.gtx,g2012a_alaska.gtx,g2012a_guam.gtx,g2012a_hawaii.gtx,g2012a_puertorico.gtx,g2012a_samoa.gtx +vunits=m +no_defs'
aea_navd88_srs.ImportFromProj4(aea_navd88_proj)
#HMA projection
hma_aea_srs = osr.SpatialReference()
#hma_aea_proj = '+proj=aea +lat_1=25 +lat_2=47 +lon_0=85 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs '
hma_aea_proj = '+proj=aea +lat_1=25 +lat_2=47 +lat_0=36 +lon_0=85 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs '
hma_aea_srs.ImportFromProj4(hma_aea_proj)
#CONUS projection
#CONUS bounds 36, 49, -105, -124
conus_aea_srs = osr.SpatialReference()
conus_aea_proj = '+proj=aea +lat_1=36 +lat_2=49 +lat_0=43 +lon_0=-115 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs '
conus_aea_srs.ImportFromProj4(conus_aea_proj)
#To do for transformations below:
#Check input order of lon, lat
#Helper for coordinate transformations
#Input for each coordinate can be float, ndarray, or masked ndarray
def cT_helper(x, y, z, in_srs, out_srs):
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
if x.shape != y.shape:
sys.exit("Inconsistent number of x and y points")
valid_idx = Ellipsis
#Handle case where we have x array, y array, but a constant z (e.g., 0.0)
if z.shape != x.shape:
#If a constant elevation is provided
if z.shape[0] == 1:
orig_z = z
z = np.zeros_like(x)
z[:] = orig_z
if np.ma.is_masked(x):
z[np.ma.getmaskarray(x)] = np.ma.masked
else:
sys.exit("Inconsistent number of z and x/y points")
#If any of the inputs is masked, only transform points with all three coordinates available
if np.ma.is_masked(x) or np.ma.is_masked(y) or np.ma.is_masked(z):
x = np.ma.array(x)
y = np.ma.array(y)
z = np.ma.array(z)
from pygeotools.lib import malib
valid_idx = ~(malib.common_mask([x,y,z]))
#Prepare (x,y,z) tuples
xyz = np.array([x[valid_idx], y[valid_idx], z[valid_idx]]).T
#Define coordinate transformation
cT = osr.CoordinateTransformation(in_srs, out_srs)
#Loop through each point
xyz2 = np.array([cT.TransformPoint(xi,yi,zi) for (xi,yi,zi) in xyz]).T
#Fill in the masked array
if xyz2.shape[1] == 1:
xyz2 = xyz2.squeeze()
x2, y2, z2 = xyz2[0], xyz2[1], xyz2[2]
else:
x2 = np.zeros_like(x)
y2 = np.zeros_like(y)
z2 = np.zeros_like(z)
x2[valid_idx] = xyz2[0]
y2[valid_idx] = xyz2[1]
z2[valid_idx] = xyz2[2]
return x2, y2, z2
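#Example usage (illustrative sketch): transform WGS84 lon/lat (with a constant
#0.0 elevation) to N polar stereographic coordinates; coordinate values are placeholders.
#    lon = np.array([-49.5, -50.0])
#    lat = np.array([69.2, 69.3])
#    x, y, z = cT_helper(lon, lat, 0.0, wgs_srs, nps_srs)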
def ll2ecef(lon, lat, z=0.0):
return cT_helper(lon, lat, z, wgs_srs, ecef_srs)
def ecef2ll(x, y, z):
return cT_helper(x, y, z, ecef_srs, wgs_srs)
def ll2itrf(lon, lat, z=0.0):
return cT_helper(lon, lat, z, wgs_srs, itrf_srs)
def itrf2ll(x, y, z):
return cT_helper(x, y, z, itrf_srs, wgs_srs)
def tp2wgs(x, y, z):
return cT_helper(x, y, z, tp_srs, wgs_srs)
def wgs2tp(x, y, z):
return cT_helper(x, y, z, wgs_srs, tp_srs)
#Note: the lat/lon values returned here might be susceptible to rounding errors
#Or are these true offsets due to dz?
#120.0 -> 119.99999999999999
#def geoid2ell(lon, lat, z=0.0, geoid=egm96_srs):
def geoid2ell(lon, lat, z=0.0, geoid=egm08_srs):
llz = cT_helper(lon, lat, z, geoid, wgs_srs)
return lon, lat, llz[2]
#def ell2geoid(lon, lat, z=0.0, geoid=egm96_srs):
def ell2geoid(lon, lat, z=0.0, geoid=egm08_srs):
llz = cT_helper(lon, lat, z, wgs_srs, geoid)
return lon, lat, llz[2]
def ll2nps(lon, lat, z=0.0):
#Should throw error here
if np.any(lat < 0.0):
print("Warning: latitude out of range for output projection")
return cT_helper(lon, lat, z, wgs_srs, nps_srs)
def nps2ll(x, y, z=0.0):
return cT_helper(x, y, z, nps_srs, wgs_srs)
def ll2sps(lon, lat, z=0.0):
if np.any(lat > 0.0):
print("Warning: latitude out of range for output projection")
return cT_helper(lon, lat, z, wgs_srs, sps_srs)
def sps2ll(x, y, z=0.0):
return cT_helper(x, y, z, sps_srs, wgs_srs)
def scale_ps_ds(ds):
    #get_center returns (x, y), i.e. (lon, lat) when converted to WGS84
    clon, clat = get_center(ds, t_srs=wgs_srs)
return scale_ps(clat)
def nps2geoid(x, y, z=0.0, geoid=nps_egm08_srs):
return cT_helper(x, y, z, nps_srs, geoid)
def sps2geoid(x, y, z=0.0, geoid=sps_egm08_srs):
return cT_helper(x, y, z, sps_srs, geoid)
def localtmerc_ds(ds):
lon, lat = get_center(ds, t_srs=wgs_srs)
return localtmerc(lon, lat)
def localtmerc(lon, lat):
local_srs = osr.SpatialReference()
local_proj = '+proj=tmerc +lat_0=%0.7f +lon_0=%0.7f +datum=WGS84 +units=m +no_defs ' % (lat, lon)
local_srs.ImportFromProj4(local_proj)
return local_srs
def localortho_ds(ds):
lon, lat = get_center(ds, t_srs=wgs_srs)
return localortho(lon, lat)
def localortho(lon, lat):
"""Create srs for local orthographic projection centered at lat, lon
"""
local_srs = osr.SpatialReference()
local_proj = '+proj=ortho +lat_0=%0.7f +lon_0=%0.7f +datum=WGS84 +units=m +no_defs ' % (lat, lon)
local_srs.ImportFromProj4(local_proj)
return local_srs
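#Example usage (illustrative sketch): build a local orthographic srs centered on
#a point of interest and project WGS84 coordinates into it (values are placeholders).
#    local_srs = localortho(-121.7, 46.85)
#    x, y, z = cT_helper(-121.7, 46.85, 0.0, wgs_srs, local_srs)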
#Transform geometry to local orthographic projection, useful for width/height and area calc
def geom2localortho(geom):
"""Convert existing geom to local orthographic projection
Useful for local cartesian distance/area calculations
"""
cx, cy = geom.Centroid().GetPoint_2D()
lon, lat, z = cT_helper(cx, cy, 0, geom.GetSpatialReference(), wgs_srs)
local_srs = localortho(lon,lat)
local_geom = geom_dup(geom)
geom_transform(local_geom, local_srs)
return local_geom
def ll2local(lon, lat, z=0, local_srs=None):
if local_srs is None:
lonm = lon.mean()
latm = lat.mean()
local_srs = localortho(lonm, latm)
return cT_helper(lon, lat, z, wgs_srs, local_srs)
def sps2local(x, y, z=0, local_srs=None):
if local_srs is None:
xm = x.mean()
ym = y.mean()
lon, lat, z = sps2ll(xm, ym, z)
local_srs = localortho(lon, lat)
return cT_helper(x, y, z, sps_srs, local_srs)
def lldist(pt1, pt2):
(lon1, lat1) = pt1
(lon2, lat2) = pt2
from vincenty import vincenty
d = vincenty((lat1, lon1), (lat2, lon2))
return d
#Scaling factor for area calculations in polar stereographic
#Should multiply the returned value by computed ps area to obtain true area
def scale_ps(lat):
"""
This function calculates the scaling factor for a polar stereographic
projection (ie. SSM/I grid) to correct area calculations. The scaling
factor is defined (from Snyder, 1982, Map Projections used by the U.S.
Geological Survey) as:
k = (mc/m)*(t/tc), where:
m = cos(lat)/sqrt(1 - e2*sin(lat)^2)
t = tan(Pi/4 - lat/2)/((1 - e*sin(lat))/(1 + e*sin(lat)))^(e/2)
e2 = 0.006693883 is the earth eccentricity (Hughes ellipsoid)
e = sqrt(e2)
mc = m at the reference latitude (70 degrees)
tc = t at the reference latitude (70 degrees)
The ratio mc/tc is precalculated and stored in the variable m70_t70.
From Ben Smith PS scale m file (7/12/12)
"""
lat = np.array(lat)
if np.any(lat > 0):
m70_t70 = 1.9332279
#Hack to deal with pole
lat[lat>=90.0] = 89.999999999
else:
# for 71 deg, southern PS -- checked BS 5/2012
m70_t70 = 1.93903005
lat[lat<=-90.0] = -89.999999999
#for WGS84, a=6378137, 1/f = 298.257223563 -> 1-sqrt(1-e^2) = f
#-> 1-(1-f)^2 = e2 = 0.006694379990141
#e2 = 0.006693883
e2 = 0.006694379990141 # BS calculated from WGS84 parameters 5/2012
e = np.sqrt(e2)
lat = np.abs(np.deg2rad(lat))
slat = np.sin(lat)
clat = np.cos(lat)
m = clat/np.sqrt(1. - e2*slat**2)
t = np.tan(np.pi/4 - lat/2)/((1. - e*slat)/(1. + e*slat))**(e/2)
k = m70_t70*t/m
scale=(1./k)
return scale
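#Example usage (illustrative sketch): area scale factor for grid cells in a
#N polar stereographic projection; at the 70 deg standard latitude the factor is ~1.0.
#    lat = np.array([70.0, 80.0])
#    true_area = (240.0*240.0) * scale_ps(lat)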
def wraplon(lon):
lon = lon % 360.0
return lon
def lon360to180(lon):
"""Convert longitude from (0, 360) to (-180, 180)
"""
if np.any(lon > 360.0) or np.any(lon < 0.0):
print("Warning: lon outside expected range")
lon = wraplon(lon)
#lon[lon > 180.0] -= 360.0
#lon180 = (lon+180) - np.floor((lon+180)/360)*360 - 180
    #Integer division by 180 gives 1 for lon > 180, which then gets wrapped by -360
    lon = lon - (lon.astype(int)//180)*360.0
return lon
def lon180to360(lon):
"""Convert longitude from (-180, 180) to (0, 360)
"""
if np.any(lon > 180.0) or np.any(lon < -180.0):
print("Warning: lon outside expected range")
lon = lon360to180(lon)
#lon[lon < 0.0] += 360.0
lon = (lon + 360.0) % 360.0
return lon
#Want to accept np arrays for these
def dd2dms(dd):
"""Convert decimal degrees to degrees, minutes, seconds
"""
n = dd < 0
dd = abs(dd)
m,s = divmod(dd*3600,60)
d,m = divmod(m,60)
if n:
d = -d
return d,m,s
def dms2dd(d,m,s):
"""Convert degrees, minutes, seconds to decimal degrees
"""
if d < 0:
sign = -1
else:
sign = 1
dd = sign * (int(abs(d)) + float(m) / 60 + float(s) / 3600)
return dd
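#Example (worked values): 46.5 decimal degrees is 46 deg 30 min 0 sec
#    dd2dms(46.5)      # returns (46.0, 30.0, 0.0)
#    dms2dd(46, 30, 0) # returns 46.5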
#Note: this needs some work, not sure what input str format was supposed to be
def dms2dd_str(dms_str, delim=' ', fmt=None):
import re
#dms_str = re.sub(r'\s', '', dms_str)
if re.search('[swSW]', dms_str):
sign = -1
else:
sign = 1
#re.split('\s+', s)
#(degree, minute, second, frac_seconds) = map(int, re.split('\D+', dms_str))
#(degree, minute, second) = dms_str.split(delim)[0:3]
    #Remove consecutive delimiters (empty string records)
(degree, minute, second) = [s for s in dms_str.split(delim) if s]
#dd = sign * (int(degree) + float(minute) / 60 + float(second) / 3600 + float(frac_seconds) / 36000)
dd = dms2dd(int(degree)*sign, int(minute), float(second))
return dd
def dm2dd(d,m):
"""Convert degrees, decimal minutes to decimal degrees
"""
dd = dms2dd(d,m,0)
return dd
def dd2dm(dd):
    """Convert decimal degrees to degrees, decimal minutes
    """
    d,m,s = dd2dms(dd)
    #Fold seconds into decimal minutes
    m = m + float(s)/60.
    return d,m
def mapToPixel(mX, mY, geoTransform):
"""Convert map coordinates to pixel coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
"""
mX = np.asarray(mX)
mY = np.asarray(mY)
if geoTransform[2] + geoTransform[4] == 0:
pX = ((mX - geoTransform[0]) / geoTransform[1]) - 0.5
pY = ((mY - geoTransform[3]) / geoTransform[5]) - 0.5
else:
pX, pY = applyGeoTransform(mX, mY, invertGeoTransform(geoTransform))
#return int(pX), int(pY)
return pX, pY
#Add 0.5 px offset to account for GDAL model - gt 0,0 is UL corner, pixel 0,0 is center
def pixelToMap(pX, pY, geoTransform):
"""Convert pixel coordinates to map coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
"""
pX = np.asarray(pX, dtype=float)
pY = np.asarray(pY, dtype=float)
pX += 0.5
pY += 0.5
mX, mY = applyGeoTransform(pX, pY, geoTransform)
return mX, mY
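#Example (worked values, illustrative geotransform): round-trip between pixel and
#map coordinates for a 30 m grid with origin at (500000, 4100000)
#    gt = [500000.0, 30.0, 0.0, 4100000.0, 0.0, -30.0]
#    mX, mY = pixelToMap(0, 0, gt)    # (500015.0, 4099985.0), center of the UL pixel
#    pX, pY = mapToPixel(mX, mY, gt)  # (0.0, 0.0)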
#Keep this clean and deal with 0.5 px offsets in pixelToMap
def applyGeoTransform(inX, inY, geoTransform):
inX = np.asarray(inX)
inY = np.asarray(inY)
outX = geoTransform[0] + inX * geoTransform[1] + inY * geoTransform[2]
outY = geoTransform[3] + inX * geoTransform[4] + inY * geoTransform[5]
return outX, outY
def invertGeoTransform(geoTransform):
# we assume a 3rd row that is [1 0 0]
    # compute determinant
det = geoTransform[1] * geoTransform[5] - geoTransform[2] * geoTransform[4]
if abs(det) < 0.000000000000001:
return
invDet = 1.0 / det
    # compute adjoint and divide by determinant
outGeoTransform = [0, 0, 0, 0, 0, 0]
outGeoTransform[1] = geoTransform[5] * invDet
outGeoTransform[4] = -geoTransform[4] * invDet
outGeoTransform[2] = -geoTransform[2] * invDet
outGeoTransform[5] = geoTransform[1] * invDet
outGeoTransform[0] = (geoTransform[2] * geoTransform[3] - geoTransform[0] * geoTransform[5]) * invDet
outGeoTransform[3] = (-geoTransform[1] * geoTransform[3] + geoTransform[0] * geoTransform[4]) * invDet
return outGeoTransform
def block_stats(x,y,z,ds,stat='median',bins=None):
"""Compute points on a regular grid (matching input GDAL Dataset) from scattered point data using specified statistic
Wrapper for scipy.stats.binned_statistic_2d
    Note: this is very fast for mean, std, count, but significantly slower for median
"""
import scipy.stats as stats
extent = ds_extent(ds)
#[[xmin, xmax], [ymin, ymax]]
range = [[extent[0], extent[2]], [extent[1], extent[3]]]
if bins is None:
bins = (ds.RasterXSize, ds.RasterYSize)
if stat == 'max':
stat = np.max
elif stat == 'min':
stat = np.min
#block_count, xedges, yedges, bin = stats.binned_statistic_2d(x,y,z,'count',bins,range)
block_stat, xedges, yedges, bin = stats.binned_statistic_2d(x,y,z,stat,bins,range)
#Get valid blocks
#if (stat == 'median') or (stat == 'mean'):
if stat in ('median', 'mean', np.max, np.min):
idx = ~np.isnan(block_stat)
else:
idx = (block_stat != 0)
idx_idx = idx.nonzero()
#Cell centers
res = [(xedges[1] - xedges[0]), (yedges[1] - yedges[0])]
out_x = xedges[:-1]+res[0]/2.0
out_y = yedges[:-1]+res[1]/2.0
out_x = out_x[idx_idx[0]]
out_y = out_y[idx_idx[1]]
out_z = block_stat[idx]
return out_x, out_y, out_z
#Note: the above method returns block_stat, which is already a continuous grid
#Just need to account for ndv and the upper left x_edge and y_edge
def block_stats_grid(x,y,z,ds,stat='median'):
"""Fill regular grid (matching input GDAL Dataset) from scattered point data using specified statistic
"""
mx, my, mz = block_stats(x,y,z,ds,stat)
gt = ds.GetGeoTransform()
pX, pY = mapToPixel(mx, my, gt)
shape = (ds.RasterYSize, ds.RasterXSize)
ndv = -9999.0
a = np.full(shape, ndv)
a[pY.astype('int'), pX.astype('int')] = mz
return np.ma.masked_equal(a, ndv)
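#Example usage (illustrative sketch): grid scattered point values onto the grid of
#an existing raster using a per-cell median; 'dem.tif' is a placeholder and x, y, z
#are 1D arrays of point coordinates/values in that raster's srs.
#    ds = gdal.Open('dem.tif')
#    grid = block_stats_grid(x, y, z, ds, stat='median')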
#This was an abandoned attempt to split the 2D binning into smaller pieces
#Want to use crude spatial filter to chunk points, then run the median binning
#Instead, just export points and use point2dem
"""
def block_stats_grid_parallel(x,y,z,ds,stat='median'):
extent = ds_extent(ds)
bins = (ds.RasterXSize, ds.RasterYSize)
res = get_res(ds)
#Define block extents
target_blocksize = 10000.
blocksize = floor(target_blocksize/float(res)) * res
xblocks = np.append(np.arange(extent[0], extent[2], blocksize), extent[2])
yblocks = np.append(np.arange(extent[1], extent[3], blocksize), extent[3])
for i in range(xblocks.size-1):
for j in range(yblocks.size-1):
extent = [xblocks[i], yblocks[j], xblocks[i+1], yblocks[j+1]]
xmin, ymin, xmax, ymax = extent
idx = ((x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax))
x[idx], y[idx], z[idx]
mx, my, mz = block_stats(x,y,z,ds,stat)
import scipy.stats as stats
range = [[extent[0], extent[2]], [extent[1], extent[3]]]
block_stat, xedges, yedges, bin = stats.binned_statistic_2d(x,y,z,stat,bins,range)
if stat in ('median', 'mean', np.max, np.min):
idx = ~np.isnan(block_stat)
else:
idx = (block_stat != 0)
idx_idx = idx.nonzero()
#Cell centers
#res = [(xedges[1] - xedges[0]), (yedges[1] - yedges[0])]
out_x = xedges[:-1]+res[0]/2.0
out_y = yedges[:-1]+res[1]/2.0
out_x = out_x[idx_idx[0]]
out_y = out_y[idx_idx[1]]
out_z = block_stat[idx]
return out_x, out_y, out_z
gt = ds.GetGeoTransform()
pX, pY = mapToPixel(mx, my, gt)
shape = (ds.RasterYSize, ds.RasterXSize)
ndv = -9999.0
a = np.full(shape, ndv)
a[pY.astype('int'), pX.astype('int')] = mz
return np.ma.masked_equal(a, ndv)
"""
def block_stats_grid_gen(x, y, z, res=None, srs=None, stat='median'):
#extent = np.array([x.min(), x.max(), y.min(), y.max()])
extent = np.array([x.min(), y.min(), x.max(), y.max()])
if res is None:
res = int((extent[2]-extent[0])/256.0)
ds = mem_ds(res, extent, srs)
return block_stats_grid(x,y,z,ds,stat), ds
#Create copy of ds in memory
#Should be able to use mem_drv CreateCopy method
#Alternative brute force implementation
#Should probably move to iolib
def mem_ds_copy(ds_orig):
if True:
from pygeotools.lib import iolib
m_ds = iolib.mem_drv.CreateCopy('', ds_orig, 0)
else:
gt = ds_orig.GetGeoTransform()
srs = ds_orig.GetProjection()
dst_ns = ds_orig.RasterXSize
dst_nl = ds_orig.RasterYSize
nbands = ds_orig.RasterCount
dtype = ds_orig.GetRasterBand(1).DataType
m_ds = gdal.GetDriverByName('MEM').Create('', dst_ns, dst_nl, nbands, dtype)
m_ds.SetGeoTransform(gt)
m_ds.SetProjection(srs)
for n in range(nbands):
b = ds_orig.GetRasterBand(n+1)
ndv = b.GetNoDataValue()
#m_ds.AddBand()
m_ds.GetRasterBand(n+1).WriteArray(b.ReadAsArray())
m_ds.GetRasterBand(n+1).SetNoDataValue(ndv)
return m_ds
def mem_ds(res, extent, srs=None, dtype=gdal.GDT_Float32):
"""Create a new GDAL Dataset in memory
Useful for various applications that require a Dataset
"""
#These round down to int
#dst_ns = int((extent[2] - extent[0])/res)
#dst_nl = int((extent[3] - extent[1])/res)
#This should pad by 1 pixel, but not if extent and res were calculated together to give whole int
dst_ns = int((extent[2] - extent[0])/res + 0.99)
dst_nl = int((extent[3] - extent[1])/res + 0.99)
m_ds = gdal.GetDriverByName('MEM').Create('', dst_ns, dst_nl, 1, dtype)
m_gt = [extent[0], res, 0, extent[3], 0, -res]
m_ds.SetGeoTransform(m_gt)
if srs is not None:
m_ds.SetProjection(srs.ExportToWkt())
return m_ds
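#Example usage (illustrative sketch): create a 100 m in-memory grid covering a
#1 km x 1 km extent (xmin, ymin, xmax, ymax) in N polar stereographic coordinates.
#    extent = [0, 0, 1000, 1000]
#    m_ds = mem_ds(100, extent, srs=nps_srs)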
#Modify proj/gt of dst_fn in place
def copyproj(src_fn, dst_fn, gt=True):
"""Copy projection and geotransform from one raster file to another
"""
src_ds = gdal.Open(src_fn, gdal.GA_ReadOnly)
dst_ds = gdal.Open(dst_fn, gdal.GA_Update)
dst_ds.SetProjection(src_ds.GetProjection())
if gt:
src_gt = np.array(src_ds.GetGeoTransform())
src_dim = np.array([src_ds.RasterXSize, src_ds.RasterYSize])
dst_dim = np.array([dst_ds.RasterXSize, dst_ds.RasterYSize])
#This preserves dst_fn resolution
if np.any(src_dim != dst_dim):
res_factor = src_dim/dst_dim.astype(float)
src_gt[[1, 5]] *= max(res_factor)
#src_gt[[1, 5]] *= min(res_factor)
#src_gt[[1, 5]] *= res_factor
dst_ds.SetGeoTransform(src_gt)
src_ds = None
dst_ds = None
def geom_dup(geom):
"""Create duplicate geometry
Needed to avoid segfault when passing geom around. See: http://trac.osgeo.org/gdal/wiki/PythonGotchas
"""
g = ogr.CreateGeometryFromWkt(geom.ExportToWkt())
g.AssignSpatialReference(geom.GetSpatialReference())
return g
#This should be a function of a new geom class
#Assumes geom has srs defined
#Modifies geom in place
def geom_transform(geom, t_srs):
"""Transform a geometry in place
"""
s_srs = geom.GetSpatialReference()
if not s_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(s_srs, t_srs)
geom.Transform(ct)
geom.AssignSpatialReference(t_srs)
def shp_fieldnames(lyr):
fdef = lyr.GetLayerDefn()
f_list = []
for i in range(fdef.GetFieldCount()):
f_list.append(fdef.GetFieldDefn(i).GetName())
return f_list
def shp_dict(shp_fn, fields=None, geom=True):
"""Get a dictionary for all features in a shapefile
Optionally, specify fields
"""
from pygeotools.lib import timelib
ds = ogr.Open(shp_fn)
lyr = ds.GetLayer()
nfeat = lyr.GetFeatureCount()
print('%i input features\n' % nfeat)
if fields is None:
fields = shp_fieldnames(lyr)
d_list = []
for n,feat in enumerate(lyr):
d = {}
if geom:
geom = feat.GetGeometryRef()
d['geom'] = geom
for f_name in fields:
i = str(feat.GetField(f_name))
if 'date' in f_name:
# date_f = f_name
#If d is float, clear off decimal
i = i.rsplit('.')[0]
i = timelib.strptime_fuzzy(str(i))
d[f_name] = i
d_list.append(d)
#d_list_sort = sorted(d_list, key=lambda k: k[date_f])
return d_list
def lyr_proj(lyr, t_srs, preserve_fields=True):
"""Reproject an OGR layer
"""
#Need to check t_srs
s_srs = lyr.GetSpatialRef()
cT = osr.CoordinateTransformation(s_srs, t_srs)
#Do everything in memory
drv = ogr.GetDriverByName('Memory')
#Might want to save clipped, warped shp to disk?
# create the output layer
#drv = ogr.GetDriverByName('ESRI Shapefile')
#out_fn = '/tmp/temp.shp'
#if os.path.exists(out_fn):
# driver.DeleteDataSource(out_fn)
#out_ds = driver.CreateDataSource(out_fn)
out_ds = drv.CreateDataSource('out')
outlyr = out_ds.CreateLayer('out', srs=t_srs, geom_type=lyr.GetGeomType())
if preserve_fields:
# add fields
inLayerDefn = lyr.GetLayerDefn()
for i in range(0, inLayerDefn.GetFieldCount()):
fieldDefn = inLayerDefn.GetFieldDefn(i)
outlyr.CreateField(fieldDefn)
# get the output layer's feature definition
outLayerDefn = outlyr.GetLayerDefn()
# loop through the input features
inFeature = lyr.GetNextFeature()
while inFeature:
# get the input geometry
geom = inFeature.GetGeometryRef()
# reproject the geometry
geom.Transform(cT)
# create a new feature
outFeature = ogr.Feature(outLayerDefn)
# set the geometry and attribute
outFeature.SetGeometry(geom)
if preserve_fields:
for i in range(0, outLayerDefn.GetFieldCount()):
outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))
# add the feature to the shapefile
outlyr.CreateFeature(outFeature)
# destroy the features and get the next input feature
inFeature = lyr.GetNextFeature()
#NOTE: have to operate on ds here rather than lyr, otherwise segfault
return out_ds
#See https://pcjericks.github.io/py-gdalogr-cookbook/vector_layers.html#convert-vector-layer-to-array
#Should check srs, as shp could be WGS84
def shp2array(shp_fn, r_ds=None, res=None, extent=None, t_srs=None):
"""Rasterize input shapefile to match existing raster Dataset (or specified res/extent/t_srs)
"""
if isinstance(shp_fn, ogr.DataSource):
shp_ds = shp_fn
else:
shp_ds = ogr.Open(shp_fn)
lyr = shp_ds.GetLayer()
#This returns xmin, ymin, xmax, ymax
shp_extent = lyr_extent(lyr)
shp_srs = lyr.GetSpatialRef()
# dst_dt = gdal.GDT_Byte
ndv = 0
if r_ds is not None:
r_extent = ds_extent(r_ds)
res = get_res(r_ds, square=True)[0]
if extent is None:
extent = r_extent
r_srs = get_ds_srs(r_ds)
r_geom = ds_geom(r_ds)
# dst_ns = r_ds.RasterXSize
# dst_nl = r_ds.RasterYSize
#Convert raster extent to shp_srs
cT = osr.CoordinateTransformation(r_srs, shp_srs)
r_geom_reproj = geom_dup(r_geom)
r_geom_reproj.Transform(cT)
        r_geom_reproj.AssignSpatialReference(shp_srs)
lyr.SetSpatialFilter(r_geom_reproj)
#lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt(wkt))
else:
#TODO: clean this up
if res is None:
sys.exit("Must specify input res")
if extent is None:
print("Using input shp extent")
extent = shp_extent
if t_srs is None:
t_srs = r_srs
if not shp_srs.IsSame(t_srs):
print("Input shp srs: %s" % shp_srs.ExportToProj4())
print("Specified output srs: %s" % t_srs.ExportToProj4())
out_ds = lyr_proj(lyr, t_srs)
outlyr = out_ds.GetLayer()
else:
outlyr = lyr
#outlyr.SetSpatialFilter(r_geom)
m_ds = mem_ds(res, extent, srs=t_srs, dtype=gdal.GDT_Byte)
b = m_ds.GetRasterBand(1)
b.SetNoDataValue(ndv)
gdal.RasterizeLayer(m_ds, [1], outlyr, burn_values=[1])
a = b.ReadAsArray()
    a = ~(a.astype(bool))
return a
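#Example usage (illustrative sketch): rasterize a polygon shapefile onto the grid of
#an existing raster, returning a boolean array that is True outside the polygons
#(filenames are placeholders).
#    r_ds = gdal.Open('dem.tif')
#    mask = shp2array('glaciers.shp', r_ds=r_ds)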
def raster_shpclip(r_fn, shp_fn, extent='raster', bbox=False, pad=None, invert=False, verbose=False):
"""Clip an input raster by input polygon shapefile for given extent
"""
from pygeotools.lib import iolib, warplib
r_ds = iolib.fn_getds(r_fn)
r_srs = get_ds_srs(r_ds)
r_extent = ds_extent(r_ds)
r_extent_geom = bbox2geom(r_extent)
    #NOTE: want to add spatial filter here to avoid reprojecting global RGI polygons, for example
shp_ds = ogr.Open(shp_fn)
lyr = shp_ds.GetLayer()
shp_srs = lyr.GetSpatialRef()
if not r_srs.IsSame(shp_srs):
shp_ds = lyr_proj(lyr, r_srs)
lyr = shp_ds.GetLayer()
#This returns xmin, ymin, xmax, ymax
shp_extent = lyr_extent(lyr)
shp_extent_geom = bbox2geom(shp_extent)
#Define the output - can set to either raster or shp
#Could accept as cl arg
out_srs = r_srs
if extent == 'raster':
out_extent = r_extent
elif extent == 'shp':
out_extent = shp_extent
elif extent == 'intersection':
out_extent = geom_intersection([r_extent_geom, shp_extent_geom])
elif extent == 'union':
out_extent = geom_union([r_extent_geom, shp_extent_geom])
else:
print("Unexpected extent specification, reverting to input raster extent")
        out_extent = r_extent
#Add padding around shp_extent
#Should implement buffer here
if pad is not None:
out_extent = pad_extent(out_extent, width=pad)
print("Raster to clip: %s\nShapefile used to clip: %s" % (r_fn, shp_fn))
if verbose:
print(shp_extent)
print(r_extent)
print(out_extent)
r_ds = warplib.memwarp(r_ds, extent=out_extent, t_srs=out_srs, r='cubic')
r = iolib.ds_getma(r_ds)
#If bbox, return without clipping, otherwise, clip to polygons
if not bbox:
#Create binary mask from shp
mask = shp2array(shp_fn, r_ds)
if invert:
mask = ~(mask)
#Now apply the mask
r = np.ma.array(r, mask=mask)
#Return both the array and the dataset, needed for writing out
#Should probably just write r to r_ds and return r_ds
return r, r_ds
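#Example usage (illustrative sketch): clip a raster to the polygons in a shapefile
#over the raster extent; filenames are placeholders.
#    dem_clip, dem_clip_ds = raster_shpclip('dem.tif', 'glaciers.shp')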
def shp2geom(shp_fn):
"""Extract geometries from input shapefile
Need to handle multi-part geom: http://osgeo-org.1560.x6.nabble.com/Multipart-to-singlepart-td3746767.html
"""
ds = ogr.Open(shp_fn)
lyr = ds.GetLayer()
srs = lyr.GetSpatialRef()
lyr.ResetReading()
geom_list = []
for feat in lyr:
geom = feat.GetGeometryRef()
geom.AssignSpatialReference(srs)
#Duplicate the geometry, or segfault
#See: http://trac.osgeo.org/gdal/wiki/PythonGotchas
#g = ogr.CreateGeometryFromWkt(geom.ExportToWkt())
#g.AssignSpatialReference(srs)
g = geom_dup(geom)
geom_list.append(g)
#geom = ogr.ForceToPolygon(' '.join(geom_list))
#Dissolve should convert multipolygon to single polygon
#return geom_list[0]
ds = None
return geom_list
def geom2shp(geom, out_fn, fields=False):
"""Write out a new shapefile for input geometry
"""
from pygeotools.lib import timelib
driverName = "ESRI Shapefile"
drv = ogr.GetDriverByName(driverName)
if os.path.exists(out_fn):
drv.DeleteDataSource(out_fn)
out_ds = drv.CreateDataSource(out_fn)
out_lyrname = os.path.splitext(os.path.split(out_fn)[1])[0]
geom_srs = geom.GetSpatialReference()
geom_type = geom.GetGeometryType()
out_lyr = out_ds.CreateLayer(out_lyrname, geom_srs, geom_type)
if fields:
field_defn = ogr.FieldDefn("name", ogr.OFTString)
field_defn.SetWidth(128)
out_lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("path", ogr.OFTString)
field_defn.SetWidth(254)
out_lyr.CreateField(field_defn)
#field_defn = ogr.FieldDefn("date", ogr.OFTString)
#This allows sorting by date
field_defn = ogr.FieldDefn("date", ogr.OFTInteger)
field_defn.SetWidth(32)
out_lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("decyear", ogr.OFTReal)
field_defn.SetPrecision(8)
field_defn.SetWidth(64)
out_lyr.CreateField(field_defn)
out_feat = ogr.Feature(out_lyr.GetLayerDefn())
out_feat.SetGeometry(geom)
if fields:
        #Hack to force output extension to tif, since out_fn is shp
out_path = os.path.splitext(out_fn)[0] + '.tif'
out_feat.SetField("name", os.path.split(out_path)[-1])
out_feat.SetField("path", out_path)
#Try to extract a date from input raster fn
out_feat_date = timelib.fn_getdatetime(out_fn)
if out_feat_date is not None:
datestamp = int(out_feat_date.strftime('%Y%m%d'))
#out_feat_date = int(out_feat_date.strftime('%Y%m%d%H%M'))
out_feat.SetField("date", datestamp)
decyear = timelib.dt2decyear(out_feat_date)
out_feat.SetField("decyear", decyear)
out_lyr.CreateFeature(out_feat)
out_ds = None
#return status?
def get_outline(ds, t_srs=None, scale=1.0, simplify=False, convex=False):
"""Generate outline of unmasked values in input raster
get_outline is an attempt to reproduce the PostGIS Raster ST_MinConvexHull function
Could potentially do the following: Extract random pts from unmasked elements, get indices, Run scipy convex hull, Convert hull indices to mapped coords
See this: http://stackoverflow.com/questions/3654289/scipy-create-2d-polygon-mask
This generates a wkt polygon outline of valid data for the input raster
Want to limit the dimensions of a, as notmasked_edges is slow: a = iolib.ds_getma_sub(ds, scale=scale)
"""
gt = np.array(ds.GetGeoTransform())
from pygeotools.lib import iolib
a = iolib.ds_getma_sub(ds, scale=scale)
#Create empty geometry
geom = ogr.Geometry(ogr.wkbPolygon)
#Check to make sure we have unmasked data
if a.count() != 0:
#Scale the gt for reduced resolution
#The UL coords should remain the same, as any rounding will trim LR
if (scale != 1.0):
gt[1] *= scale
gt[5] *= scale
#Get srs
ds_srs = get_ds_srs(ds)
if t_srs is None:
t_srs = ds_srs
#Find the unmasked edges
#Note: using only axis=0 from notmasked_edges will miss undercuts - see malib.get_edgemask
#Better ways to do this - binary mask, sum (see numpy2stl)
#edges0, edges1, edges = malib.get_edges(a)
px = np.ma.notmasked_edges(a, axis=0)
# coord = []
#Combine edge arrays, reversing order and adding first point to complete polygon
x = np.concatenate((px[0][1][::1], px[1][1][::-1], [px[0][1][0]]))
#x = np.concatenate((edges[0][1][::1], edges[1][1][::-1], [edges[0][1][0]]))
y = np.concatenate((px[0][0][::1], px[1][0][::-1], [px[0][0][0]]))
#y = np.concatenate((edges[0][0][::1], edges[1][0][::-1], [edges[0][0][0]]))
#Use np arrays for computing mapped coords
mx, my = pixelToMap(x, y, gt)
#Create wkt string
geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if int(gdal.__version__.split('.')[0]) >= 3:
if ds_srs.IsSame(wgs_srs):
ds_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if not ds_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(ds_srs, t_srs)
geom.Transform(ct)
#Make sure geometry has correct srs assigned
geom.AssignSpatialReference(t_srs)
if not geom.IsValid():
tol = gt[1] * 0.1
geom = geom.Simplify(tol)
#Need to get output units and extent for tolerance specification
if simplify:
#2 pixel tolerance
tol = gt[1] * 2
geom = geom.Simplify(tol)
if convex:
geom = geom.ConvexHull()
else:
print("No unmasked values found")
return geom
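#Example usage (illustrative sketch): generate a simplified outline of valid pixels
#in WGS84 coordinates and write it out as a shapefile (filenames are placeholders).
#    ds = gdal.Open('dem.tif')
#    outline_geom = get_outline(ds, t_srs=wgs_srs, scale=4.0, simplify=True)
#    geom2shp(outline_geom, 'dem_outline.shp')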
#The following were originally in dem_coreg glas_proc, may require some additional cleanup
#Want to split into cascading levels, with lowest doing pixel-based sampling on array
#See demtools extract_profile.py
def ds_cT(ds, x, y, xy_srs=wgs_srs):
"""Convert input point coordinates to map coordinates that match input dataset
"""
#Convert lat/lon to projected srs
ds_srs = get_ds_srs(ds)
#If xy_srs is undefined, assume it is the same as ds_srs
mX = x
mY = y
if xy_srs is not None:
if not ds_srs.IsSame(xy_srs):
mX, mY, mZ = cT_helper(x, y, 0, xy_srs, ds_srs)
return mX, mY
#Might be best to pass points as geom, with srs defined
def sample(ds, mX, mY, xy_srs=None, bn=1, pad=0, min_samp_perc=50, circ=False, count=False):
"""Sample input dataset at map coordinates
This is a generic sampling function, and will return value derived from window (dimensions pad*2+1) around each point
By default, assumes input map coords are identical to ds srs. If different, specify xy_srs to enable conversion.
"""
from pygeotools.lib import iolib, malib
#Should offer option to fit plane to points and then sample values with sub-pixel precision
shape = (ds.RasterYSize, ds.RasterXSize)
gt = ds.GetGeoTransform()
b = ds.GetRasterBand(bn)
b_ndv = iolib.get_ndv_b(b)
b_dtype = b.DataType
np_dtype = iolib.gdal2np_dtype(b)
    #If necessary, convert input coordinates to match ds srs
mX, mY = ds_cT(ds, mX, mY, xy_srs=xy_srs)
#This will sample an area corresponding to diameter of ICESat shot
if pad == 'glas':
spotsize = 70
pad = int(np.ceil(((spotsize/gt[1])-1)/2))
mX = np.atleast_1d(mX)
mY = np.atleast_1d(mY)
#Convert to pixel indices
pX, pY = mapToPixel(mX, mY, gt)
#Mask anything outside image dimensions
pX = np.ma.masked_outside(pX, 0, shape[1]-1)
pY = np.ma.masked_outside(pY, 0, shape[0]-1)
common_mask = (~(np.logical_or(np.ma.getmaskarray(pX), np.ma.getmaskarray(pY)))).nonzero()[0]
#Define x and y sample windows
xwin=pad*2+1
ywin=pad*2+1
#This sets the minimum number of valid pixels, default 50%
min_samp = int(np.ceil((min_samp_perc/100.)*xwin*ywin))
#Create circular mask to simulate spot
#This only makes sense for for xwin > 3
if circ:
from pygeotools.lib import filtlib
circ_mask = filtlib.circular_mask(xwin)
min_samp = int(np.ceil((min_samp_perc/100.)*circ_mask.nonzero()[0].size))
pX_int = pX[common_mask].data
pY_int = pY[common_mask].data
#Round to nearest integer indices
pX_int = np.around(pX_int).astype(int)
pY_int = np.around(pY_int).astype(int)
#print("Valid extent: %i" % pX_int.size)
#Create empty array to hold output
#Added the valid pixel count quickly, should clean this up for more systematic stats return at each sample
if count:
stats = np.full((pX_int.size, 3), b_ndv, dtype=np_dtype)
else:
stats = np.full((pX_int.size, 2), b_ndv, dtype=np_dtype)
r = gdal.GRA_NearestNeighbour
#r = gdal.GRA_Cubic
for i in range(pX_int.size):
#Could have float offsets here with GDAL resampling
samp = np.ma.masked_equal(b.ReadAsArray(xoff=pX_int[i]-pad, yoff=pY_int[i]-pad, win_xsize=xwin, win_ysize=ywin, resample_alg=r), b_ndv)
if circ:
            samp = np.ma.array(samp, mask=circ_mask)
if samp.count() >= min_samp:
if min_samp > 1:
#Use mean and std
#samp_med = samp.mean()
#samp_mad = samp.std()
#Use median and nmad (robust)
samp_med = malib.fast_median(samp)
samp_mad = malib.mad(samp)
stats[i][0] = samp_med
stats[i][1] = samp_mad
if count:
stats[i][2] = samp.count()
else:
stats[i][0] = samp[0]
stats[i][1] = 0
if count:
stats[i][2] = 1
#vals, resid, coef = ma_fitplane(samp, gt=[0, gt[1], 0, 0, 0, gt[5]], perc=None)
#Compute slope and aspect from plane
#rmse = malib.rmse(resid)
stats = np.ma.masked_equal(stats, b_ndv)
    #Create empty array with same size as input points
if count:
out = np.full((pX.size, 3), b_ndv, dtype=np_dtype)
else:
out = np.full((pX.size, 2), b_ndv, dtype=np_dtype)
#Populate with valid samples
out[common_mask, :] = stats
out = np.ma.masked_equal(out, b_ndv)
return out
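#Example usage (illustrative sketch): sample a raster at WGS84 lon/lat points using a
#3x3 window (pad=1) around each point; filename and coordinates are placeholders.
#    ds = gdal.Open('dem.tif')
#    lon = np.array([-121.7, -121.8])
#    lat = np.array([46.85, 46.87])
#    vals = sample(ds, lon, lat, xy_srs=wgs_srs, pad=1)
#    #vals[:,0] is the window median, vals[:,1] the NMAD for each point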
def line2pts(geom, dl=None):
"""Given an input line geom, generate points at fixed interval
Useful for extracting profile data from raster
"""
#Extract list of (x,y) tuples at nodes
nodes = geom.GetPoints()
#print "%i nodes" % len(nodes)
#Point spacing in map units
if dl is None:
nsteps=1000
dl = geom.Length()/nsteps
#This only works for equidistant projection!
#l = np.arange(0, geom.Length(), dl)
#Initialize empty lists
l = []
mX = []
mY = []
#Add first point to output lists
l += [0]
x = nodes[0][0]
y = nodes[0][1]
mX += [x]
mY += [y]
#Remainder
rem_l = 0
#Previous length (initially 0)
last_l = l[-1]
#Loop through each line segment in the feature
for i in range(0,len(nodes)-1):
x1, y1 = nodes[i]
x2, y2 = nodes[i+1]
#Total length of segment
tl = np.sqrt((x2-x1)**2 + (y2-y1)**2)
#Number of dl steps we can fit in this segment
#This returns floor
steps = int((tl+rem_l)/dl)
if steps > 0:
dx = ((x2-x1)/tl)*dl
dy = ((y2-y1)/tl)*dl
rem_x = rem_l*(dx/dl)
rem_y = rem_l*(dy/dl)
#Loop through each step and append to lists
for n in range(1, steps+1):
l += [last_l + (dl*n)]
#Remove the existing remainder
x = x1 + (dx*n) - rem_x
y = y1 + (dy*n) - rem_y
mX += [x]
mY += [y]
#Note: could just build up arrays of pX, pY for entire line, then do single z extraction
#Update the remainder
rem_l += tl - (steps * dl)
last_l = l[-1]
else:
rem_l += tl
return l, mX, mY
#Started moving profile extraction code from extract_profile.py to here
#Need to clean this up
def extract_profile(ds, geom, dl=None, km=False):
l, mX, mY = line2pts(geom, dl)
#Need to convert to same srs
pX, pY = mapToPixel(np.array(mX), np.array(mY), ds.GetGeoTransform())
l = np.array(l)
if km:
l /= 1000.
z = sample(ds, mX, mY)
return (l, mX, mY, z)
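#Example usage (illustrative sketch): extract an elevation profile along a line at
#~100 m spacing; filename is a placeholder and the line geometry is assumed to be in
#the same srs as the raster (line2pts/sample do not reproject it).
#    ds = gdal.Open('dem.tif')
#    line_geom = shp2geom('centerline.shp')[0]
#    dist, mX, mY, z = extract_profile(ds, line_geom, dl=100)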
def get_res_stats(ds_list, t_srs=None):
"""Return resolution stats for an input dataset list
"""
if t_srs is None:
t_srs = get_ds_srs(ds_list[0])
res = np.array([get_res(ds, t_srs=t_srs) for ds in ds_list])
#Check that all projections are identical
#gt_array = np.array([ds.GetGeoTransform() for ds in args])
#xres = gt_array[:,1]
#yres = -gt_array[:,5]
#if xres == yres:
#res = np.concatenate((xres, yres))
min = np.min(res)
max = np.max(res)
mean = np.mean(res)
med = np.median(res)
return (min, max, mean, med)
def get_res(ds, t_srs=None, square=False):
"""Get GDAL Dataset raster resolution
"""
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
#This is Xres, Yres
res = [gt[1], np.abs(gt[5])]
if square:
res = [np.mean(res), np.mean(res)]
if t_srs is not None and not ds_srs.IsSame(t_srs):
if True:
#This diagonal approach is similar to the approach in gdaltransformer.cpp
#Bad news for large extents near the poles
#ullr = get_ullr(ds, t_srs)
#diag = np.sqrt((ullr[0]-ullr[2])**2 + (ullr[1]-ullr[3])**2)
extent = ds_extent(ds, t_srs)
diag = np.sqrt((extent[2]-extent[0])**2 + (extent[3]-extent[1])**2)
res = diag / np.sqrt(ds.RasterXSize**2 + ds.RasterYSize**2)
res = [res, res]
else:
#Compute from center pixel
ct = osr.CoordinateTransformation(ds_srs, t_srs)
pt = get_center(ds)
#Transform center coordinates
pt_ct = ct.TransformPoint(*pt)
#Transform center + single pixel offset coordinates
pt_ct_plus = ct.TransformPoint(pt[0] + gt[1], pt[1] + gt[5])
#Compute resolution in new units
res = [pt_ct_plus[0] - pt_ct[0], np.abs(pt_ct_plus[1] - pt_ct[1])]
return res
def get_center(ds, t_srs=None):
"""Get center coordinates of GDAL Dataset
"""
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
#Note: this is center of center pixel, not ul corner of center pixel
center = [gt[0] + (gt[1] * ds.RasterXSize/2.0), gt[3] + (gt[5] * ds.RasterYSize/2.0)]
#include t_srs.Validate() and t_srs.Fixup()
if t_srs is not None and not ds_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(ds_srs, t_srs)
center = list(ct.TransformPoint(*center)[0:2])
return center
def get_ds_srs(ds):
"""Get srs object for GDAL Datset
"""
ds_srs = osr.SpatialReference()
ds_srs.ImportFromWkt(ds.GetProjectionRef())
return ds_srs
def srs_check(ds):
"""Check validitiy of Dataset srs
Return True if srs is properly defined
"""
# ds_srs = get_ds_srs(ds)
gt = np.array(ds.GetGeoTransform())
gt_check = ~np.all(gt == np.array((0.0, 1.0, 0.0, 0.0, 0.0, 1.0)))
proj_check = (ds.GetProjection() != '')
#proj_check = ds_srs.IsProjected()
out = False
if gt_check and proj_check:
out = True
return out
def ds_IsEmpty(ds):
"""Check to see if dataset is empty after warp
"""
out = False
b = ds.GetRasterBand(1)
#Looks like this throws:
#ERROR 1: Failed to compute min/max, no valid pixels found in sampling.
    #Should just catch this rather than bothering with logic below
try:
mm = b.ComputeRasterMinMax()
if (mm[0] == mm[1]):
ndv = b.GetNoDataValue()
if ndv is None:
out = True
else:
if (mm[0] == ndv):
out = True
except Exception:
out = True
#Check for std of nan
#import math
#stats = b.ComputeStatistics(1)
#for x in stats:
# if math.isnan(x):
# out = True
# break
return out
def gt_corners(gt, nx, ny):
"""Get corner coordinates based on input geotransform and raster dimensions
"""
ul = [gt[0], gt[3]]
ll = [gt[0], gt[3] + (gt[5] * ny)]
ur = [gt[0] + (gt[1] * nx), gt[3]]
lr = [gt[0] + (gt[1] * nx), gt[3] + (gt[5] * ny)]
return ul, ll, ur, lr
"""
Notes on extent format:
gdalwarp uses '-te xmin ymin xmax ymax'
gdalbuildvrt uses '-te xmin ymin xmax ymax'
gdal_translate uses '-projwin ulx uly lrx lry' or '-projwin xmin ymax xmax ymin'
These functions should all use 'xmin ymin xmax ymax' for extent, unless otherwise specified
"""
def corner_extent(ul, ll, ur, lr):
"""Get min/max extent based on corner coord
"""
xmin = min(ul[0], ll[0], ur[0], lr[0])
xmax = max(ul[0], ll[0], ur[0], lr[0])
ymin = min(ul[1], ll[1], ur[1], lr[1])
ymax = max(ul[1], ll[1], ur[1], lr[1])
extent = [xmin, ymin, xmax, ymax]
return extent
#This is called by malib.DEM_stack, where we don't necessarily have a ds
def gt_extent(gt, nx, ny):
extent = corner_extent(*gt_corners(gt, nx, ny))
return extent
def ds_extent(ds, t_srs=None):
"""Return min/max extent of dataset based on corner coordinates
xmin, ymin, xmax, ymax
If t_srs is specified, output will be converted to specified srs
"""
ul, ll, ur, lr = gt_corners(ds.GetGeoTransform(), ds.RasterXSize, ds.RasterYSize)
ds_srs = get_ds_srs(ds)
if t_srs is not None and not ds_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(ds_srs, t_srs)
#Check to see if ct creation failed
#if ct == NULL:
#Check to see if transform failed
#if not ct.TransformPoint(extent[0], extent[1]):
#Need to check that transformed coordinates fall within appropriate bounds
ul = ct.TransformPoint(*ul)
ll = ct.TransformPoint(*ll)
ur = ct.TransformPoint(*ur)
lr = ct.TransformPoint(*lr)
extent = corner_extent(ul, ll, ur, lr)
return extent
#This rounds to nearest multiple of a
def round_nearest(x, a):
return round(round(x / a) * a, -int(np.floor(np.log10(a))))
#Round extents to nearest pixel
#Should really pad these outward rather than round
def extent_round(extent, precision=1E-3):
    #Should force initial stack creation to multiples of res
extround = [round_nearest(i, precision) for i in extent]
#Check that bounds are still within original extent
if False:
extround[0] = max(extent[0], extround[0])
extround[1] = max(extent[1], extround[1])
extround[2] = min(extent[2], extround[2])
extround[3] = min(extent[3], extround[3])
return extround
def ds_geom(ds, t_srs=None):
"""Return dataset bbox envelope as geom
"""
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
if t_srs is None:
t_srs = ds_srs
ns = ds.RasterXSize
nl = ds.RasterYSize
x = np.array([0, ns, ns, 0, 0], dtype=float)
y = np.array([0, 0, nl, nl, 0], dtype=float)
#Note: pixelToMap adds 0.5 to input coords, need to account for this here
x -= 0.5
y -= 0.5
mx, my = pixelToMap(x, y, gt)
geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if int(gdal.__version__.split('.')[0]) >= 3:
if ds_srs.IsSame(wgs_srs):
ds_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
geom.AssignSpatialReference(ds_srs)
if not ds_srs.IsSame(t_srs):
geom_transform(geom, t_srs)
return geom
def geom_extent(geom):
    #Envelope is (xmin, xmax, ymin, ymax)
env = geom.GetEnvelope()
#return xmin, ymin, xmax, ymax
return [env[0], env[2], env[1], env[3]]
def lyr_extent(lyr):
    #Envelope is (xmin, xmax, ymin, ymax)
env = lyr.GetExtent()
#return xmin, ymin, xmax, ymax
return [env[0], env[2], env[1], env[3]]
#Compute dataset extent using geom
def ds_geom_extent(ds, t_srs=None):
geom = ds_geom(ds, t_srs)
return geom_extent(geom)
#Quick and dirty filter to check for points inside bbox
def pt_within_extent(x, y, extent):
xmin, ymin, xmax, ymax = extent
idx = ((x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax))
return idx
#Pad extent
#Want to rewrite to allow for user-specified map units in addition to percentage
def pad_extent(extent, perc=0.1, width=None, uniform=False):
e = np.array(extent)
if width is not None:
dx = dy = width
out = e + np.array([-dx, -dy, dx, dy])
else:
dx = e[2] - e[0]
dy = e[3] - e[1]
if uniform:
dx = dy = np.mean([dx, dy])
out = e + (perc * np.array([-dx, -dy, dx, dy]))
return list(out)
#What happens if input geom have different t_srs???
#Add option to return envelope, don't need additional functions to do this
#Note: this can return multipolygon geometry!
def geom_union(geom_list, **kwargs):
convex=False
union = geom_list[0]
for geom in geom_list[1:]:
union = union.Union(geom)
if convex:
union = union.ConvexHull()
return union
def ds_geom_union(ds_list, **kwargs):
ref_srs = get_ds_srs(ds_list[0])
if 't_srs' in kwargs:
if kwargs['t_srs'] is not None:
if not ref_srs.IsSame(kwargs['t_srs']):
ref_srs = kwargs['t_srs']
geom_list = []
for ds in ds_list:
geom_list.append(ds_geom(ds, t_srs=ref_srs))
union = geom_union(geom_list)
return union
#Check to make sure we have at least 2 input ds
def ds_geom_union_extent(ds_list, **kwargs):
union = ds_geom_union(ds_list, **kwargs)
    #GetEnvelope returns (xmin, xmax, ymin, ymax)
#Define new geom class with better Envelope options?
env = union.GetEnvelope()
return [env[0], env[2], env[1], env[3]]
#Do we need to assign srs after the intersection here?
#intersect.AssignSpatialReference(srs)
def geom_intersection(geom_list, **kwargs):
convex=False
intsect = geom_list[0]
valid = False
for geom in geom_list[1:]:
if intsect.Intersects(geom):
valid = True
intsect = intsect.Intersection(geom)
if convex:
intsect = intsect.ConvexHull()
if not valid:
intsect = None
return intsect
def geom_wh(geom):
    """Compute width and height of geometry in projected units
    """
    #GetEnvelope returns (xmin, xmax, ymin, ymax)
    e = geom.GetEnvelope()
    w = e[1] - e[0]
    h = e[3] - e[2]
    return w, h
#Check to make sure we have at least 2 input ds
#Check to make sure intersection is valid
#***
#This means checking that stereographic projections don't extend beyond equator
#***
def ds_geom_intersection(ds_list, **kwargs):
ref_srs = get_ds_srs(ds_list[0])
if 't_srs' in kwargs:
if kwargs['t_srs'] is not None:
if not ref_srs.IsSame(kwargs['t_srs']):
ref_srs = kwargs['t_srs']
geom_list = []
for ds in ds_list:
geom_list.append(ds_geom(ds, t_srs=ref_srs))
intsect = geom_intersection(geom_list)
return intsect
def ds_geom_intersection_extent(ds_list, **kwargs):
intsect = ds_geom_intersection(ds_list, **kwargs)
if intsect is not None:
        #GetEnvelope returns (xmin, xmax, ymin, ymax)
#Define new geom class with better Envelope options?
env = intsect.GetEnvelope()
intsect = [env[0], env[2], env[1], env[3]]
return intsect
#This is necessary because extent precision is different
def extent_compare(e1, e2, precision=1E-3):
#e1_f = '%0.6f %0.6f %0.6f %0.6f' % tuple(e1)
#e2_f = '%0.6f %0.6f %0.6f %0.6f' % tuple(e2)
e1_f = extent_round(e1, precision)
e2_f = extent_round(e2, precision)
return e1_f == e2_f
#This is necessary because extent precision is different
def res_compare(r1, r2, precision=1E-3):
#r1_f = '%0.6f' % r1
#r2_f = '%0.6f' % r2
r1_f = round_nearest(r1, precision)
r2_f = round_nearest(r2, precision)
return r1_f == r2_f
#Clip raster by shape
#Note, this is a hack that uses gdalwarp command line util
#It is possible to do this with GDAL/OGR python API, but this works for now
#See: http://stackoverflow.com/questions/2220749/rasterizing-a-gdal-layer
def clip_raster_by_shp(dem_fn, shp_fn):
import subprocess
#This is ok when writing to outdir, but clip_raster_by_shp.sh writes to raster dir
#try:
# with open(dem_fn) as f: pass
#except IOError as e:
cmd = ['clip_raster_by_shp.sh', dem_fn, shp_fn]
print(cmd)
subprocess.call(cmd, shell=False)
dem_clip_fn = os.path.splitext(dem_fn)[0]+'_shpclip.tif'
dem_clip_ds = gdal.Open(dem_clip_fn, gdal.GA_ReadOnly)
return dem_clip_ds
#Hack
#extent is xmin ymin xmax ymax
def clip_shp(shp_fn, extent):
import subprocess
out_fn = os.path.splitext(shp_fn)[0]+'_clip.shp'
#out_fn = os.path.splitext(shp_fn)[0]+'_clip'+os.path.splitext(shp_fn)[1]
extent = [str(i) for i in extent]
#cmd = ['ogr2ogr', '-f', 'ESRI Shapefile', out_fn, shp_fn, '-clipsrc']
cmd = ['ogr2ogr', '-f', 'ESRI Shapefile', '-overwrite', '-t_srs', 'EPSG:3031', out_fn, shp_fn, '-clipdst']
cmd.extend(extent)
print(cmd)
subprocess.call(cmd, shell=False)
#Need to combine these with shp2array
#Deal with different srs
#Rasterize shp to binary mask
def fn2mask(fn, r_ds):
    v_ds = ogr.Open(fn)
mask = ds2mask(v_ds, r_ds)
v_ds = None
return mask
#Rasterize ogr dataset to binary mask
def ds2mask(v_ds, r_ds):
lyr = v_ds.GetLayer()
mask = lyr2mask(lyr, r_ds)
lyr = None
return mask
#Rasterize ogr layer to binary mask (core functionality for gdal.RasterizeLayer)
def lyr2mask(lyr, r_ds):
#Create memory raster dataset and fill with 0s
m_ds = gdal.GetDriverByName('MEM').CreateCopy('', r_ds, 1)
b = m_ds.GetRasterBand(1)
b.Fill(0)
b.SetNoDataValue(0)
#Not sure if gdal.RasterizeLayer can handle srs difference
#r_srs = get_ds_srs(m_ds)
#lyr_srs = lyr.GetSpatialReference()
#if not r_srs.IsSame(lyr_srs):
# lyr = lyr_proj(lyr, r_srs)
#Rasterize with values of 1
gdal.RasterizeLayer(m_ds, [1], lyr, burn_values=[1])
a = b.ReadAsArray()
    mask = a.astype(bool)
m_ds = None
return ~mask
#Rasterize ogr geometry to binary mask (creates dummy layer)
def geom2mask(geom, r_ds):
geom_srs = geom.GetSpatialReference()
geom2 = geom_dup(geom)
#Create memory vector dataset and add geometry as new feature
ogr_ds = ogr.GetDriverByName('Memory').CreateDataSource('tmp_ds')
m_lyr = ogr_ds.CreateLayer('tmp_lyr', srs=geom_srs)
feat = ogr.Feature(m_lyr.GetLayerDefn())
feat.SetGeometryDirectly(geom2)
m_lyr.CreateFeature(feat)
mask = lyr2mask(m_lyr, r_ds)
m_lyr = None
ogr_ds = None
return mask
#Old function, does not work with inner rings or complex geometries
def geom2mask_PIL(geom, ds):
from PIL import Image, ImageDraw
#width = ma.shape[1]
#height = ma.shape[0]
width = ds.RasterXSize
height = ds.RasterYSize
gt = ds.GetGeoTransform()
img = Image.new('L', (width, height), 0)
draw = ImageDraw.Draw(img)
#Check to make sure we have polygon
#3 is polygon, 6 is multipolygon
#At present, this doesn't handle internal polygons
#Want to set these to 0
if (geom.GetGeometryType() == 3):
for ring in geom:
pts = np.array(ring.GetPoints())
px = np.array(mapToPixel(pts[:,0], pts[:,1], gt))
px_poly = px.T.astype(int).ravel().tolist()
draw.polygon(px_poly, outline=1, fill=1)
elif (geom.GetGeometryType() == 6):
for poly in geom:
for ring in poly:
pts = np.array(ring.GetPoints())
px = np.array(mapToPixel(pts[:,0], pts[:,1], gt))
px_poly = px.T.astype(int).ravel().tolist()
draw.polygon(px_poly, outline=1, fill=1)
# polygon = [(x1,y1),(x2,y2),...] or [x1,y1,x2,y2,...]
mask = np.array(img).astype(bool)
return ~mask
def gdaldem_mem_ma(ma, ds=None, res=None, extent=None, srs=None, processing='hillshade', returnma=False, computeEdges=False):
"""
Wrapper to allow gdaldem calculations for arbitrary NumPy masked array input
Untested, work in progress placeholder
    Should only need to specify res, can calculate local gt, cartesian srs
"""
if ds is None:
        ds = mem_ds(res, extent, srs=srs, dtype=gdal.GDT_Float32)
else:
ds = mem_ds_copy(ds)
b = ds.GetRasterBand(1)
b.WriteArray(ma)
    out = gdaldem_mem_ds(ds, processing=processing, returnma=returnma, computeEdges=computeEdges)
return out
#Should add gdal.DEMProcessingOptions support
def gdaldem_mem_ds(ds, processing='hillshade', returnma=False, computeEdges=False):
"""
Wrapper for gdaldem functions
Uses gdaldem API, requires GDAL v2.1+
"""
choices = ["hillshade", "slope", "aspect", "color-relief", "TRI", "TPI", "Roughness"]
out = None
scale=1.0
if not get_ds_srs(ds).IsProjected():
scale=111120
if processing in choices:
out = gdal.DEMProcessing('', ds, processing, format='MEM', computeEdges=computeEdges, scale=scale)
else:
print("Invalid processing choice")
print(choices)
#This should be a separate function
if returnma:
from pygeotools.lib import iolib
out = iolib.ds_getma(out)
return out
#Deprecated
def gdaldem_wrapper(fn, product='hs', returnma=True, verbose=True):
"""Wrapper for gdaldem functions
    Note: gdaldem is directly available through the API as of GDAL v2.1
    https://trac.osgeo.org/gdal/wiki/rfc59.1_utilities_as_a_library
    This function is no longer necessary, and will eventually be removed.
"""
#These gdaldem functions should be able to ingest masked array
#Just write out temporary file, or maybe mem vrt?
valid_opt = ['hillshade', 'hs', 'slope', 'aspect', 'color-relief', 'TRI', 'TPI', 'roughness']
try:
open(fn)
except IOError:
print("Unable to open %s" %fn)
if product not in valid_opt:
print("Invalid gdaldem option specified")
import subprocess
from pygeotools.lib import iolib
bma = None
opts = []
if product == 'hs' or product == 'hillshade':
product = 'hillshade'
#opts = ['-compute_edges',]
out_fn = os.path.splitext(fn)[0]+'_hs_az315.tif'
else:
out_fn = os.path.splitext(fn)[0]+'_%s.tif' % product
if not os.path.exists(out_fn):
cmd = ['gdaldem', product]
cmd.extend(opts)
cmd.extend(iolib.gdal_opt_co)
cmd.extend([fn, out_fn])
if verbose:
print(' '.join(cmd))
cmd_opt = {}
else:
fnull = open(os.devnull, 'w')
cmd_opt = {'stdout':fnull, 'stderr':subprocess.STDOUT}
subprocess.call(cmd, shell=False, **cmd_opt)
if returnma:
ds = gdal.Open(out_fn, gdal.GA_ReadOnly)
bma = iolib.ds_getma(ds, 1)
return bma
else:
return out_fn
def gdaldem_slope(fn):
return gdaldem_wrapper(fn, 'slope')
def gdaldem_aspect(fn):
return gdaldem_wrapper(fn, 'aspect')
#Perhaps this should be generalized, and moved to malib
def bilinear(px, py, band_array, gt):
"""Bilinear interpolated point at(px, py) on band_array
"""
#import malib
#band_array = malib.checkma(band_array)
ndv = band_array.fill_value
ny, nx = band_array.shape
# Half raster cell widths
hx = gt[1]/2.0
hy = gt[5]/2.0
# Calculate raster lower bound indices from point
    fx = (px - (gt[0] + hx)) / gt[1]
    fy = (py - (gt[3] + hy)) / gt[5]
    ix1 = int(np.floor(fx))
    iy1 = int(np.floor(fy))
    # Special case where point is on upper bounds
    if fx == float(nx - 1):
        ix1 -= 1
    if fy == float(ny - 1):
        iy1 -= 1
    # Upper bound indices on raster
    ix2 = ix1 + 1
    iy2 = iy1 + 1
    # Test array bounds to ensure point is within raster midpoints
    if (ix1 < 0) or (iy1 < 0) or (ix2 > nx - 1) or (iy2 > ny - 1):
        return ndv
    # Calculate differences from point to bounding raster midpoints
    dx1 = px - (gt[0] + ix1*gt[1] + hx)
    dy1 = py - (gt[3] + iy1*gt[5] + hy)
    dx2 = (gt[0] + ix2*gt[1] + hx) - px
    dy2 = (gt[3] + iy2*gt[5] + hy) - py
    # Use the differences to weigh the four raster values
    div = gt[1]*gt[5]
    return (band_array[iy1, ix1]*dx2*dy2/div +
            band_array[iy1, ix2]*dx1*dy2/div +
            band_array[iy2, ix1]*dx2*dy1/div +
            band_array[iy2, ix2]*dx1*dy1/div)
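#Hypothetical usage sketch (not part of the original module): interpolate a value
#from a small synthetic masked array on a 1 m grid (gt is a GDAL geotransform).
def _example_bilinear():
    a = np.ma.masked_invalid(np.arange(16, dtype=float).reshape(4, 4))
    gt = [0.0, 1.0, 0.0, 4.0, 0.0, -1.0]
    print(bilinear(2.0, 2.0, a, gt))
    #-> 7.5 (average of the four surrounding cell values 5, 6, 9, 10)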
#Jak values over fjord are ~30, offset is -29.99
def get_geoid_offset(ds, geoid_srs=egm08_srs):
"""Return offset for center of ds
Offset is added to input (presumably WGS84 HAE) to get to geoid
Note: requires vertical offset grids in proj share dir - see earlier note
"""
ds_srs = get_ds_srs(ds)
c = get_center(ds)
x, y, z = cT_helper(c[0], c[1], 0.0, ds_srs, geoid_srs)
return z
def get_geoid_offset_ll(lon, lat, geoid_srs=egm08_srs):
x, y, z = cT_helper(lon, lat, 0.0, wgs_srs, geoid_srs)
return z
#Note: the existing egm96-5 dataset has problematic extent
#warplib writes out correct res/extent, but egm96 is empty
#Eventually accept geoma
def wgs84_to_egm96(dem_ds, geoid_dir=None):
from pygeotools.lib import warplib
#Check input dem_ds - make sure WGS84
    if geoid_dir is None:
        geoid_dir = os.getenv('ASP_DATA')
    if geoid_dir is None:
        sys.exit("No geoid directory available: set ASP_DATA or specify geoid_dir")
egm96_fn = geoid_dir+'/geoids-1.1/egm96-5.tif'
try:
open(egm96_fn)
except IOError:
sys.exit("Unable to find "+egm96_fn)
egm96_ds = gdal.Open(egm96_fn)
#Warp egm96 to match the input ma
ds_list = warplib.memwarp_multi([dem_ds, egm96_ds], res='first', extent='first', t_srs='first')
#Try two-step with extent/res in wgs84
#ds_list = warplib.memwarp_multi([dem_ds, egm96_ds], res='first', extent='intersection', t_srs='last')
#ds_list = warplib.memwarp_multi([dem_ds, ds_list[1]], res='first', extent='first', t_srs='first')
print("Extracting ma from dem and egm96 ds")
from pygeotools.lib import iolib
dem = iolib.ds_getma(ds_list[0])
egm96 = iolib.ds_getma(ds_list[1])
print("Removing offset")
dem_egm96 = dem - egm96
return dem_egm96
#Run ASP dem_geoid adjustment utility
#Note: this is multithreaded
def dem_geoid(dem_fn):
out_prefix = os.path.splitext(dem_fn)[0]
adj_fn = out_prefix +'-adj.tif'
if not os.path.exists(adj_fn):
import subprocess
cmd_args = ["-o", out_prefix, dem_fn]
cmd = ['dem_geoid'] + cmd_args
#cmd = 'dem_geoid -o %s %s' % (out_prefix, dem_fn)
print(' '.join(cmd))
subprocess.call(cmd, shell=False)
adj_ds = gdal.Open(adj_fn, gdal.GA_ReadOnly)
#from pygeotools.lib import iolib
#return iolib.ds_getma(adj_ds, 1)
return adj_ds
def dem_geoid_offsetgrid_ds(ds, out_fn=None):
from pygeotools.lib import iolib
a = iolib.ds_getma(ds)
a[:] = 0.0
if out_fn is None:
out_fn = '/tmp/geoidoffset.tif'
iolib.writeGTiff(a, out_fn, ds, ndv=-9999)
import subprocess
cmd_args = ["--geoid", "EGM2008", "-o", os.path.splitext(out_fn)[0], out_fn]
cmd = ['dem_geoid'] + cmd_args
print(' '.join(cmd))
subprocess.call(cmd, shell=False)
os.rename(os.path.splitext(out_fn)[0]+'-adj.tif', out_fn)
o = iolib.fn_getma(out_fn)
return o
def dem_geoid_offsetgrid(dem_fn):
ds = gdal.Open(dem_fn)
out_fn = os.path.splitext(dem_fn)[0]+'_EGM2008offset.tif'
o = dem_geoid_offsetgrid_ds(ds, out_fn)
return o
#Note: functionality with masking needs work
def map_interp(bma, gt, stride=1, full_array=True):
import scipy.interpolate
from pygeotools.lib import malib
mx, my = get_xy_ma(bma, gt, stride, origmask=True)
x, y, z = np.array([mx.compressed(), my.compressed(), bma.compressed()])
#Define the domain for the interpolation
if full_array:
#Interpolate over entire array
xi, yi = get_xy_ma(bma, gt, stride, origmask=False)
else:
#Interpolate over buffered area around points
newmask = malib.maskfill(bma)
newmask = malib.mask_dilate(bma, iterations=3)
xi, yi = get_xy_ma(bma, gt, stride, newmask=newmask)
xi = xi.compressed()
yi = yi.compressed()
#Do the interpolation
zi = scipy.interpolate.griddata((x,y), z, (xi,yi), method='cubic')
#f = scipy.interpolate.BivariateSpline(x, y, z)
#zi = f(xi, yi, grid=False)
#f = scipy.interpolate.interp2d(x, y, z, kind='cubic')
#This is a 2D array, only need to specify 1D arrays of x and y for output grid
#zi = f(xi, yi)
if full_array:
zia = np.ma.fix_invalid(zi, fill_value=bma.fill_value)
else:
pxi, pyi = mapToPixel(xi, yi, gt)
        pxi = np.clip(pxi.astype(int), 0, bma.shape[1]-1)
        pyi = np.clip(pyi.astype(int), 0, bma.shape[0]-1)
zia = np.ma.masked_all_like(bma)
zia.set_fill_value(bma.fill_value)
zia[pyi, pxi] = zi
return zia
def get_xy_ma(bma, gt, stride=1, origmask=True, newmask=None):
"""Return arrays of x and y map coordinates for input array and geotransform
"""
pX = np.arange(0, bma.shape[1], stride)
pY = np.arange(0, bma.shape[0], stride)
psamp = np.meshgrid(pX, pY)
#if origmask:
# psamp = np.ma.array(psamp, mask=np.ma.getmaskarray(bma), fill_value=0)
mX, mY = pixelToMap(psamp[0], psamp[1], gt)
mask = None
    if origmask:
        #Subsample mask in both dimensions to match the strided meshgrid
        mask = np.ma.getmaskarray(bma)[::stride, ::stride]
    if newmask is not None:
        mask = newmask[::stride, ::stride]
mX = np.ma.array(mX, mask=mask, fill_value=0)
mY = np.ma.array(mY, mask=mask, fill_value=0)
return mX, mY
def get_xy_1D(ds, stride=1, getval=False):
"""Return 1D arrays of x and y map coordinates for input GDAL Dataset
"""
gt = ds.GetGeoTransform()
#stride = stride_m/gt[1]
pX = np.arange(0, ds.RasterXSize, stride)
pY = np.arange(0, ds.RasterYSize, stride)
mX, dummy = pixelToMap(pX, pY[0], gt)
dummy, mY = pixelToMap(pX[0], pY, gt)
return mX, mY
def get_xy_grids(ds, stride=1, getval=False):
"""Return 2D arrays of x and y map coordinates for input GDAL Dataset
"""
gt = ds.GetGeoTransform()
#stride = stride_m/gt[1]
pX = np.arange(0, ds.RasterXSize, stride)
pY = np.arange(0, ds.RasterYSize, stride)
psamp = np.meshgrid(pX, pY)
mX, mY = pixelToMap(psamp[0], psamp[1], gt)
return mX, mY
def fitPlaneSVD(XYZ):
"""Fit a plane to input point data using SVD
"""
[rows,cols] = XYZ.shape
# Set up constraint equations of the form AB = 0,
# where B is a column vector of the plane coefficients
# in the form b(1)*X + b(2)*Y +b(3)*Z + b(4) = 0.
p = (np.ones((rows,1)))
AB = np.hstack([XYZ,p])
[u, d, v] = np.linalg.svd(AB,0)
# Solution is last column of v.
B = np.array(v[3,:])
coeff = -B[[0, 1, 3]]/B[2]
return coeff
def fitPlaneLSQ(XYZ):
"""Fit a plane to input point data using LSQ
"""
[rows,cols] = XYZ.shape
G = np.ones((rows,3))
G[:,0] = XYZ[:,0] #X
G[:,1] = XYZ[:,1] #Y
Z = XYZ[:,2]
coeff,resid,rank,s = np.linalg.lstsq(G,Z,rcond=None)
return coeff
#Generic 2D polynomial fits
#https://stackoverflow.com/questions/7997152/python-3d-polynomial-surface-fit-order-dependent
def polyfit2d(x, y, z, order=1):
import itertools
ncols = (order + 1)**2
G = np.zeros((x.size, ncols))
ij = itertools.product(range(order+1), range(order+1))
for k, (i,j) in enumerate(ij):
G[:,k] = x**i * y**j
m, _, _, _ = np.linalg.lstsq(G, z, rcond=None)
return m
def polyval2d(x, y, m):
import itertools
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
#0 could be a valid value
z = np.zeros_like(x)
for a, (i,j) in zip(m, ij):
z += a * x**i * y**j
return z
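#Hypothetical usage sketch (not part of the original module): recover the
#coefficients of a planar surface and check the fit reproduces the input.
def _example_polyfit2d():
    x = np.random.uniform(0, 1, 100)
    y = np.random.uniform(0, 1, 100)
    z = 2.0 + 0.5*x - 0.25*y
    m = polyfit2d(x, y, z, order=1)
    print(m)
    print(np.allclose(polyval2d(x, y, m), z))
    #-> True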
#Should use numpy.polynomial.polynomial.polyvander2d
def ma_fitpoly(bma, order=1, gt=None, perc=(2,98), origmask=True):
"""Fit a plane to values in input array
"""
if gt is None:
gt = [0, 1, 0, 0, 0, -1]
#Filter, can be useful to remove outliers
if perc is not None:
from pygeotools.lib import filtlib
bma_f = filtlib.perc_fltr(bma, perc)
else:
bma_f = bma
#Get indices
x, y = get_xy_ma(bma_f, gt, origmask=origmask)
#Fit only where we have valid data
bma_mask = np.ma.getmaskarray(bma)
coeff = polyfit2d(x[~bma_mask].data, y[~bma_mask].data, bma[~bma_mask].data, order=order)
#For 1D, these are: c, y, x, xy
print(coeff)
#Compute values for all x and y, unless origmask=True
vals = polyval2d(x, y, coeff)
resid = bma - vals
return vals, resid, coeff
def ma_fitplane(bma, gt=None, perc=(2,98), origmask=True):
"""Fit a plane to values in input array
"""
if gt is None:
gt = [0, 1, 0, 0, 0, -1]
#Filter, can be useful to remove outliers
if perc is not None:
from pygeotools.lib import filtlib
bma_f = filtlib.perc_fltr(bma, perc)
else:
bma_f = bma
#Get indices
x_f, y_f = get_xy_ma(bma_f, gt, origmask=origmask)
#Regardless of desired output (origmask True or False), for fit, need to limit to valid pixels only
bma_f_mask = np.ma.getmaskarray(bma_f)
#Create xyz stack, needed for SVD
xyz = np.vstack((np.ma.array(x_f, mask=bma_f_mask).compressed(), \
np.ma.array(y_f, mask=bma_f_mask).compressed(), bma_f.compressed())).T
#coeff = fitPlaneSVD(xyz)
coeff = fitPlaneLSQ(xyz)
print(coeff)
vals = coeff[0]*x_f + coeff[1]*y_f + coeff[2]
resid = bma_f - vals
return vals, resid, coeff
def ds_fitplane(ds):
"""Fit a plane to values in GDAL Dataset
"""
from pygeotools.lib import iolib
bma = iolib.ds_getma(ds)
gt = ds.GetGeoTransform()
return ma_fitplane(bma, gt)
#The following were moved from proj_select.py
def getUTMzone(geom):
"""Determine UTM Zone for input geometry
"""
#If geom has srs properly defined, can do this
#geom.TransformTo(wgs_srs)
#Get centroid lat/lon
lon, lat = geom.Centroid().GetPoint_2D()
#Make sure we're -180 to 180
lon180 = (lon+180) - np.floor((lon+180)/360)*360 - 180
zonenum = int(np.floor((lon180 + 180)/6) + 1)
#Determine N/S hemisphere
if lat >= 0:
zonehem = 'N'
else:
zonehem = 'S'
#Deal with special cases
if (lat >= 56.0 and lat < 64.0 and lon180 >= 3.0 and lon180 < 12.0):
zonenum = 32
if (lat >= 72.0 and lat < 84.0):
if (lon180 >= 0.0 and lon180 < 9.0):
zonenum = 31
elif (lon180 >= 9.0 and lon180 < 21.0):
zonenum = 33
elif (lon180 >= 21.0 and lon180 < 33.0):
zonenum = 35
elif (lon180 >= 33.0 and lon180 < 42.0):
zonenum = 37
return str(zonenum)+zonehem
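#Hypothetical usage sketch (not part of the original module): determine the UTM
#zone for a small lat/lon bounding box (uses bbox2geom, defined later in this module,
#which defaults to WGS84).
def _example_getUTMzone():
    geom = bbox2geom([-106.8, -106.7, 38.9, 39.0])
    print(getUTMzone(geom))
    #-> '13N'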
#Return UTM srs
def getUTMsrs(geom):
utmzone = getUTMzone(geom)
srs = osr.SpatialReference()
srs.SetUTM(int(utmzone[0:-1]), int(utmzone[-1] == 'N'))
return srs
#Want to overload this to allow direct coordinate input, create geom internally
def get_proj(geom, proj_list=None):
"""Determine best projection for input geometry
"""
out_srs = None
if proj_list is None:
proj_list = gen_proj_list()
    #Go through user-defined projection list
for projbox in proj_list:
if projbox.geom.Intersects(geom):
out_srs = projbox.srs
break
#If geom doesn't fall in any of the user projection bbox, use UTM
if out_srs is None:
out_srs = getUTMsrs(geom)
return out_srs
class ProjBox:
"""Object containing bbox geom and srs
Used for automatic projection determination
"""
def __init__(self, bbox, epsg):
self.bbox = bbox
self.geom = bbox2geom(bbox)
self.srs = osr.SpatialReference()
self.srs.ImportFromEPSG(epsg)
#This provides a preference order for projections
def gen_proj_list():
"""Create list of projections with cascading preference
"""
#Eventually, just read this in from a text file
proj_list = []
#Alaska
#Note, this spans -180/180
proj_list.append(ProjBox([-180, -130, 51.35, 71.35], 3338))
#proj_list.append(ProjBox([-130, 172.4, 51.35, 71.35], 3338))
#Transantarctic Mountains
proj_list.append(ProjBox([150, 175, -80, -70], 3294))
#Greenland
proj_list.append(ProjBox([-180, 180, 58, 82], 3413))
#Antarctica
proj_list.append(ProjBox([-180, 180, -90, -58], 3031))
#Arctic
proj_list.append(ProjBox([-180, 180, 60, 90], 3413))
return proj_list
#This is first hack at centralizing site and projection definitions
class Site:
def __init__(self, name, extent, srs=None, refdem_fn=None):
#Site name: conus, hma
self.name = name
#Spatial extent, list or tuple: [xmin, xmax, ymin, ymax]
self.extent = extent
self.srs = srs
self.refdem_fn = refdem_fn
site_dict = {}
#NED 1/3 arcsec (10 m)
ned13_dem_fn = '/nobackup/deshean/rpcdem/ned13/ned13_tiles_glac24k_115kmbuff.vrt'
#NED 1 arcsec (30 m)
ned1_dem_fn = '/nobackup/deshean/rpcdem/ned1/ned1_tiles_glac24k_115kmbuff.vrt'
site_dict['conus'] = Site(name='conus', extent=(-125, -104, 31, 50), srs=conus_aea_srs, refdem_fn=ned1_dem_fn)
#SRTM-GL1 1 arcsec (30 m)
srtm1_fn = '/nobackup/deshean/rpcdem/hma/srtm1/hma_srtm_gl1.vrt'
site_dict['hma'] = Site(name='hma', extent=(66, 106, 25, 47), srs=hma_aea_srs, refdem_fn=srtm1_fn)
#bbox should be [min_x, max_x, min_y, max_y]
#(i.e., [minlon, maxlon, minlat, maxlat] for geographic coordinates)
def bbox2geom(bbox, a_srs=wgs_srs, t_srs=None):
#Check bbox
#bbox = numpy.array([-180, 180, 60, 90])
x = [bbox[0], bbox[0], bbox[1], bbox[1], bbox[0]]
y = [bbox[2], bbox[3], bbox[3], bbox[2], bbox[2]]
geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(x,y)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if a_srs is not None:
if int(gdal.__version__.split('.')[0]) >= 3:
if a_srs.IsSame(wgs_srs):
a_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
geom.AssignSpatialReference(a_srs)
if t_srs is not None:
if not a_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(a_srs, t_srs)
geom.Transform(ct)
geom.AssignSpatialReference(t_srs)
return geom
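#Hypothetical usage sketch (not part of the original module): build a WGS84 bbox
#geometry and inspect its envelope (xmin, xmax, ymin, ymax).
def _example_bbox2geom():
    geom = bbox2geom([-180, 180, -90, -58])
    print(geom.GetEnvelope())
    #-> (-180.0, 180.0, -90.0, -58.0)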
def xy2geom(x, y, t_srs=None):
"""Convert x and y point coordinates to geom
"""
geom_wkt = 'POINT({0} {1})'.format(x, y)
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if t_srs is not None and not wgs_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(t_srs, wgs_srs)
geom.Transform(ct)
geom.AssignSpatialReference(t_srs)
return geom
def get_dem_mosaic_cmd(fn_list, o, fn_list_txt=None, tr=None, t_srs=None, t_projwin=None, georef_tile_size=None, threads=None, tile=None, stat=None):
"""
Create ASP dem_mosaic command
Useful for spawning many single-threaded mosaicing processes
"""
cmd = ['dem_mosaic',]
if o is None:
o = 'mos'
cmd.extend(['-o', o])
if threads is None:
from pygeotools.lib import iolib
threads = iolib.cpu_count()
cmd.extend(['--threads', threads])
if tr is not None:
cmd.extend(['--tr', tr])
if t_srs is not None:
#cmd.extend(['--t_srs', t_srs.ExportToProj4()])
cmd.extend(['--t_srs', '"%s"' % t_srs.ExportToProj4()])
#cmd.extend(['--t_srs', "%s" % t_srs.ExportToProj4()])
if t_projwin is not None:
cmd.append('--t_projwin')
cmd.extend(t_projwin)
cmd.append('--force-projwin')
if tile is not None:
#Not yet implemented
#cmd.extend(tile_list)
cmd.append('--tile-index')
cmd.append(tile)
if georef_tile_size is not None:
cmd.extend(['--georef-tile-size', georef_tile_size])
if stat is not None:
if stat == 'wmean':
stat = None
else:
cmd.append('--%s' % stat.replace('index',''))
if stat in ['lastindex', 'firstindex', 'medianindex']:
#This will write out the index map to -last.tif by default
cmd.append('--save-index-map')
#Make sure we don't have ndv that conflicts with 0-based DEM indices
cmd.extend(['--output-nodata-value','-9999'])
#else:
# cmd.extend(['--save-dem-weight', o+'_weight'])
#If user provided a file containing list of DEMs to mosaic (useful to avoid long bash command issues)
if fn_list_txt is not None:
if os.path.exists(fn_list_txt):
cmd.append('-l')
cmd.append(fn_list_txt)
else:
print("Could not find input text file containing list of inputs")
else:
cmd.extend(fn_list)
cmd = [str(i) for i in cmd]
#print(cmd)
#return subprocess.call(cmd)
return cmd
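#Hypothetical usage sketch (not part of the original module): the input DEM
#filenames are placeholders; the returned list can be passed to subprocess.
def _example_get_dem_mosaic_cmd():
    fn_list = ['dem1.tif', 'dem2.tif']
    cmd = get_dem_mosaic_cmd(fn_list, 'mos', tr=2.0, stat='median')
    print(' '.join(cmd))
    #e.g. 'dem_mosaic -o mos --threads <n> --tr 2.0 --median dem1.tif dem2.tif'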
#Formulas for CE90/LE90 here:
#http://www.fgdc.gov/standards/projects/FGDC-standards-projects/accuracy/part3/chapter3
def CE90(x_offset,y_offset):
RMSE_x = np.sqrt(np.sum(x_offset**2)/x_offset.size)
RMSE_y = np.sqrt(np.sum(y_offset**2)/y_offset.size)
c95 = 2.4477
c90 = 2.146
RMSE_min = min(RMSE_x, RMSE_y)
RMSE_max = max(RMSE_x, RMSE_y)
ratio = RMSE_min/RMSE_max
if ratio > 0.6 and ratio < 1.0:
out = c90 * 0.5 * (RMSE_x + RMSE_y)
else:
out = c90 * np.sqrt(RMSE_x**2 + RMSE_y**2)
return out
def LE90(z_offset):
RMSE_z = np.sqrt(np.sum(z_offset**2)/z_offset.size)
c95 = 1.9600
c90 = 1.6449
return c90 * RMSE_z
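#Hypothetical usage sketch (not part of the original module): CE90/LE90 from
#synthetic horizontal and vertical offsets (e.g., GCP residuals in meters).
def _example_accuracy_metrics():
    x_off = np.random.normal(0, 1.0, 1000)
    y_off = np.random.normal(0, 1.0, 1000)
    z_off = np.random.normal(0, 0.5, 1000)
    print("CE90: %0.2f, LE90: %0.2f" % (CE90(x_off, y_off), LE90(z_off)))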
#Get approximate elevation MSL from USGS API using 10-m NED
#https://nationalmap.gov/epqs/
def get_NED(lon, lat):
url = 'https://nationalmap.gov/epqs/pqs.php?x=%.8f&y=%.8f&units=Meters&output=json' % (lon, lat)
r = requests.get(url)
out = np.nan
ned_ndv = -1000000
if r.status_code == 200:
out = r.json()['USGS_Elevation_Point_Query_Service']['Elevation_Query']['Elevation']
out = float(out)
if out == ned_ndv:
out = np.nan
else:
print("USGS elevation MSL: %0.2f" % out)
return out
#USGS API can only handle one point at a time
get_NED_np = np.vectorize(get_NED)
#Get approximate elevation MSL from Open Elevation API (global)
#Note that this can periodically fail, likely throttled - need more robust request
#ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
#Can do multiple points at once:
#https://github.com/Jorl17/open-elevation/blob/master/docs/api.md
def get_OpenElevation(lon, lat):
import time
if isinstance(lon, (list, tuple, np.ndarray)):
#https://api.open-elevation.com/api/v1/lookup\?locations\=10,10\|20,20\|41.161758,-8.583933
locstr = '|'.join(['%0.8f,%0.8f' % i for i in zip(lat, lon)])
url = 'https://api.open-elevation.com/api/v1/lookup?locations=%s' % locstr
out = np.full_like(lon, np.nan)
else:
out = np.nan
url = 'https://api.open-elevation.com/api/v1/lookup?locations=%0.8f,%0.8f' % (lat, lon)
print(url)
i = 0
while(i < 5):
try:
r = requests.get(url)
if r.status_code == 200:
#out = float(r.json()['results'][0]['elevation'])
out = [float(i['elevation']) for i in r.json()['results']]
if len(out) == 1:
out = out[0]
print("Open Elevation MSL: ", out)
break
except:
time.sleep(3)
i += 1
return out
#Get geoid offset from NGS
#https://www.ngs.noaa.gov/web_services/geoid.shtml
def get_GeoidOffset(lon, lat):
#Can specify model, 13 = GEOID12B
url = 'https://geodesy.noaa.gov/api/geoid/ght?lat=%0.8f&lon=%0.8f' % (lat, lon)
r = requests.get(url)
out = np.nan
if r.status_code == 200:
out = float(r.json()['geoidHeight'])
print("NGS geoid offset: %0.2f" % out)
return out
#NGS geoid can only handle one point at a time
get_GeoidOffset_np = np.vectorize(get_GeoidOffset)
#Get elevation (height above mean sea level - default for NED and Open Elevation)
def get_MSL(lon, lat):
out = get_NED_np(lon, lat)
#Check for missing elevations
idx = np.isnan(out)
if np.any(idx):
out[idx] = get_OpenElevation(lon[idx], lat[idx])
return out
#Get elevation (height above WGS84 ellipsoid)
def get_HAE(lon, lat):
out = get_MSL(lon, lat)
idx = np.isnan(out)
#If we have any valid values, remove geoid offset
if np.any(~idx):
offset = get_GeoidOffset_np(lon[~idx], lat[~idx])
out[~idx] += offset
return out
def test_elev_api(lon, lat):
lat = np.random.uniform(-90,90,10)
lon = np.random.uniform(-180,180,10)
return get_MSL(lon,lat)
#Create a raster heatmap from polygon features (geopandas GeoDataFrame)
#res is output grid cell size in meters
def heatmap(gdf, res=1000, out_fn='heatmap.tif', return_ma=False):
from pygeotools.lib import iolib
import subprocess
gpkg_fn = os.path.splitext(out_fn)[0] + '.gpkg'
if not os.path.exists(out_fn):
#Could probably do this in MEM, or maybe gdal_rasterize is now exposed in API
gdf.to_file(gpkg_fn, driver='GPKG')
cmd = ['gdal_rasterize', '-burn', '1', '-tr', str(res), str(res), '-ot', 'UInt32', '-a_nodata', '0', '-add', gpkg_fn, out_fn]
#Add standard raster output options
cmd.extend(iolib.gdal_opt_co)
print(cmd)
subprocess.call(cmd)
if return_ma:
#with rio.Open(out_fn) as src:
# out = src.read()
out = iolib.fn_getma(out_fn)
else:
out = out_fn
return out
#Create a raster heatmap from OGR-readable file (e.g., shp, gpkg)
def heatmap_fn(fn, res=1000, return_ma=False):
import geopandas as gpd
gdf = gpd.read_file(fn)
out_fn = os.path.splitext(fn)[0]+'_heatmap.tif'
return heatmap(gdf, res=res, out_fn=out_fn, return_ma=return_ma)
| mit |
lokeshpancharia/data-science-from-scratch | code/visualizing_data.py | 58 | 5116 | import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]
# create a line chart, years on x-axis, gdp on y-axis
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
# add a title
plt.title("Nominal GDP")
# add a label to the y-axis
plt.ylabel("Billions of $")
plt.show()
def make_chart_simple_bar_chart(plt):
movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
# bars are by default width 0.8, so we'll add 0.1 to the left coordinates
# so that each bar is centered
xs = [i + 0.1 for i, _ in enumerate(movies)]
# plot bars with left x-coordinates [xs], heights [num_oscars]
plt.bar(xs, num_oscars)
plt.ylabel("# of Academy Awards")
plt.title("My Favorite Movies")
# label x-axis with movie names at bar centers
plt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)
plt.show()
def make_chart_histogram(plt):
grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]
decile = lambda grade: grade // 10 * 10
histogram = Counter(decile(grade) for grade in grades)
plt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4
histogram.values(), # give each bar its correct height
8) # give each bar a width of 8
plt.axis([-5, 105, 0, 5]) # x-axis from -5 to 105,
# y-axis from 0 to 5
plt.xticks([10 * i for i in range(11)]) # x-axis labels at 0, 10, ..., 100
plt.xlabel("Decile")
plt.ylabel("# of Students")
plt.title("Distribution of Exam 1 Grades")
plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
mentions = [500, 505]
years = [2013, 2014]
plt.bar([2012.6, 2013.6], mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
# if you don't do this, matplotlib will label the x-axis 0, 1
# and then add a +2.013e3 off in the corner (bad matplotlib!)
plt.ticklabel_format(useOffset=False)
if mislead:
# misleading y-axis only shows the part above 500
plt.axis([2012.5,2014.5,499,506])
plt.title("Look at the 'Huge' Increase!")
else:
plt.axis([2012.5,2014.5,0,550])
plt.title("Not So Huge Anymore.")
plt.show()
def make_chart_several_line_charts(plt):
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance') # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
def make_chart_scatter_plot(plt):
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
plt.scatter(friends, minutes)
# label each point
for label, friend_count, minute_count in zip(labels, friends, minutes):
plt.annotate(label,
xy=(friend_count, minute_count), # put the label with its point
xytext=(5, -5), # but slightly offset
textcoords='offset points')
plt.title("Daily Minutes vs. Number of Friends")
plt.xlabel("# of friends")
plt.ylabel("daily minutes spent on the site")
plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
if equal_axes:
plt.title("Axes Are Comparable")
plt.axis("equal")
else:
plt.title("Axes Aren't Comparable")
plt.show()
def make_chart_pie_chart(plt):
plt.pie([0.95, 0.05], labels=["Uses pie charts", "Knows better"])
# make sure pie is a circle and not an oval
plt.axis("equal")
plt.show()
if __name__ == "__main__":
make_chart_simple_line_chart(plt)
make_chart_simple_bar_chart(plt)
make_chart_histogram(plt)
make_chart_misleading_y_axis(plt, mislead=True)
make_chart_misleading_y_axis(plt, mislead=False)
make_chart_several_line_charts(plt)
make_chart_scatterplot_axes(plt, equal_axes=False)
make_chart_scatterplot_axes(plt, equal_axes=True)
make_chart_pie_chart(plt)
| unlicense |
anirudhjayaraman/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
         ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
NICTA/dora | setup.py | 1 | 1537 | """ Setup utility for the dora package. """
from setuptools import setup, find_packages
# from setuptools.command.test import test as TestCommand
setup(
name='dora',
version='0.1',
description='Active sampling using a non-parametric regression model.',
url='http://github.com/nicta/dora',
packages=find_packages(),
# cmdclass={
# 'test': PyTest
# },
tests_require=['pytest'],
install_requires=[
'scipy >= 0.14.0',
'numpy >= 1.8.2',
'revrand == 0.6.5',
'jupyter >= 1.0.0',
'matplotlib >= 2.0.2',
'flask',
'visvis',
'requests'
],
dependency_links=[
'git+https://github.com/nicta/[email protected]#egg=revrand-0.6.5'],
extras_require={
'demos': [
'unipath',
'requests',
],
},
license="Apache Software License 2.0",
classifiers=[
"Development Status :: 3 - Alpha",
"Operating System :: POSIX",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3.4",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Information Analysis"
]
)
| apache-2.0 |
HRClab/SimInterace | SimInterface/System.py | 2 | 13623 | """
The fundamental objects of the SimInterface are systems.
"""
try:
import graphviz as gv
graphviz = True
except ImportError:
graphviz = False
import pandas as pd
import numpy as np
import collections as col
import Variable as Var
import inspect as ins
def castToTuple(Vars):
if Vars is None:
return tuple()
elif isinstance(Vars,col.Iterable):
        #Preserve ordering: several callers index into the result
        return tuple(Vars)
else:
return (Vars,)
def castToSet(S):
if isinstance(S,set):
return S
elif isinstance(S,col.Iterable):
return set(S)
elif S is None:
return set()
else:
return set([S])
class System:
"""
I think a better solution would be obtained by just forcing
StateFunc, and OutputFuncs to simply be function objects, so that
the variables would just inherit.
"""
def __init__(self,Funcs=set(),label=''):
self.label=label
self.__buildSystem(Funcs)
def add(self,func):
Funcs = self.Funcs | set([func])
self.__buildSystem(Funcs)
def update(self,NewFuncs):
NewFuncSet = castToSet(NewFuncs)
Funcs = self.Funcs | NewFuncSet
self.__buildSystem(Funcs)
def __buildSystem(self,Funcs):
self.Funcs = castToSet(Funcs)
# Get all the variables
self.Vars = reduce(lambda a,b : a|b,
[f.Vars for f in self.Funcs],
set())
# We will now build an execution order for the output functions
Parents = dict()
Children = dict()
Executable = col.deque()
self.ExecutionOrder = []
for f in self.Funcs:
            #Parent functions are the in-system functions that produce f's inputs
            Parents[f] = set(v.Source for v in f.InputVars) & self.Funcs
if (len(Parents[f]) == 0) and (isinstance(f,StaticFunction)):
# If a function has no parents it is executable immediately
# Only put static functions in the execution order
Executable.append(f)
if f in Parents[f]:
                raise ValueError('Not well-posed: function %s depends on itself' % f.label)
        # For convenience we also construct the inverse dictionary:
        # for each function, the set of functions that consume its outputs
        Children = {f : set() for f in self.Funcs}
        for f in self.Funcs:
            for g in Parents[f]:
                Children[g].add(f)
# Now finally we create the execution order
while len(Executable) > 0:
f = Executable.pop()
self.ExecutionOrder.append(f)
for child in Children[f]:
Parents[child].remove(f)
if (len(Parents[child]) == 0) and \
(isinstance(child,StaticFunction)):
Executable.append(child)
# Build a dictionary from labels to current values
self.labelToValue = {v.label : np.array(v.data.iloc[0]) \
for v in self.Vars}
##### Things needed for Vector Field ######
self.StateFuncs = [f for f in self.Funcs if len(f.StateVars)>0]
StateVarSet = reduce(lambda a,b : a|b,
[set(f.StateVars) for f in self.Funcs],
set())
self.StateVars = list(StateVarSet)
self.stateToFunc = {v : [] for v in self.StateVars}
for f in self.StateFuncs:
for v in f.StateVars:
self.stateToFunc[v].append(f)
# Create auxilliary states for exogenous signals
self.InputSignals = [v for v in self.Vars if \
(v.Source not in self.Funcs) and \
(isinstance(v,Var.Signal))]
self.IndexSlopes = []
for v in self.InputSignals:
TimeIndex = np.array(v.data.index.levels[1])
slopeList = 1./np.diff(TimeIndex)
self.IndexSlopes.append(slopeList)
##### Initial Condition for ODE Integration ######
Dimensions = [0]
Dimensions.extend([v.data.shape[1] for v in self.StateVars])
self.StateIndexBounds = np.cumsum(Dimensions)
NumStates = len(self.StateVars)
self.InitialState = np.zeros(self.StateIndexBounds[-1] + \
len(self.InputSignals))
for k in range(NumStates):
InitVal = np.array(self.StateVars[k].data.iloc[0])
indLow,indHigh = self.StateIndexBounds[k:k+2]
self.InitialState[indLow:indHigh] = InitVal
self.__createGraph()
def UpdateParameters(self):
Parameters = [v for v in self.Vars if isinstance(v,Var.Parameter)]
for v in Parameters:
self.labelToValue[v.label] = np.array(v.data.iloc[0])
def UpdateSignals(self,Time=[],State=[]):
T = len(Time)
Signals = set([v for v in self.Vars if isinstance(v,Var.Signal)])
InternalSignals = Signals - set(self.InputSignals)
NewData = {v : np.zeros((T,v.data.shape[1])) \
for v in InternalSignals}
for k in range(len(Time)):
t = Time[k]
S = State[k]
self.__setSignalValues(t,S)
for v in InternalSignals:
NewData[v][k] = self.labelToValue[v.label]
for v in InternalSignals:
# Need a reset
v.setData(NewData[v],Time)
def __setSignalValues(self,Time,State):
# Set State Values
NumStates = len(self.StateVars)
for k in range(NumStates):
v = self.StateVars[k]
indLow,indHigh = self.StateIndexBounds[k:k+2]
curVal = State[indLow:indHigh]
self.labelToValue[v.label] = curVal
# Set Input Signal Values
NumIndexStates = len(self.InputSignals)
IndexStateList = State[-NumIndexStates:]
for k in range(NumIndexStates):
ctsIndex = IndexStateList[k]
curInd = int(np.floor(ctsIndex))
nextInd = curInd+1
if nextInd < len(self.IndexSlopes[k]):
v = self.InputSignals[k]
# Linearly interpolate exogenous inputs
# Presumably this could help smoothness.
# and it is not very hard.
prevInput = v.data.iloc[curInd]
nextInput = v.data.iloc[nextInd]
lam = IndexStateList[k] - curInd
# this can be called later.
inputVal = (1-lam) * prevInput + lam * nextInput
self.labelToValue[v.label] = np.array(inputVal)
else:
# If out of bounds just stay at the last value.
self.labelToValue[v.label] = np.array(v.data.iloc[-1])
# Set Intermediate Signal Values
for f in self.ExecutionOrder:
argList = ins.getargspec(f.func)[0]
valList = [self.labelToValue[lab] for lab in argList]
outTup = f.func(*valList)
if len(f.OutputVars) > 1:
                for k in range(len(f.OutputVars)):
outVariable = f.OutputVars[k]
outValue = outTup[k]
self.labelToValue[outVariable.label] = outValue
else:
self.labelToValue[f.OutputVars[0].label] = outTup
def VectorField(self,Time,State):
"""
Something suitable for passing to ODE methods.
"""
State_dot = np.zeros(len(State))
self.__setSignalValues(Time,State)
NumStates = len(self.StateVars)
# Apply the vector fields
## Compute
NumIndexStates = len(self.InputSignals)
IndexStateList = State[-NumIndexStates:]
IndexSlopes = np.zeros(NumIndexStates)
for k in range(NumIndexStates):
ctsIndex = IndexStateList[k]
curInd = int(np.floor(ctsIndex))
nextInd = curInd+1
if nextInd < len(self.IndexSlopes[k]):
# Not too near end
IndexSlopes[k] = self.IndexSlopes[k][curInd]
else:
# If out of bounds just stay at the last value.
IndexSlopes[k] = 0.
## Plug in the derivative of the index slopes.
State_dot[-NumIndexStates:] = IndexSlopes
## Compute main vector field
dvdt = {v : np.zeros(v.data.shape[1]) for v in self.StateVars}
for f in self.StateFuncs:
argList = ins.getargspec(f.func)[0]
valList = [self.labelToValue[lab] for lab in argList]
dxdt = f.func(*valList)
nx = len(f.StateVars)
# output may or may not be a tuple.
if nx > 1:
for k in range(nx):
dvdt[f.StateVars[k]] += dxdt[k]
else:
dvdt[f.StateVars[0]] += dxdt
for k in range(NumStates):
indLow,indHigh = self.StateIndexBounds[k:k+2]
State_dot[indLow:indHigh] = dvdt[self.StateVars[k]]
return State_dot
def __createGraph(self):
"""
Create a graph using the graphviz module.
It may be advisable to make this a bit more separated.
Namely, make a separate add-on that you pass the system to and it
would produce a graph.
Basically make a separate submodule called "SystemGraph"
"""
if not graphviz:
self.graph = None
return
dot = gv.Digraph(name=self.label)
# Ignore the integrators
NonIntegrators = set([f for f in self.Funcs if f.ftype != 'integrator'])
for f in NonIntegrators:
dot.node(f.label,shape='box')
# Handle state vars and nonstate vars separately
StateVars = set(self.StateVars)
NonState = self.Vars - StateVars
for v in self.Vars:
if (v.Source not in NonIntegrators) and (v in NonState):
dot.node(v.label,shape='plaintext')
for tar in (set(v.Targets) & NonIntegrators):
dot.edge(v.label,tar.label)
else:
for tar in (set(v.Targets) & NonIntegrators):
if v.Source in NonIntegrators:
dot.edge(v.Source.label,tar.label,label=v.label)
if len(set(v.Targets) & self.Funcs) == 0:
dot.node(v.label,shape='plaintext')
if v.Source in NonIntegrators:
dot.edge(v.Source.label,v.label)
# Special handling for states
for v in StateVars:
for f in self.stateToFunc[v]:
for g in (v.Targets & NonIntegrators):
dot.edge(f.label,g.label,label=v.label)
self.graph = dot
def Connect(Systems=set(),label='Sys'):
Funcs = reduce(lambda a,b : a | b, [S.Funcs for S in Systems],set())
return System(Funcs,label)
class Function(System):
def __init__(self,func=lambda : None,label='Fun',
StateVars = tuple(),
InputVars = tuple(),
OutputVars = tuple(),
ftype=None):
self.func = func
self.label = label
self.ftype = ftype
# In general, ordering of variables matters.
# This is especially true of parsing outputs
self.StateVars = castToTuple(StateVars)
self.InputVars = castToTuple(InputVars)
self.OutputVars = castToTuple(OutputVars)
# Sometimes, however, we only care about
# an un-ordered set
StateSet = set(self.StateVars)
InputSet = set(self.InputVars)
OutputSet = set(self.OutputVars)
self.Vars = StateSet | InputSet | OutputSet
map(lambda v : v.Targets.add(self),StateSet | InputSet)
for v in OutputSet:
v.Source = self
System.__init__(self,self,label)
class StaticFunction(Function):
def __init__(self,func=None,InputVars=None,OutputVars=None,label='Fun'):
Function.__init__(self,func=func,label=label,ftype='static',
InputVars=InputVars,OutputVars=OutputVars)
class DifferentialEquation(System):
def __init__(self,func=None,StateVars=None,InputVars=None,
Time=None,label='DiffEq'):
# Dummy signals for the time derivatives
# These "outputs" are fed into a dummy integrator function
OutputVars = []
StateVars = castToTuple(StateVars)
# Make an ordered list of derivative variables corresponding
# to the state variables.
for v in StateVars:
dvdt = Var.Signal(label='d%s/dt' % v.label,
data=np.zeros((1,v.data.shape[1])),
TimeStamp=np.zeros(1))
OutputVars.append(dvdt)
VectorField = Function(func=func,label=label,
InputVars=InputVars,
OutputVars=OutputVars,
StateVars=StateVars,
ftype='vector_field')
Integrator = Function(label='Integrator',
InputVars=OutputVars,
OutputVars=StateVars,
ftype='integrator')
System.__init__(self,
Funcs=set([VectorField,Integrator]),label=label)
| mit |
LevinJ/ud730-Deep-Learning | A1_notmnistdataset/logisticsregresstionsmaple.py | 1 | 1313 | print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using '__' separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| gpl-2.0 |
evanthebouncy/nnhmm | graph4/graph.py | 1 | 4892 | import random
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import copy
N = 100
def dist(c1, c2):
return np.linalg.norm(np.array(c1) - np.array(c2))
def too_close(pt, pts):
for pt_other in pts:
if dist(pt, pt_other) < 0.05:
return True
return False
def gen_pts(n):
ret = []
    while len(ret) < n:
new_pt = (np.random.random(), np.random.random())
if not too_close(new_pt, ret):
ret.append(new_pt)
return ret
def gen_spanning_tree(n_vertex):
nnn = len(n_vertex)
ret = [[0 for i in range(nnn)] for j in range(nnn)]
def get_closest(n_set, nodey):
dists = [(dist(nss[1], nodey[1]), nss) for nss in n_set]
return min(dists)[1]
connected_set = []
    n_v = list(zip(range(nnn), n_vertex))
while len(connected_set) < nnn:
# pick random
rand_node = random.choice(n_v)
n_v.remove(rand_node)
if connected_set == []:
connected_set.append(rand_node)
else:
closest = get_closest(connected_set, rand_node)
ret[closest[0]][rand_node[0]] = 1
ret[rand_node[0]][closest[0]] = 1
connected_set.append(rand_node)
return ret
def gen_graph(n_vertex, c):
n = len(n_vertex)
ret = [[0 for i in range(n)] for j in range(n)]
for i in range(n):
for j in range(i):
crd_i, crd_j = n_vertex[i], n_vertex[j]
if np.random.random() < np.power(2, -c * dist(crd_i, crd_j)) or\
dist(crd_i, crd_j) < 0.3:
ret[i][j] = 1
ret[j][i] = 1
return ret
def get_blob(blobs, node):
for blob in blobs:
if node in blob:
return blob
return None
# return a map from node to components
def get_ccomp(ge):
blobs = []
for iii in range(N):
for jjj in range(N):
if ge[iii][jjj] == 1 or iii == jjj:
blob_i = get_blob(blobs, iii)
blob_j = get_blob(blobs, jjj)
if blob_i == None and blob_j == None:
blobs.append(set([iii,jjj]))
if blob_i != None and blob_j == None:
blob_i.add(jjj)
if blob_i == None and blob_j != None:
blob_j.add(iii)
if blob_i != None and blob_j != None and blob_i != blob_j:
blobs.remove(blob_i)
blobs.remove(blob_j)
blobs.append(blob_i.union(blob_j))
ret = dict()
for i, blob in enumerate(blobs):
for bb in blob:
ret[bb] = i
return ret
def get_shortest_path(ge, node_i):
fringe = [node_i]
seen = set()
dists = {}
for _ in range(N):
dists[_] = 999
cur_hop = 0
while fringe != []:
cur_nodes = fringe
seen.update(cur_nodes)
fringe = []
for c_n in cur_nodes:
dists[c_n] = cur_hop
for other_j in range(N):
if ge[c_n][other_j] and other_j not in seen:
fringe.append(other_j)
fringe = list(set(fringe))
# print fringe
cur_hop += 1
return dists
def gen_obs_links(ge):
ret = []
for iii in range(N):
for jjj in range(iii):
if ge[iii][jjj] == 1:
ret.append((iii,jjj))
for i in range(300):
pick_x = random.randint(0, N-1)
pick_y = random.randint(0, N-1)
ret.append((pick_x, pick_y))
return ret
def get_shortest_paths(ge):
return [get_shortest_path(ge, i) for i in range(N)]
def random_fail(ge, prob=0.02):
ret = copy.deepcopy(ge)
link_fail = []
for i in range(N):
for j in range(i):
if np.random.random() < prob and ge[i][j] == 1:
ret[i][j] = 0
ret[j][i] = 0
link_fail.append((i,j))
return ret, link_fail
def path_changed(ge, ge_fail, G_OBS):
ret = []
# sp_ge = get_shortest_paths(ge)
# sp_ge_fail = get_shortest_paths(ge_fail)
# ccp_ge = get_ccomp(ge)
ccp_ge_fail = get_ccomp(ge_fail)
for test_pair in G_OBS:
i, j = test_pair
if ccp_ge_fail[i] != ccp_ge_fail[j]:
ret.append([1.0, 0.0])
else:
ret.append([0.0, 1.0])
# if sp_ge[i][j] != sp_ge_fail[i][j]:
# ret.append([1.0, 0.0])
# else:
# ret.append([0.0, 1.0])
return ret
def draw_graph(gv, ge, name):
Gr = nx.Graph()
for i in range(N):
Gr.add_node(i, pos=gv[i])
for i in range(N):
for j in range(N):
if ge[i][j]:
Gr.add_edge(i,j)
labels = dict()
for i in range(N):
labels[i] = str(i)
pos=nx.get_node_attributes(Gr,'pos')
nx.draw(Gr, pos=pos,
node_size=400, with_labels=False)
nx.draw_networkx_labels(Gr, pos, labels)
plt.savefig(name)
# V = gen_pts(N)
# # G = gen_graph(V, 6)
# G = gen_spanning_tree(V)
#
# G_V = V
# G_E = G
#
# print G_V
# print G_E
#
# Gr = nx.Graph()
# for i in range(N):
# Gr.add_node(i, pos=G_V[i])
#
# for i in range(N):
# for j in range(N):
# if G_E[i][j]:
# Gr.add_edge(i,j)
#
# labels = dict()
# for i in range(N):
# labels[i] = str(i)
#
# pos=nx.get_node_attributes(Gr,'pos')
#
# nx.draw(Gr, pos=pos,
# node_size=250, with_labels=False)
# nx.draw_networkx_labels(Gr, pos, labels)
#
# plt.savefig("graph.png")
| mit |
pratapvardhan/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 25 | 8160 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_estimator_init():
eclf = VotingClassifier(estimators=[])
msg = ('Invalid `estimators` attribute, `estimators` should be'
' a list of (string, estimator) tuples')
assert_raise_message(AttributeError, msg, eclf.fit, X, y)
clf = LogisticRegression(random_state=1)
eclf = VotingClassifier(estimators=[('lr', clf)], voting='error')
msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
msg = ('Number of classifiers and weights must be equal'
'; got 2 weights, 1 estimators')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
def test_predictproba_hardvoting():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='hard')
msg = "predict_proba is not available when voting='hard'"
assert_raise_message(AttributeError, msg, eclf.predict_proba, X)
def test_notfitted():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='soft')
msg = ("This VotingClassifier instance is not fitted yet. Call \'fit\'"
" with appropriate arguments before using this method.")
assert_raise_message(NotFittedError, msg, eclf.predict_proba, X)
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
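    # One vote for label 2 (clf1) and one for label 1 (clf2): hard voting breaks
    # the tie in favour of the smaller label, so the ensemble predicts 1
    # (explanatory comment added, not in the upstream test).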
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
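    # Clarifying comment (not in the upstream test): with soft voting and
    # weights [2, 1, 1], each expected probability above is the weighted mean of
    # the per-classifier probabilities, hence the division by sum(weights) == 4.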
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
| bsd-3-clause |
rs2/pandas | pandas/plotting/_matplotlib/core.py | 1 | 52089 | from typing import TYPE_CHECKING, List, Optional, Tuple
import warnings
from matplotlib.artist import Artist
import numpy as np
from pandas._typing import Label
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_float,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_number,
is_numeric_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.compat import mpl_ge_3_0_0
from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters
from pandas.plotting._matplotlib.style import get_standard_colors
from pandas.plotting._matplotlib.timeseries import (
decorate_axes,
format_dateaxis,
maybe_convert_index,
maybe_resample,
use_dynamic_x,
)
from pandas.plotting._matplotlib.tools import (
create_subplots,
flatten_axes,
format_date_labels,
get_all_lines,
get_xlim,
handle_shared_axes,
table,
)
if TYPE_CHECKING:
from matplotlib.axes import Axes
from matplotlib.axis import Axis
def _color_in_style(style: str) -> bool:
"""
Check if there is a color letter in the style string.
"""
from matplotlib.colors import BASE_COLORS
return not set(BASE_COLORS).isdisjoint(style)
class MPLPlot:
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
@property
def _kind(self):
"""Specify kind str. Must be overridden in child class"""
raise NotImplementedError
_layout_type = "vertical"
_default_rot = 0
orientation: Optional[str] = None
def __init__(
self,
data,
kind=None,
by=None,
subplots=False,
sharex=None,
sharey=False,
use_index=True,
figsize=None,
grid=None,
legend=True,
rot=None,
ax=None,
fig=None,
title=None,
xlim=None,
ylim=None,
xticks=None,
yticks=None,
xlabel: Optional[Label] = None,
ylabel: Optional[Label] = None,
sort_columns=False,
fontsize=None,
secondary_y=False,
colormap=None,
table=False,
layout=None,
include_bool=False,
**kwds,
):
import matplotlib.pyplot as plt
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility
# setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.xlabel = xlabel
self.ylabel = ylabel
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else plt.rcParams["axes.grid"]
self.grid = grid
self.legend = legend
self.legend_handles: List[Artist] = []
self.legend_labels: List[Label] = []
self.logx = kwds.pop("logx", False)
self.logy = kwds.pop("logy", False)
self.loglog = kwds.pop("loglog", False)
self.label = kwds.pop("label", None)
self.style = kwds.pop("style", None)
self.mark_right = kwds.pop("mark_right", True)
self.stacked = kwds.pop("stacked", False)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop("xerr", None)
yerr = kwds.pop("yerr", None)
self.errors = {
kw: self._parse_errorbars(kw, err)
for kw, err in zip(["xerr", "yerr"], [xerr, yerr])
}
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndexClass)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if "cmap" in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif "cmap" in kwds:
self.colormap = kwds.pop("cmap")
else:
self.colormap = colormap
self.table = table
self.include_bool = include_bool
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if (
"color" in self.kwds
and self.nseries == 1
and not is_list_like(self.kwds["color"])
):
# support series.plot(color='green')
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds
and isinstance(self.kwds["color"], tuple)
and self.nseries == 1
and len(self.kwds["color"]) in (3, 4)
):
# support RGB and RGBA tuples in series plot
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds or "colors" in self.kwds
) and self.colormap is not None:
warnings.warn(
"'color' and 'colormap' cannot be used simultaneously. Using 'color'"
)
if "color" in self.kwds and self.style is not None:
if is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if _color_in_style(s):
raise ValueError(
"Cannot pass 'style' string with a color symbol and "
"'color' keyword argument. Please use one or the "
"other or pass 'style' without a color symbol"
)
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
for col, values in data.items():
if keep_index is True:
yield col, values
else:
yield col, values.values
@property
def nseries(self) -> int:
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._adorn_subplots()
for ax in self.axes:
self._post_plot_logic_common(ax, self.data)
self._post_plot_logic(ax, self.data)
def _args_adjust(self):
pass
def _has_plotted_object(self, ax: "Axes") -> bool:
"""check whether ax has data"""
return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0
def _maybe_right_yaxis(self, ax: "Axes", axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, "right_ax"):
# if it has right_ax property, ``ax`` must be left axes
return ax.right_ax
elif hasattr(ax, "left_ax"):
# if it has left_ax property, ``ax`` must be right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
# TODO: use Matplotlib public API when available
new_ax._get_lines = orig_ax._get_lines
new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
if self.logy is True or self.loglog is True:
new_ax.set_yscale("log")
elif self.logy == "sym" or self.loglog == "sym":
new_ax.set_yscale("symlog")
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = create_subplots(
naxes=self.nseries,
sharex=self.sharex,
sharey=self.sharey,
figsize=self.figsize,
ax=self.ax,
layout=self.layout,
layout_type=self._layout_type,
)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = flatten_axes(axes)
valid_log = {False, True, "sym", None}
input_log = {self.logx, self.logy, self.loglog}
if input_log - valid_log:
invalid_log = next(iter(input_log - valid_log))
raise ValueError(
f"Boolean, None and 'sym' are valid options, '{invalid_log}' is given."
)
if self.logx is True or self.loglog is True:
[a.set_xscale("log") for a in axes]
elif self.logx == "sym" or self.loglog == "sym":
[a.set_xscale("symlog") for a in axes]
if self.logy is True or self.loglog is True:
[a.set_yscale("log") for a in axes]
elif self.logy == "sym" or self.loglog == "sym":
[a.set_yscale("symlog") for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (
is_list_like(self.secondary_y) and len(self.secondary_y) == self.nseries
)
if sec_true or all_sec:
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, ABCSeries):
label = self.label
if label is None and data.name is None:
label = "None"
data = data.to_frame(name=label)
# GH16953, _convert is needed as fallback, for ``Series``
# with ``dtype == object``
data = data._convert(datetime=True, timedelta=True)
include_type = [np.number, "datetime", "datetimetz", "timedelta"]
# GH23719, allow plotting boolean
if self.include_bool is True:
include_type.append(np.bool_)
# GH22799, exclude datetime-like type for boxplot
exclude_type = None
if self._kind == "box":
# TODO: change after solving issue 27881
include_type = [np.number]
exclude_type = ["timedelta"]
# GH 18755, include object and category type for scatter plot
if self._kind == "scatter":
include_type.extend(["object", "category"])
numeric_data = data.select_dtypes(include=include_type, exclude=exclude_type)
try:
is_empty = numeric_data.columns.empty
except AttributeError:
is_empty = not len(numeric_data)
# no non-numeric frames or series allowed
if is_empty:
raise TypeError("no numeric data to plot")
# GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to
# np.ndarray before plot.
numeric_data = numeric_data.copy()
for col in numeric_data:
numeric_data[col] = np.asarray(numeric_data[col])
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic_common(self, ax, data):
"""Common post process for each axes"""
if self.orientation == "vertical" or self.orientation is None:
self._apply_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
elif self.orientation == "horizontal":
self._apply_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
else: # pragma no cover
raise ValueError
def _post_plot_logic(self, ax, data):
"""Post process for each axes. Overridden in child classes"""
pass
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_subplots()
nrows, ncols = self._get_axes_layout()
handle_shared_axes(
axarr=all_axes,
nplots=len(all_axes),
naxes=nrows * ncols,
nrows=nrows,
ncols=ncols,
sharex=self.sharex,
sharey=self.sharey,
)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
# GH9093, currently Pandas does not show ylabel, so if users provide
# ylabel will set it as ylabel in the plot.
if self.ylabel is not None:
ax.set_ylabel(pprint_thing(self.ylabel))
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if len(self.title) != self.nseries:
raise ValueError(
"The length of `title` must equal the number "
"of columns if using `title` of type `list` "
"and `subplots=True`.\n"
f"length of title = {len(self.title)}\n"
f"number of columns = {self.nseries}"
)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if is_list_like(self.title):
msg = (
"Using `title` of type `list` is not supported "
"unless `subplots=True` is passed"
)
raise ValueError(msg)
self.axes[0].set_title(self.title)
def _apply_axis_properties(self, axis: "Axis", rot=None, fontsize=None):
"""
Tick creation within matplotlib is reasonably expensive and is
internally deferred until accessed as Ticks are created/destroyed
multiple times per draw. It's therefore beneficial for us to avoid
accessing unless we will act on the Tick.
"""
if rot is not None or fontsize is not None:
# rot=0 is a valid setting, hence the explicit None check
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self) -> Optional[str]:
if not isinstance(self.data.columns, ABCMultiIndex):
name = self.data.columns.name
if name is not None:
name = pprint_thing(name)
return name
else:
stringified = map(pprint_thing, self.data.columns.names)
return ",".join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + " (right)"
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg, handle = self._get_ax_legend_handle(self.axes[0])
handles = []
labels = []
title = ""
if not self.subplots:
if leg is not None:
title = leg.get_title().get_text()
# Replace leg.LegendHandles because it misses marker info
handles.extend(handle)
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == "reverse":
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc="best", title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc="best")
def _get_ax_legend_handle(self, ax: "Axes"):
"""
Take in axes and return ax, legend and handle under different scenarios
"""
leg = ax.get_legend()
# Get handle from axes
handle, _ = ax.get_legend_handles_labels()
other_ax = getattr(ax, "left_ax", None) or getattr(ax, "right_ax", None)
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg, handle
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period: bool = False):
index = self.data.index
is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time")
if self.use_index:
if convert_period and isinstance(index, ABCPeriodIndex):
self.data = self.data.reindex(index=index.sort_values())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data[notna(self.data.index)]
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = list(range(len(index)))
else:
x = list(range(len(index)))
return x
@classmethod
@register_pandas_matplotlib_converters
def _plot(cls, ax: "Axes", x, y, style=None, is_errorbar: bool = False, **kwds):
mask = isna(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if isinstance(x, ABCIndexClass):
x = x._mpl_repr()
if is_errorbar:
if "xerr" in kwds:
kwds["xerr"] = np.array(kwds.get("xerr"))
if "yerr" in kwds:
kwds["yerr"] = np.array(kwds.get("yerr"))
return ax.errorbar(x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is
# unsupported
if style is not None:
args = (x, y, style)
else:
args = (x, y) # type: ignore[assignment]
return ax.plot(*args, **kwds)
def _get_index_name(self) -> Optional[str]:
if isinstance(self.data.index, ABCMultiIndex):
name = self.data.index.names
if com.any_not_none(*name):
name = ",".join(pprint_thing(x) for x in name)
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = pprint_thing(name)
# GH 9093, override the default xlabel if xlabel is provided.
if self.xlabel is not None:
name = pprint_thing(self.xlabel)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, "left_ax", ax)
else:
return getattr(ax, "right_ax", ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
@classmethod
def get_default_ax(cls, ax):
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
with plt.rc_context():
ax = plt.gca()
ax = cls._get_ax_layer(ax)
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, ABCIndexClass)):
return self.data.columns[i] in self.secondary_y
def _apply_style_colors(self, colors, kwds, col_num, label):
"""
Manage style and color based on column number and its label.
Returns tuple of appropriate style and kwds which "color" may be added.
"""
style = None
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[col_num]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(label, style)
else:
style = self.style
has_color = "color" in kwds or self.colormap is not None
nocolor_style = style is None or not _color_in_style(style)
if (has_color or self.subplots) and nocolor_style:
if isinstance(colors, dict):
kwds["color"] = colors[label]
else:
kwds["color"] = colors[col_num % len(colors)]
return style, kwds
def _get_colors(self, num_colors=None, color_kwds="color"):
if num_colors is None:
num_colors = self.nseries
return get_standard_colors(
num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds),
)
def _parse_errorbars(self, label, err):
"""
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
        Asymmetrical error bars are also supported; however, raw error values
        must be provided in this case. For an ``N``-length :class:`Series`, a
``2xN`` array should be provided indicating lower and upper (or left
and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors
should be in a ``Mx2xN`` array.
"""
if err is None:
return None
def match_labels(data, e):
e = e.reindex(data.index)
return e
# key-matched DataFrame
if isinstance(err, ABCDataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, ABCSeries):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, str):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif is_list_like(err):
if is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if isinstance(self.data, ABCSeries) and err_shape[0] == 2:
err = np.expand_dims(err, 0)
err_shape = err.shape
if err_shape[2] != len(self.data):
raise ValueError(
"Asymmetrical error bars should be provided "
f"with the shape (2, {len(self.data)})"
)
elif isinstance(self.data, ABCDataFrame) and err.ndim == 3:
if (
(err_shape[0] != self.nseries)
or (err_shape[1] != 2)
or (err_shape[2] != len(self.data))
):
raise ValueError(
"Asymmetrical error bars should be provided "
f"with the shape ({self.nseries}, 2, {len(self.data)})"
)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = f"No valid {label} detected"
raise ValueError(msg)
return err
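    # Hedged illustration (comment only, not part of the pandas source): the
    # formats parsed above correspond to user calls such as
    #   df.plot(yerr="err_col")        # column name inside the plotted frame
    #   df.plot(yerr=err_frame)        # key-matched DataFrame of error values
    #   df.plot(yerr=err_array)        # raw symmetric errors, len == len(df)
    # with asymmetric errors supplied as a (2, N) array per plotted series.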
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
errors = {}
for kw, flag in zip(["xerr", "yerr"], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (ABCDataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_subplots(self):
from matplotlib.axes import Subplot
return [
ax for ax in self.axes[0].get_figure().get_axes() if isinstance(ax, Subplot)
]
def _get_axes_layout(self) -> Tuple[int, int]:
axes = self._get_subplots()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class PlanePlot(MPLPlot):
"""
Abstract class for plotting on plane, currently scatter and hexbin.
"""
_layout_type = "single"
def __init__(self, data, x, y, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError(self._kind + " requires an x and y column")
if is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
# Scatter plot allows to plot objects data
if self._kind == "hexbin":
if len(self.data[x]._get_numeric_data()) == 0:
raise ValueError(self._kind + " requires x column to be numeric")
if len(self.data[y]._get_numeric_data()) == 0:
raise ValueError(self._kind + " requires y column to be numeric")
self.x = x
self.y = y
@property
def nseries(self) -> int:
return 1
def _post_plot_logic(self, ax: "Axes", data):
x, y = self.x, self.y
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
def _plot_colorbar(self, ax: "Axes", **kwds):
# Addresses issues #10611 and #10678:
# When plotting scatterplots and hexbinplots in IPython
# inline backend the colorbar axis height tends not to
# exactly match the parent axis height.
# The difference is due to small fractional differences
# in floating points with similar representation.
# To deal with this, this method forces the colorbar
# height to take the height of the parent axes.
# For a more detailed description of the issue
# see the following link:
# https://github.com/ipython/ipython/issues/11215
# GH33389, if ax is used multiple times, we should always
# use the last one which contains the latest information
# about the ax
img = ax.collections[-1]
cbar = self.fig.colorbar(img, ax=ax, **kwds)
if mpl_ge_3_0_0():
# The workaround below is no longer necessary.
return
points = ax.get_position().get_points()
cbar_points = cbar.ax.get_position().get_points()
cbar.ax.set_position(
[
cbar_points[0, 0],
points[0, 1],
cbar_points[1, 0] - cbar_points[0, 0],
points[1, 1] - points[0, 1],
]
)
# To see the discrepancy in axis heights uncomment
# the following two lines:
# print(points[1, 1] - points[0, 1])
# print(cbar_points[1, 1] - cbar_points[0, 1])
class ScatterPlot(PlanePlot):
_kind = "scatter"
def __init__(self, data, x, y, s=None, c=None, **kwargs):
if s is None:
# hide the matplotlib default for size, in case we want to change
# the handling of this argument later
s = 20
elif is_hashable(s) and s in data.columns:
s = data[s]
super().__init__(data, x, y, s=s, **kwargs)
if is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.c = c
def _make_plot(self):
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = is_hashable(c) and c in self.data.columns
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or "Greys"
cmap = self.plt.cm.get_cmap(cmap)
color = self.kwds.pop("color", None)
if c is not None and color is not None:
raise TypeError("Specify exactly one of `c` and `color`")
elif c is None and color is None:
c_values = self.plt.rcParams["patch.facecolor"]
elif color is not None:
c_values = color
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
# plot colorbar if
# 1. colormap is assigned, and
# 2.`c` is a column containing only numeric values
plot_colorbar = self.colormap or c_is_column
cb = self.kwds.pop("colorbar", is_numeric_dtype(c_values) and plot_colorbar)
if self.legend and hasattr(self, "label"):
label = self.label
else:
label = None
scatter = ax.scatter(
data[x].values,
data[y].values,
c=c_values,
label=label,
cmap=cmap,
**self.kwds,
)
if cb:
cbar_label = c if c_is_column else ""
self._plot_colorbar(ax, label=cbar_label)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds["ecolor"] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle="none", **err_kwds)
class HexBinPlot(PlanePlot):
_kind = "hexbin"
def __init__(self, data, x, y, C=None, **kwargs):
super().__init__(data, x, y, **kwargs)
if is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.C = C
def _make_plot(self):
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or "BuGn"
cmap = self.plt.cm.get_cmap(cmap)
cb = self.kwds.pop("colorbar", True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap, **self.kwds)
if cb:
self._plot_colorbar(ax)
def _make_legend(self):
pass
class LinePlot(MPLPlot):
_kind = "line"
_default_rot = 0
orientation = "vertical"
def __init__(self, data, **kwargs):
from pandas.plotting import plot_params
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params["x_compat"]
if "x_compat" in self.kwds:
self.x_compat = bool(self.kwds.pop("x_compat"))
def _is_ts_plot(self) -> bool:
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _use_dynamic_x(self):
return use_dynamic_x(self._get_ax(0), self.data)
def _make_plot(self):
if self._is_ts_plot():
data = maybe_convert_index(self._get_ax(0), self.data)
x = data.index # dummy, not used
plotf = self._ts_plot
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._plot
it = self._iter_data()
stacking_id = self._get_stacking_id()
is_errorbar = com.any_not_none(*self.errors.values())
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
kwds = self.kwds.copy()
style, kwds = self._apply_style_colors(colors, kwds, i, label)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = pprint_thing(label) # .encode('utf-8')
kwds["label"] = label
newlines = plotf(
ax,
x,
y,
style=style,
column_num=i,
stacking_id=stacking_id,
is_errorbar=is_errorbar,
**kwds,
)
self._add_legend_handle(newlines[0], label, index=i)
if self._is_ts_plot():
# reset of xlim should be used for ts data
# TODO: GH28021, should find a way to change view limit on xaxis
lines = get_all_lines(ax)
left, right = get_xlim(lines)
ax.set_xlim(left, right)
@classmethod
def _plot(
cls, ax: "Axes", x, y, style=None, column_num=None, stacking_id=None, **kwds
):
# column_num is used to get the target column from plotf in line and
# area plots
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(y))
y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"])
lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds)
cls._update_stacker(ax, stacking_id, y)
return lines
@classmethod
def _ts_plot(cls, ax: "Axes", x, data, style=None, **kwds):
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
# column_num must be in kwds for stacking purpose
freq, data = maybe_resample(data, ax, kwds)
# Set ax with freq info
decorate_axes(ax, freq, kwds)
# digging deeper
if hasattr(ax, "left_ax"):
decorate_axes(ax.left_ax, freq, kwds)
if hasattr(ax, "right_ax"):
decorate_axes(ax.right_ax, freq, kwds)
ax._plot_data.append((data, cls._kind, kwds))
lines = cls._plot(ax, data.index, data.values, style=style, **kwds)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq, data.index)
return lines
def _get_stacking_id(self):
if self.stacked:
return id(self.data)
else:
return None
@classmethod
def _initialize_stacker(cls, ax: "Axes", stacking_id, n: int):
if stacking_id is None:
return
if not hasattr(ax, "_stacker_pos_prior"):
ax._stacker_pos_prior = {}
if not hasattr(ax, "_stacker_neg_prior"):
ax._stacker_neg_prior = {}
ax._stacker_pos_prior[stacking_id] = np.zeros(n)
ax._stacker_neg_prior[stacking_id] = np.zeros(n)
@classmethod
def _get_stacked_values(cls, ax: "Axes", stacking_id, values, label):
if stacking_id is None:
return values
if not hasattr(ax, "_stacker_pos_prior"):
# stacker may not be initialized for subplots
cls._initialize_stacker(ax, stacking_id, len(values))
if (values >= 0).all():
return ax._stacker_pos_prior[stacking_id] + values
elif (values <= 0).all():
return ax._stacker_neg_prior[stacking_id] + values
raise ValueError(
"When stacked is True, each column must be either "
"all positive or negative."
f"{label} contains both positive and negative values"
)
@classmethod
def _update_stacker(cls, ax: "Axes", stacking_id, values):
if stacking_id is None:
return
if (values >= 0).all():
ax._stacker_pos_prior[stacking_id] += values
elif (values <= 0).all():
ax._stacker_neg_prior[stacking_id] += values
def _post_plot_logic(self, ax: "Axes", data):
from matplotlib.ticker import FixedLocator
def get_label(i):
if is_float(i) and i.is_integer():
i = int(i)
try:
return pprint_thing(data.index[i])
except Exception:
return ""
if self._need_to_set_index:
xticks = ax.get_xticks()
xticklabels = [get_label(x) for x in xticks]
ax.xaxis.set_major_locator(FixedLocator(xticks))
ax.set_xticklabels(xticklabels)
# If the index is an irregular time series, then by default
# we rotate the tick labels. The exception is if there are
        # subplots which don't share their x-axes, in which case
# we don't rotate the ticklabels as by default the subplots
# would be too close together.
condition = (
not self._use_dynamic_x()
and (data.index.is_all_dates and self.use_index)
and (not self.subplots or (self.subplots and self.sharex))
)
index_name = self._get_index_name()
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
_kind = "area"
def __init__(self, data, **kwargs):
kwargs.setdefault("stacked", True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault("alpha", 0.5)
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
@classmethod
def _plot(
cls,
ax: "Axes",
x,
y,
style=None,
column_num=None,
stacking_id=None,
is_errorbar=False,
**kwds,
):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(y))
y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"])
# need to remove label, because subplots uses mpl legend as it is
line_kwds = kwds.copy()
line_kwds.pop("label")
lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
# unable to use ``_get_stacked_values`` here to get starting point
if stacking_id is None:
start = np.zeros(len(y))
elif (y >= 0).all():
start = ax._stacker_pos_prior[stacking_id]
elif (y <= 0).all():
start = ax._stacker_neg_prior[stacking_id]
else:
start = np.zeros(len(y))
if "color" not in kwds:
kwds["color"] = lines[0].get_color()
rect = ax.fill_between(xdata, start, y_values, **kwds)
cls._update_stacker(ax, stacking_id, y)
# LinePlot expects list of artists
res = [rect]
return res
def _post_plot_logic(self, ax: "Axes", data):
LinePlot._post_plot_logic(self, ax, data)
if self.ylim is None:
if (data >= 0).all().all():
ax.set_ylim(0, None)
elif (data <= 0).all().all():
ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
_kind = "bar"
_default_rot = 90
orientation = "vertical"
def __init__(self, data, **kwargs):
# we have to treat a series differently than a
# 1-column DataFrame w.r.t. color handling
self._is_series = isinstance(data, ABCSeries)
self.bar_width = kwargs.pop("width", 0.5)
pos = kwargs.pop("position", 0.5)
kwargs.setdefault("align", "center")
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop("bottom", 0)
self.left = kwargs.pop("left", 0)
self.log = kwargs.pop("log", False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs["align"] == "edge":
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs["align"] == "edge":
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if is_list_like(self.left):
self.left = np.array(self.left)
@classmethod
def _plot(cls, ax: "Axes", x, y, w, start=0, log=False, **kwds):
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
@property
def _start_base(self):
return self.bottom
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
if self._is_series:
kwds["color"] = colors
elif isinstance(colors, dict):
kwds["color"] = colors[label]
else:
kwds["color"] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = pprint_thing(label)
if (("yerr" in kwds) or ("xerr" in kwds)) and (kwds.get("ecolor") is None):
kwds["ecolor"] = mpl.rcParams["xtick.color"]
start = 0
if self.log and (y >= 1).all():
start = 1
start = start + self._start_base
if self.subplots:
w = self.bar_width / 2
rect = self._plot(
ax,
self.ax_pos + w,
y,
self.bar_width,
start=start,
label=label,
log=self.log,
**kwds,
)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior) + self._start_base
w = self.bar_width / 2
rect = self._plot(
ax,
self.ax_pos + w,
y,
self.bar_width,
start=start,
label=label,
log=self.log,
**kwds,
)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = self._plot(
ax,
self.ax_pos + (i + 0.5) * w,
y,
w,
start=start,
label=label,
log=self.log,
**kwds,
)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self, ax: "Axes", data):
if self.use_index:
str_index = [pprint_thing(key) for key in data.index]
else:
str_index = [pprint_thing(key) for key in range(data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
self._decorate_ticks(ax, name, str_index, s_edge, e_edge)
def _decorate_ticks(self, ax: "Axes", name, ticklabels, start_edge, end_edge):
ax.set_xlim((start_edge, end_edge))
if self.xticks is not None:
ax.set_xticks(np.array(self.xticks))
else:
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(ticklabels)
if name is not None and self.use_index:
ax.set_xlabel(name)
class BarhPlot(BarPlot):
_kind = "barh"
_default_rot = 0
orientation = "horizontal"
@property
def _start_base(self):
return self.left
@classmethod
def _plot(cls, ax: "Axes", x, y, w, start=0, log=False, **kwds):
return ax.barh(x, y, w, left=start, log=log, **kwds)
def _decorate_ticks(self, ax: "Axes", name, ticklabels, start_edge, end_edge):
# horizontal bars
ax.set_ylim((start_edge, end_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(ticklabels)
if name is not None and self.use_index:
ax.set_ylabel(name)
class PiePlot(MPLPlot):
_kind = "pie"
_layout_type = "horizontal"
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError(f"{kind} doesn't allow negative values")
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
colors = self._get_colors(num_colors=len(self.data), color_kwds="colors")
self.kwds.setdefault("colors", colors)
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ""
else:
return label
idx = [pprint_thing(v) for v in self.data.index]
labels = kwds.pop("labels", idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(l, value) for l, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get("autopct", None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
| bsd-3-clause |
nan86150/ImageFusion | lib/python2.7/site-packages/numpy/core/tests/test_multiarray.py | 23 | 175667 | from __future__ import division, absolute_import, print_function
import tempfile
import sys
import os
import shutil
import warnings
import operator
import io
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.core import *
from numpy.compat import asbytes, getexception, strchar, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = arange(10)
self.two = arange(20).reshape(4, 5)
self.three = arange(60, dtype=float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, dtype(int_))
assert_equal(self.three.dtype, dtype(float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides=strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = empty((3, 2, 1), t)
y = empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = empty((3, 2, 1), dtype=uint64)
y = empty((3, 2, 1), dtype=uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = dtype('i4')
assert_equal(d1, dtype(int32))
d2 = dtype('f8')
assert_equal(d2, dtype(float64))
def test_byteorders(self):
self.assertNotEqual(dtype('<i4'), dtype('>i4'))
self.assertNotEqual(dtype([('a', '<i4')]), dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = array(0), array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[array([], int)], a)
self.assertRaises(IndexError, lambda x: x[array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[newaxis].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ...].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1))
self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1))
self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1))
self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i): x[i]
self.assertRaises(IndexError, subscript, a, (newaxis, 0))
self.assertRaises(IndexError, subscript, a, (newaxis,)*50)
def test_constructor(self):
x = ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = array(2)
self.assertRaises(ValueError, add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[newaxis].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ...].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1))
self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1))
self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1))
self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i): x[i]
self.assertRaises(IndexError, subscript, a, (newaxis, 0))
self.assertRaises(IndexError, subscript, a, (newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
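# Illustrative sketch (added for exposition, not collected as a test): when the
# source and destination of a slice assignment overlap in memory, the result is
# the same as if the right-hand side had been copied out first -- the property
# test_overlapping_assignment checks above.
def _demo_overlapping_assignment():
    a = np.arange(4)
    expected = a[1:].copy()        # snapshot of the RHS before writing
    a[:-1] = a[1:]
    assert_equal(a[:-1], expected)
    assert_equal(a, [1, 2, 3, 3])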
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, array, x())
def test_from_string(self) :
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = array([123, 123], dtype=int)
for type in types :
msg = 'String conversion for %s' % type
assert_equal(array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
        # test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = zeros((3,3), dtype=c)
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = zeros((3,3), dtype='S5')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='U5')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='<i4')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='>i4')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='<M8[s]')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='>M8[s]')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='f4,f4')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type, indicated by
        raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
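# Illustrative sketch (added for exposition, not collected as a test): np.array
# also accepts objects that only implement __getitem__ and signal their end by
# raising IndexError (gh-5100 above); such inputs are stored as object arrays.
def _demo_object_without_len():
    class PairLike(object):
        def __getitem__(self, ind):
            if ind in (0, 1):
                return ind
            raise IndexError()
    d = np.array([PairLike(), PairLike()])
    assert_equal(d.dtype, np.dtype(object))
    assert_equal(d.shape, (2,))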
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a==b, [True, True])
assert_equal(a!=b, [False, False])
b[1].b = 'c'
assert_equal(a==b, [True, False])
assert_equal(a!=b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a==b, [False, False])
assert_equal(a!=b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a==b, [False, True])
assert_equal(a!=b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a==b, [[True, True, False], [False, False, True]])
assert_equal(b==a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a==b, [[True, True, False], [False, False, True]])
assert_equal(b==a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a==b, [[True, False, False], [False, False, True]])
assert_equal(b==a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a==b, [[True, False, False], [False, False, True]])
assert_equal(b==a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
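# Illustrative sketch (added for exposition, not collected as a test): with
# casting='equiv', astype() may change the byte order of a structured dtype
# while preserving the stored values, one of the rules test_casting checks.
def _demo_structured_byteorder_cast():
    a = np.array([(1, 2.0)], dtype=[('x', '<i4'), ('y', '<f8')])
    t = [('x', '>i4'), ('y', '>f8')]
    assert_(np.can_cast(a.dtype, t, casting='equiv'))
    c = a.astype(t, casting='equiv')
    assert_equal(c['x'], a['x'])
    assert_equal(c['y'], a['y'])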
class TestBool(TestCase):
def test_test_interning(self):
a0 = bool_(0)
b0 = bool_(False)
self.assertTrue(a0 is b0)
a1 = bool_(1)
b1 = bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(array([True])[0] is a1)
self.assertTrue(array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool);
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
        # prevent mistakes such as gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
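# Illustrative sketch (added for exposition, not collected as a test):
# np.count_nonzero on a boolean array counts each non-zero byte once, which is
# why check_count_nonzero above can scribble other non-zero values into the
# uint8 view without changing the count.
def _demo_count_nonzero_bool():
    a = np.array([True, False, True, True])
    assert_equal(np.count_nonzero(a), 3)
    a.view(np.uint8)[0] = 7        # still truthy, still counted once
    assert_equal(np.count_nonzero(a), 3)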
class TestMethods(TestCase):
def test_test_round(self):
assert_equal(array([1.2, 1.5]).round(), [1, 2])
assert_equal(array(1.5).round(), 2)
assert_equal(array([12.2, 15.5]).round(-1), [10, 20])
assert_equal(array([12.15, 15.51]).round(1), [12.2, 15.5])
def test_transpose(self):
a = array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
        # necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "scalar sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
        # but the comparison function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h'] :
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h'] :
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "string sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "unicode sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "object sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = array([(i, i) for i in range(101)], dtype = dt)
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "object sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "datetime64 sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1=np.array([21, 32, 14])
x2=np.array(['my', 'first', 'name'])
x3=np.array([3.1, 4.5, 6.2])
r=np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, array([14, 21, 32]))
assert_equal(r.word, array(['name', 'my', 'first']))
assert_equal(r.number, array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, array([32, 21, 14]))
assert_equal(r.word, array(['first', 'my', 'name']))
assert_equal(r.number, array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, array([21, 32, 14]))
assert_equal(r.word, array(['my', 'first', 'name']))
assert_equal(r.number, array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype= mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
        # but the comparison function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h'] :
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h'] :
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = array([(i, i) for i in range(101)], dtype = dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm'] :
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm'] :
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
        # using axis=None is known to fail at this point
        #assert_equal(a.copy().argsort(axis=None), c)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(s)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones((1))
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones((50))
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange((49))
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange((47))[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
            # median-of-3 killer: O(n^2) for a pure median-of-3 pivot
            # quickselect; exercises the median-of-median-of-5 code used
            # to keep the worst case O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10); d[1] = 4;
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange((47)) % 7
tgt = np.sort(np.arange((47)) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            # list() forces evaluation so the rows are actually shuffled on Python 3
            list(map(np.random.shuffle, d1))
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # list() forces evaluation so the rows are actually shuffled on Python 3
        list(map(np.random.shuffle, d1))
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
@dec.skipif(True) # ufunc override disabled for 1.9
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1,:]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
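# Illustrative sketch (added for exposition, not collected as a test):
# searchsorted(..., sorter=s) searches an unsorted array through the
# permutation returned by argsort and yields the same insertion points as
# searching a sorted copy -- the property test_searchsorted_with_sorter uses.
def _demo_searchsorted_sorter():
    a = np.array([5, 2, 1, 3, 4])
    s = a.argsort()
    keys = [0, 2, 6]
    assert_equal(a.searchsorted(keys, sorter=s),
                 np.sort(a).searchsorted(keys))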
class TestBinop(object):
@dec.skipif(True) # ufunc override disabled for 1.9
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(ndarray):
pass
class OtherNdarraySubclassWithOverride(ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
# Check behavior against both bare ndarray objects and a
# ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
# Check that ufunc call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
@dec.skipif(True) # ufunc override disabled for 1.9
def test_ufunc_override_rop_simple(self):
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply:
return "ufunc"
else:
inputs = list(inputs)
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
r = func(*inputs, **kw)
if 'out' in kw:
return r
else:
x = SomeClass2(r.shape, dtype=r.dtype)
x[...] = r
return x
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
assert_equal(obj * arr, 123)
assert_equal(arr * obj, 321)
assert_equal(arr > obj, "nope")
assert_equal(arr < obj, "yep")
assert_equal(np.multiply(arr, obj), "ufunc")
arr *= obj
assert_equal(arr, 321)
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
assert_equal(np.multiply(arr, obj2), "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
transpose(carray),
array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return loads(obj, encoding='latin1')
else:
return loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = array([1, 2, 3, 4], dtype=int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = array([1, 2, 3, 4], dtype=int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, array([[2.0]]))
x = ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, array([[[2.0]]]))
def test_tuple(self):
x = ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, array([[2.0]]))
x = ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, array([[[2.0]]]))
def test_mask(self):
x = array([1, 2, 3, 4])
m = array([0, 1], bool)
assert_array_equal(x[m], array([2]))
def test_mask2(self):
x = array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = array([0, 1], bool)
m2 = array([[0, 1], [1, 0]], bool)
m3 = array([[0, 1]], bool)
assert_array_equal(x[m], array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], array([2, 5]))
assert_array_equal(x[m3], array([2]))
def test_assign_mask(self):
x = array([1, 2, 3, 4])
m = array([0, 1], bool)
x[m] = 5
assert_array_equal(x, array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = array([0, 1], bool)
m2 = array([[0, 1], [1, 0]], bool)
m3 = array([[0, 1]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = array(["This", "is", "example"])
g2 = array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = array([sixu("This"), sixu("is"), sixu("example")])
g2 = array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r"%arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r"%arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
# these could be relaxed possibly (used to allow even the previous)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
class TestArgmin(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r"%arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r"%arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
# these could be relaxed possibly (used to allow even the previous)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
class TestNewaxis(TestCase):
def test_basic(self):
sk = array([0, -0.1, 0.1])
res = 250*sk[:, newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self,type_group,array_max,
clip_min,clip_max,inplace=False,
expected_min=None,expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|': byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type('float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type('float', 1024, 0, 0, inplace=inplace)
self._clip_type('int', 1024, -120, 100.5, inplace=inplace)
self._clip_type('int', 1024, 0, 0, inplace=inplace)
x = self._clip_type('uint', 1024, -120, 100, expected_min=0,
inplace=inplace)
x = self._clip_type('uint', 1024, 0, 0, inplace=inplace)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
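# Illustrative sketch (added, not part of the original suite): np.lexsort sorts by
# the *last* key first, so in TestLexsort.test_basic above `a` is the primary key
# and `b` only breaks ties.
def _demo_lexsort_key_order():
    primary = np.array([1, 1, 0])
    secondary = np.array([0, 1, 5])
    idx = np.lexsort((secondary, primary))  # primary key is the last argument
    assert_array_equal(idx, [2, 0, 1])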
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [nan, inf, -inf, nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str carries less precision than repr
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from("nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[nan, nan, nan, nan, nan, nan, nan],
sep=' ')
def test_inf(self):
self._check_from("inf +inf -inf infinity -Infinity iNfInItY -inF",
[inf, inf, -inf, inf, -inf, inf, -inf], sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
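# Usage sketch (added, not part of the original suite) of the text round trip that
# _check_from and test_tofile_sep above exercise: a separator-joined string parses
# back with np.fromstring(..., sep=...).
def _demo_text_roundtrip():
    x = np.array([1.5, 2.0, 3.5])
    s = ','.join(str(v) for v in x)
    y = np.fromstring(s, sep=',')
    assert_array_equal(x, y)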
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
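# Sketch (added, not part of the original suite): np.frombuffer reinterprets an
# existing buffer without copying, so the result does not own its data -- the
# same buffers that tst_basic above round-trips.
def _demo_frombuffer_no_copy():
    buf = np.arange(4, dtype='<i4').tobytes()
    y = np.frombuffer(buf, dtype='<i4')
    assert_array_equal(y, [0, 1, 2, 3])
    assert_(not y.flags.owndata)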
class TestFlat(TestCase):
def setUp(self):
a0 = arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_invalid_arguements(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
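# Sketch (added, not part of the original suite): TestResize.test_check_reference
# above relies on ndarray.resize refusing to reallocate while another reference
# to the array exists; passing refcheck=False skips that check.
def _demo_resize_refcheck():
    x = np.eye(3)
    y = x  # a second reference normally makes x.resize(...) raise ValueError
    x.resize((9,), refcheck=False)
    assert_equal(x.shape, (9,))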
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(ValueError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(ValueError, a.__setitem__, asbytes('f1'), 1)
assert_raises(ValueError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(IndexError, b[0].__setitem__, fnn, 1)
assert_raises(IndexError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype=[('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warning_types(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warning_types(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warning_types(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there are
# multiple views involved):
assert_equal(collect_warning_types(subset['f1'].__setitem__, 0, 10),
[])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
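# Worked detail (added comment, not original) for TestView.test_basic above:
# packing the int8 fields little-endian gives the two expected '<i4' values.
def _demo_view_packing():
    assert_equal(1 + 2 * 2**8 + 3 * 2**16 + 4 * 2**24, 67305985)
    assert_equal(5 + 6 * 2**8 + 7 * 2**16 + 8 * 2**24, 134678021)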
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
mat = np.eye(3)
# stats for integer types
# fixme:
# this needs definition, as there are lots of places along the line
# where type casting may take place.
#for f in self.funcs:
#for c in icodes:
#tgt = np.dtype(c).type
#res = f(mat, axis=1, dtype=c).dtype.type
#assert_(res is tgt)
## scalar case
#res = f(mat, axis=None, dtype=c).dtype.type
#assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in fcodes:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestDot(TestCase):
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*ones((3,), dtype=int)
self.y = 3*ones((3,), dtype=int)
self.x2 = 2*ones((2, 3), dtype=int)
self.y2 = 3*ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking circular on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
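# Additional illustrative cases (added, not in the original tests):
# np.min_scalar_type picks signed types for negative scalars.
def _demo_min_scalar_type_signed():
    assert_equal(np.min_scalar_type(-1), np.dtype('int8'))
    assert_equal(np.min_scalar_type(-2**15), np.dtype('int16'))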
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
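# The VV helpers above round a byte count up to the next multiple of the 'i'
# alignment; a tiny worked check of that formula (sketch, with an alignment of 4
# assumed purely for illustration).
def _demo_round_up_to_alignment():
    align = 4  # assumed alignment, for illustration only
    def round_up(n):
        return align * (1 + (n - 1) // align)
    assert_equal([round_up(n) for n in (1, 2, 3, 4, 5)], [4, 4, 4, 4, 8])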
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([dtype(b).itemsize for a, b in dt])
if dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
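# Cross-check (sketch, not part of the original suite) of the half-precision
# limits quoted in TestNewBufferProtocol.test_roundtrip_half above.
def _demo_half_precision_limits():
    info = np.finfo(np.float16)
    assert_equal(float(info.max), 65504.0)    # 6.5504 * 10**4, largest finite half
    assert_equal(float(info.tiny), 2.0**-14)  # smallest positive normal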
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr' : '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
# needs to be larger than the limit of the small-memory cacher in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[ 0., 1., 2., 19.,],
[ 104., 5., 6., 7.,],
[ 8., 9., 40., 11.,]])
b = arange(6).astype(float)
index = (array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [ 100.1, 51., 6., 3., 4., 5. ])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class PriorityNdarray():
__array_priority__ = 1000
def __init__(self, array):
self.array = array
def __lt__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array < array)
def __gt__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array > array)
def __le__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array <= array)
def __ge__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array >= array)
def __eq__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array == array)
def __ne__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array != array)
# dtype used by the comparison tests below (assumed double precision; its
# definition is not part of this excerpt).
dtype = np.float64
class TestArrayPriority(TestCase):
def test_lt(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l < r
res2 = l < rp
res3 = lp < r
res4 = lp < rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
def test_gt(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l > r
res2 = l > rp
res3 = lp > r
res4 = lp > rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
def test_le(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l <= r
res2 = l <= rp
res3 = lp <= r
res4 = lp <= rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
def test_ge(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l >= r
res2 = l >= rp
res3 = lp >= r
res4 = lp >= rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
def test_eq(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l == r
res2 = l == rp
res3 = lp == r
res4 = lp == rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
def test_ne(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l != r
res2 = l != rp
res3 = lp != r
res4 = lp != rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
#All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object);
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
if __name__ == "__main__":
run_module_suite()
| mit |
ltiao/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
ScottHull/Exoplanet-Pocketknife | old/hefestofilewriter.py | 1 | 8686 | import os
import shutil
import pandas as pd
class _AtomicWeights:
def __init__(self):
self.fe = 55.845
self.mg = 24.305
self.si = 28.086
self.ca = 40.078
self.al = 26.982
self.ti = 47.867
self.na = 22.99
self.o = 15.99
class _OxideCationNumbers:
def __init__(self):
self.fe = 1 # feo, 1
self.mg = 1 # mgo, 1
self.si = 1 # sio2, 1
self.ca = 1 # cao, 1
self.al = 2 # al2o3, 2
self.ti = 1 # tio2, 1
self.na = 2 # na2o, 2
class _OxideWeights:
def __init__(self):
self.atomic_weights = _AtomicWeights()
self.oxide_cation_numbers = _OxideCationNumbers()
self.feo = (self.atomic_weights.fe * self.oxide_cation_numbers.fe) + self.atomic_weights.o
self.mgo = (self.atomic_weights.mg * self.oxide_cation_numbers.mg) + self.atomic_weights.o
self.sio2 = (self.atomic_weights.si * self.oxide_cation_numbers.si) + (self.atomic_weights.o * 2)
self.cao = (self.atomic_weights.ca * self.oxide_cation_numbers.ca) + self.atomic_weights.o
self.al2o3 = (self.atomic_weights.al * self.oxide_cation_numbers.al) + (self.atomic_weights.o * 3)
self.tio2 = (self.atomic_weights.ti * self.oxide_cation_numbers.ti) + (self.atomic_weights.o * 2)
self.na2o = (self.atomic_weights.na * self.oxide_cation_numbers.na) + self.atomic_weights.o
class HeFESTpFileWriter:
def __init__(self, from_path, to_path, temperatures, material):
self.df = pd.read_csv(from_path)
self.to_path = to_path
self.temperatures = temperatures
self.material = material
self.BSP_FILE_FORMAT = "0,20,80,{},0,-2,0\n6,2,4,2\noxides\nSi {} 5.39386 0\nMg {} 2.71075 0\n" \
"Fe {} .79840 0\nCa {} .31431 0\nAl {} .96680 0\n" \
"Na {} .40654 0\n1,1,1\ninv251010\n47\nphase plg\n1\nan\nab\nphase sp\n0\nsp\nhc\n" \
"phase opx\n1\nen\nfs\nmgts\nodi\nphase c2c\n0\nmgc2\nfec2\nphase cpx\n1\ndi\nhe\ncen\ncats\njd\n" \
"phase gt\n0\npy\nal\ngr\nmgmj\njdmj\nphase cpv\n0\ncapv\nphase ol\n1\nfo\nfa\nphase wa\n0\nmgwa\nfewa\n" \
"phase ri\n0\nmgri\nferi\nphase il\n0\nmgil\nfeil\nco\nphase pv\n0\nmgpv\nfepv\nalpv\nphase ppv\n0\nmppv\n" \
"fppv\nappv\nphase cf\n0\nmgcf\nfecf\nnacf\nphase mw\n0\npe\nwu\nphase qtz\n1\nqtz\nphase coes\n0\ncoes\n" \
"phase st\n0\nst\nphase apbo\n0\napbo\nphase ky\n0\nky\nphase neph\n0\nneph"
self.MORB_FILE_FORMAT = "0,20,80,{},0,-2,0\n6,2,4,2\noxides\nSi {} 5.33159 0\n" \
"Mg {} 1.37685 0\nFe {} .55527 0\n" \
"Ca {} 1.33440 0\nAl {} 1.82602 0\n" \
"Na {} 0.71860 0\n1,1,1\ninv251010\n47\nphase plg\n1\nan\nab\nphase sp\n0\nsp\n" \
"hc\nphase opx\n1\nen\nfs\nmgts\nodi\nphase c2c\n0\nmgc2\nfec2\nphase cpx\n1\ndi\nhe\ncen\ncats\n" \
"jd\nphase gt\n0\npy\nal\ngr\nmgmj\njdmj\nphase cpv\n0\ncapv\nphase ol\n1\nfo\nfa\nphase wa\n0\n" \
"mgwa\nfewa\nphase ri\n0\nmgri\nferi\nphase il\n0\nmgil\nfeil\nco\nphase pv\n0\nmgpv\nfepv\nalpv\n" \
"phase ppv\n0\nmppv\nfppv\nappv\nphase cf\n0\nmgcf\nfecf\nnacf\nphase mw\n0\npe\nwu\nphase qtz\n" \
"1\nqtz\nphase coes\n0\ncoes\nphase st\n0\nst\nphase apbo\n0\napbo\nphase ky\n0\nky\nphase neph\n" \
"0\nneph"
def __oxide_pct_to_cation_pct(self, df, row):
mgo = self.df['MgO'][row].item()
sio2 = self.df['SiO2'][row].item()
feo = self.df['FeO'][row].item()
al2o3 = self.df['Al2O3'][row].item()
na2o = self.df['Na2O'][row].item()
cao = self.df['CaO'][row].item()
tio2 = self.df['TiO2'][row].item()
sum = (mgo + sio2 + feo + al2o3 + na2o + cao + tio2)
c = _OxideCationNumbers()
o = _OxideWeights()
print(o.mgo, o.sio2, o.feo, o.al2o3, o.na2o, o.cao, o.tio2)
mg = (mgo / o.mgo) * c.mg
si = (sio2 / o.sio2) * c.si
fe = (feo / o.feo) * c.fe
al = (al2o3 / o.al2o3) * c.al
na = (na2o / o.na2o) * c.na
ca = (cao / o.cao) * c.ca
ti = (tio2 / o.tio2) * c.ti
sum = (mg + si + fe + al + na + ca + ti)
mg = mg / sum * 100.0
si = si / sum * 100.0
fe = fe / sum * 100.0
al = al / sum * 100.0
na = na / sum * 100.0
ca = ca / sum * 100.0
ti = ti / sum * 100.0
return {
'mg': mg,
'si': si,
'fe': fe,
'al': al,
'na': na,
'ca': ca,
'ti': ti
}
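    # Worked example (illustrative, assumed numbers): for a hypothetical
    # composition with MgO = 40.0 wt%, the molar mass of MgO is
    # 24.305 + 15.99 = 40.295 g/mol, so mg = 40.0 / 40.295 * 1 ~= 0.993 moles of
    # Mg cations per 100 g of sample before the final renormalization of all
    # cations to 100 %.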
def writefiles(self):
for t in self.temperatures:
print(t)
if os.path.exists(self.to_path + "/" + str(t)):
shutil.rmtree(self.to_path + "/" + str(t))
os.mkdir(self.to_path + "/" + str(t))
for row in self.df.index:
if len(str(self.df['MgO'][row])) > 0:
star = self.df['Star'][row]
print(star)
cations = self.__oxide_pct_to_cation_pct(df=self.df, row=row)
mg = cations['mg']
si = cations['si']
fe = cations['fe']
al = cations['al']
na = cations['na']
ca = cations['ca']
ti = cations['ti']
if self.material.lower() == 'bsp':
bsp_contents = self.BSP_FILE_FORMAT.format(t, si, mg, fe, ca, al, na)
with open(self.to_path + "/" + str(t) + "/{}_{}_{}_HeFESTo_Input_File.txt".format(star, self.material.upper(), t), 'w') as infile:
infile.write(bsp_contents)
infile.close()
else:
morb_contents = self.MORB_FILE_FORMAT.format(t, si, mg, fe, ca, al, na)
with open(self.to_path + "/" + str(t) + "/{}_{}_{}_HeFESTo_Input_File.txt".format(star, self.material.upper(), t), 'w') as infile:
infile.write(morb_contents)
infile.close()
df_paths = [
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_f1200_depleted_lithosphere_oxides.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_f1400_depleted_lithosphere_oxides.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_f1600_depleted_lithosphere_oxides.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_f1200_depleted_lithosphere_oxides.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_f1400_depleted_lithosphere_oxides.csv",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_f1600_depleted_lithosphere_oxides.csv",
]
temperatures = [
[1200],
[1200, 1400],
[1200, 1400, 1600],
[1200],
[1200, 1400],
[1200, 1400, 1600]
]
to_paths = [
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_depleted_lithosphere_compositions_f1200",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_depleted_lithosphere_compositions_f1400",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/adibekyan_depleted_lithosphere_compositions_f1600",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_depleted_lithosphere_compositions_f1200",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_depleted_lithosphere_compositions_f1400",
"/Users/scotthull/Documents - Scott’s MacBook Pro/PhD Research/depleted-lithosphere/kepler_depleted_lithosphere_compositions_f1600"
]
for index, path in enumerate(df_paths):
temps = temperatures[index]
to_path = to_paths[index]
HeFESTpFileWriter(from_path=path, to_path=to_path, temperatures=temps, material="BSP").writefiles()
| cc0-1.0 |
joferkington/mpldatacursor | examples/multi_highlight_example.py | 1 | 1947 | """
An example of highlighting "linked" artists. When one is selected, its partner
will be highlighted in addition to the original artist. Illustrates
subclassing a DataCursor.
"""
import numpy as np
import matplotlib.pyplot as plt
import mpldatacursor
def main():
fig, axes = plt.subplots(ncols=2)
num = 5
xy = np.random.random((num, 2))
lines = []
for i in range(num):
line, = axes[0].plot((i + 1) * np.arange(10))
lines.append(line)
points = []
for x, y in xy:
point, = axes[1].plot([x], [y], linestyle='none', marker='o')
points.append(point)
MultiHighlight(zip(points, lines))
plt.show()
class MultiHighlight(mpldatacursor.HighlightingDataCursor):
"""Highlight "paired" artists. When one artist is selected, both it and
it's linked partner will be highlighted."""
def __init__(self, paired_artists, **kwargs):
"""
Initialization is identical to HighlightingDataCursor except for the
following:
Parameters:
-----------
paired_artists: a sequence of tuples of matplotlib artists
Pairs of matplotlib artists to be highlighted.
Additional keyword arguments are passed on to HighlightingDataCursor.
The "display" keyword argument will be overridden to "single".
"""
# Two-way lookup table
self.artist_map = dict(paired_artists)
self.artist_map.update([pair[::-1] for pair in self.artist_map.items()])
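        # e.g. dict([(point, line)]) becomes {point: line, line: point}, so
        # show_highlight() below can look up the partner of whichever artist
        # was selected.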
kwargs['display'] = 'single'
artists = self.artist_map.values()
mpldatacursor.HighlightingDataCursor.__init__(self, artists, **kwargs)
def show_highlight(self, artist):
paired_artist = self.artist_map[artist]
mpldatacursor.HighlightingDataCursor.show_highlight(self, artist)
mpldatacursor.HighlightingDataCursor.show_highlight(self, paired_artist)
if __name__ == '__main__':
main()
| mit |
sumitsourabh/opencog | scripts/make_benchmark_graphs.py | 56 | 3139 | #!/usr/bin/env python
# Requires matplotlib for graphing
# reads *_benchmark.csv files as output by atomspace_bm and turns them into
# graphs.
import csv
import numpy as np
import matplotlib.colors as colors
#import matplotlib.finance as finance
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#import matplotlib.font_manager as font_manager
import glob
import pdb
def moving_average(x, n, type='simple'):
"""
compute an n period moving average.
type is 'simple' | 'exponential'
"""
x = np.asarray(x)
if type=='simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n] = a[n]
return a
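# Illustrative self-check (assumed example, not part of the original script): a
# 3-period simple moving average returns one smoothed value per input sample,
# with the first n entries padded from the value at index n.
_ma_demo = moving_average([1, 2, 3, 4, 5], 3)
assert len(_ma_demo) == 5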
def graph_file(fn,delta_rss=True):
print "Graphing " + fn
records = csv.reader(open(fn,'rb'),delimiter=",")
sizes=[]; times=[]; times_seconds=[]; memories=[]
for row in records:
sizes.append(int(row[0]))
times.append(int(row[1]))
memories.append(int(row[2]))
times_seconds.append(float(row[3]))
left, width = 0.1, 0.8
rect1 = [left, 0.5, width, 0.4] #left, bottom, width, height
rect2 = [left, 0.1, width, 0.4]
fig = plt.figure(facecolor='white')
axescolor = '#f6f6f6' # the axies background color
ax1 = fig.add_axes(rect1, axisbg=axescolor)
ax2 = fig.add_axes(rect2, axisbg=axescolor, sharex=ax1)
ax1.plot(sizes,times_seconds,color='black')
if len(times_seconds) > 1000:
        ax1.plot(sizes,moving_average(times_seconds,len(times_seconds) / 100),color='blue')
if delta_rss:
oldmemories = list(memories)
for i in range(1,len(memories)): memories[i] = oldmemories[i] - oldmemories[i-1]
ax2.plot(sizes,memories,color='black')
for label in ax1.get_xticklabels():
label.set_visible(False)
class MyLocator(mticker.MaxNLocator):
def __init__(self, *args, **kwargs):
mticker.MaxNLocator.__init__(self, *args, **kwargs)
def __call__(self, *args, **kwargs):
return mticker.MaxNLocator.__call__(self, *args, **kwargs)
# at most 7 ticks, pruning the upper and lower so they don't overlap
# with other ticks
fmt = mticker.ScalarFormatter()
fmt.set_powerlimits((-3, 4))
ax1.yaxis.set_major_formatter(fmt)
ax2.yaxis.set_major_locator(MyLocator(7, prune='upper'))
fmt = mticker.ScalarFormatter()
fmt.set_powerlimits((-3, 4))
ax2.yaxis.set_major_formatter(fmt)
ax2.yaxis.offsetText.set_visible(False)
fig.show()
size = int(fmt.orderOfMagnitude) / 3
labels = ["B","KB","MB","GB"]
label = labels[size]
labels = ["","(10s)","(100s)"]
label += " " + labels[int(fmt.orderOfMagnitude) % 3]
ax2.set_xlabel("AtomSpace Size")
ax2.set_ylabel("RSS " + label)
ax1.set_ylabel("Time (seconds)")
ax1.set_title(fn)
fig.show()
fig.savefig(fn+".png",format="png")
files_to_graph = glob.glob("*_benchmark.csv")
for fn in files_to_graph:
graph_file(fn);
| agpl-3.0 |
jkozerski/meteo | meteo_lcd/month_plot.py | 1 | 8156 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Author:
# Janusz Kozerski (https://github.com/jkozerski)
# This draws plots (of temp, humid, dew point and pressure) for the previous month.
# Plots are kept in files:
# yyyy.mm.dataName.png
import dateutil.parser
import datetime # datetime and timedelta structures
import time
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
# Sqlite3 database
import sqlite3
# choose working dir
working_dir = "/var/www/html/"
data_dir = "/home/pi/meteo/"
hist_dir = working_dir + "hist/"
log_file_path = working_dir + "meteo.log"
db_path = data_dir + "meteo.db"
# Diagram file names
temp_out_diagram_file = "temp_out.png"
humid_out_diagram_file = "humid_out.png"
dew_point_out_diagram_file = "dew_out.png"
pressure_diagram_file = "pressure.png"
# Converts a string back to a datetime structure
def getDateTimeFromISO8601String(s):
d = dateutil.parser.parse(s)
return d
def get_val_month_db(month, year):
if month < 1 or month > 12:
return;
if year < 2000 or year > 9999:
return;
conn = sqlite3.connect(db_path)
c = conn.cursor()
str_time_min = str(year).zfill(4) + "-" + str(month).zfill(2) + "-01T00:00:00"
if month == 12:
str_time_max = str(year+1).zfill(4) + "-" + str(1).zfill(2) + "-01T00:00:00"
else:
str_time_max = str(year).zfill(4) + "-" + str(month+1).zfill(2) + "-01T00:00:00"
#c.execute("SELECT strftime('%s', (?))", (str_time_min, ))
#int_time_min = (c.fetchone())[0]
#c.execute("SELECT strftime('%s', (?))", (str_time_max, ))
#int_time_max = (c.fetchone())[0]
int_time_min = int (time.mktime(getDateTimeFromISO8601String(str_time_min).timetuple()))
int_time_max = int (time.mktime(getDateTimeFromISO8601String(str_time_max).timetuple()))
try:
c.execute("SELECT time, temp, humid, dew_point, pressure FROM log WHERE time >= ? AND time < ?", (int_time_min, int_time_max))
rows = c.fetchall()
# for row in rows:
# print(row)
except Exception as e:
print("Error while get_val_month from db: " + str(e))
conn.close()
return rows
def plot_set_ax_fig (date, time, data, data_len, plot_type, ylabel, title, major_locator, minor_locator, file_name):
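    # Parameter notes (descriptive): `date` supplies the month/year used in the
    # title and output file name, `time` and `data` are the x (datetimes) and y
    # values, `data_len` is the index of the last sample shown, `plot_type` is
    # a matplotlib format string (e.g. 'r-'), `major_locator`/`minor_locator`
    # set the y-axis tick spacing, and `file_name` is the suffix of the PNG
    # written into hist_dir.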
# This keeps chart nice-looking
ratio = 0.20
plot_size_inches = 40
fig, ax = plt.subplots()
fig.set_size_inches(plot_size_inches, plot_size_inches)
# Plot data:
ax.plot_date(time, data, plot_type)
ax.set_xlim(time[0], time[data_len])
ax.set(xlabel='', ylabel=ylabel, title=title + " " + str(date.month) + "." + str(date.year))
ax.grid()
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=(0)))
ax.xaxis.set_minor_locator(matplotlib.dates.HourLocator(byhour=(0,6,12,18)))
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%m-%d %H:%M"))
ax.yaxis.set_major_locator(MultipleLocator(major_locator))
ax.yaxis.set_minor_locator(MultipleLocator(minor_locator))
ax.tick_params(labeltop=False, labelright=True)
plt.gcf().autofmt_xdate()
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
#print((xmax-xmin)/(ymax-ymin))
ax.set_aspect(abs((xmax-xmin)/(ymax-ymin))*ratio) #, adjustable='box-forced')
fig.savefig(hist_dir + str(date.year) + "." + str(date.month) + "." + file_name, bbox_inches='tight')
plt.close()
# Draw a plot
def draw_plot_month():
# Open log file
lf = open(log_file_path, "r");
# Calculates number lines in log file
num_lines = sum(1 for line in lf)
lf.seek(0)
# This keeps chart nice-looking
ratio = 0.25
plot_size_inches = 28
# Today
today = datetime.datetime.today()
if today.month == 1:
plot_date_begin = datetime.datetime(today.year-1, 12, 1)
else:
plot_date_begin = datetime.datetime(today.year, today.month-1, 1)
plot_date_end = datetime.datetime(today.year, today.month, 1)
# Helpers
j = 0
values_count = 0
lines_to_skip = num_lines - 25000
    # This many entries should cover more than one month (31 days).
    # Skipping older lines makes generating the plot take less time.
if lines_to_skip < 0:
lines_to_skip = 0;
    # Use every x-th (e.g. every second, or every third) value - this makes the chart smoother
every_x = 1;
t = []; # time axis for plot
t_out = []; # temp out for plot
h_out = []; # humid out for plot
d_out = []; # dew point for plot
p_out = []; # pressure for plot
    # From each line of the log file create pairs of meteo data (time, value)
for line in lf:
if lines_to_skip > 0:
lines_to_skip -= 1
continue
j += 1
if j >= every_x:
j = 0
else:
continue
# Parse line
time, temp_in, humid_in, dew_in, temp_out, humid_out, dew_out, pressure = str(line).split(";")
time = getDateTimeFromISO8601String(time)
if time < plot_date_begin:
continue
if time > plot_date_end:
continue
values_count += 1
# Append time for time axis
t.append(time)
# Append meteo data for their axis
t_out.append(float(temp_out))
h_out.append(float(humid_out))
d_out.append(float(dew_out))
p_out.append(float(pressure))
lf.close()
    # draw plots for outside values: temperature, humidity, dew point, pressure
##############
# Temperature
plot_set_ax_fig(today, t, t_out, values_count-1, 'r-', 'Temperatura [C]', 'Wykres temperatury zewnetrznej', 1, 0.5, temp_out_diagram_file)
##############
# Humidity
plot_set_ax_fig(today, t, h_out, values_count-1, 'g-', 'Wilgotnosc wzgledna [%]', 'Wykres wilgotnosci wzglednej', 5, 1, humid_out_diagram_file)
##############
# Dew point
plot_set_ax_fig(today, t, d_out, values_count-1, 'b-', 'Temp. punktu rosy [C]', 'Wykres temperatury punktu rosy', 1, 1, dew_point_out_diagram_file)
##############
# Pressure
plot_set_ax_fig(today, t, p_out, values_count-1, 'm-', 'Cisnienie atm. [hPa]', 'Wykres cisnienia atmosferycznego', 2, 1, pressure_diagram_file)
return
# Draw a plot
def draw_plot_month_db():
# Today
today = datetime.datetime.today()
if today.month == 1:
plot_date_begin = datetime.datetime(today.year-1, 12, 1)
else:
plot_date_begin = datetime.datetime(today.year, today.month-1, 1)
t = []; # time axis for plot
t_out = []; # temp out for plot
h_out = []; # humid out for plot
d_out = []; # dew point for plot
p_out = []; # pressure for plot
rows = get_val_month_db(plot_date_begin.month, plot_date_begin.year) # month, and year
    # From each row create pairs of meteo data (time, value)
values_count = len(rows)
# Row format: (time, temp, humid, dew_point, pressure)
for row in rows:
# Append time for time axis
t.append(datetime.datetime.fromtimestamp(row[0]))
# Append meteo data for their axis
t_out.append(row[1])
h_out.append(row[2])
d_out.append(row[3])
p_out.append(row[4])
    # draw plots for outside values: temperature, humidity, dew point, pressure
##############
# Temperature
plot_set_ax_fig(plot_date_begin, t, t_out, values_count-1, 'r-', 'Temperatura [C]', 'Wykres temperatury zewnetrznej', 1, 0.5, temp_out_diagram_file)
##############
# Humidity
plot_set_ax_fig(plot_date_begin, t, h_out, values_count-1, 'g-', 'Wilgotnosc wzgledna [%]', 'Wykres wilgotnosci wzglednej', 5, 1, humid_out_diagram_file)
##############
# Dew point
plot_set_ax_fig(plot_date_begin, t, d_out, values_count-1, 'b-', 'Temp. punktu rosy [C]', 'Wykres temperatury punktu rosy', 1, 1, dew_point_out_diagram_file)
##############
# Pressure
plot_set_ax_fig(plot_date_begin, t, p_out, values_count-1, 'm-', 'Cisnienie atm. [hPa]', 'Wykres cisnienia atmosferycznego', 2, 1, pressure_diagram_file)
return
# Main program:
draw_plot_month_db();
#draw_plot_month();
| apache-2.0 |
frankinit/ThinkStats2 | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/reshape/tile.py | 2 | 19190 | """
Quantilization functions and related stuff
"""
from functools import partial
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas.core.dtypes.common import (
_NS_DTYPE,
ensure_int64,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_integer,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.missing import isna
from pandas import (
Categorical,
Index,
Interval,
IntervalIndex,
Series,
Timedelta,
Timestamp,
to_datetime,
to_timedelta,
)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
def cut(
x,
bins,
right=True,
labels=None,
retbins=False,
precision=3,
include_lowest=False,
duplicates="raise",
):
"""
Bin values into discrete intervals.
Use `cut` when you need to segment and sort data values into bins. This
function is also useful for going from a continuous variable to a
categorical variable. For example, `cut` could convert ages to groups of
age ranges. Supports binning into an equal number of bins, or a
pre-specified array of bins.
Parameters
----------
x : array-like
The input array to be binned. Must be 1-dimensional.
bins : int, sequence of scalars, or IntervalIndex
The criteria to bin by.
* int : Defines the number of equal-width bins in the range of `x`. The
range of `x` is extended by .1% on each side to include the minimum
and maximum values of `x`.
* sequence of scalars : Defines the bin edges allowing for non-uniform
width. No extension of the range of `x` is done.
* IntervalIndex : Defines the exact bins to be used. Note that
IntervalIndex for `bins` must be non-overlapping.
right : bool, default True
Indicates whether `bins` includes the rightmost edge or not. If
``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
indicate (1,2], (2,3], (3,4]. This argument is ignored when
`bins` is an IntervalIndex.
labels : array or bool, optional
Specifies the labels for the returned bins. Must be the same length as
the resulting bins. If False, returns only integer indicators of the
bins. This affects the type of the output container (see below).
This argument is ignored when `bins` is an IntervalIndex.
retbins : bool, default False
Whether to return the bins or not. Useful when bins is provided
as a scalar.
precision : int, default 3
The precision at which to store and display the bins labels.
include_lowest : bool, default False
Whether the first interval should be left-inclusive or not.
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.23.0
Returns
-------
out : Categorical, Series, or ndarray
An array-like object representing the respective bin for each value
of `x`. The type depends on the value of `labels`.
* True (default) : returns a Series for Series `x` or a
Categorical for all other inputs. The values stored within
are Interval dtype.
* sequence of scalars : returns a Series for Series `x` or a
Categorical for all other inputs. The values stored within
are whatever the type in the sequence is.
* False : returns an ndarray of integers.
bins : numpy.ndarray or IntervalIndex.
The computed or specified bins. Only returned when `retbins=True`.
For scalar or sequence `bins`, this is an ndarray with the computed
bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For
an IntervalIndex `bins`, this is equal to `bins`.
See Also
--------
qcut : Discretize variable into equal-sized buckets based on rank
or based on sample quantiles.
Categorical : Array type for storing data that come from a
fixed set of values.
Series : One-dimensional array with axis labels (including time series).
IntervalIndex : Immutable Index implementing an ordered, sliceable set.
Notes
-----
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Series or Categorical object.
Examples
--------
Discretize into three equal-sized bins.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
... # doctest: +ELLIPSIS
[(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
array([0.994, 3. , 5. , 7. ]))
    Discovers the same bins, but assigns them specific labels. Notice that
    the returned Categorical's categories are `labels` and that it is ordered.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
... 3, labels=["bad", "medium", "good"])
[bad, good, medium, medium, good, bad]
Categories (3, object): [bad < medium < good]
``labels=False`` implies you just want the bins back.
>>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
array([0, 1, 1, 3])
Passing a Series as an input returns a Series with categorical dtype:
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, 3)
... # doctest: +ELLIPSIS
a (1.992, 4.667]
b (1.992, 4.667]
c (4.667, 7.333]
d (7.333, 10.0]
e (7.333, 10.0]
dtype: category
Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ...
Passing a Series as an input returns a Series with mapping value.
It is used to map numerically to intervals based on bins.
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 4.0
dtype: float64, array([0, 2, 4, 6, 8]))
    Use the `drop` option when bins are not unique
>>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
... right=False, duplicates='drop')
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 3.0
dtype: float64, array([0, 2, 4, 6, 8]))
Passing an IntervalIndex for `bins` results in those categories exactly.
Notice that values not covered by the IntervalIndex are set to NaN. 0
is to the left of the first bin (which is closed on the right), and 1.5
falls between two bins.
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
>>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)
[NaN, (0, 1], NaN, (2, 3], (4, 5]]
Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError("Cannot cut empty array")
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if np.isinf(mn) or np.isinf(mx):
# GH 24314
raise ValueError(
"cannot specify integer `bins` when input data " "contains infinity"
)
elif mn == mx: # adjust end points before binning
mn -= 0.001 * abs(mn) if mn != 0 else 0.001
mx += 0.001 * abs(mx) if mx != 0 else 0.001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
if bins.is_overlapping:
raise ValueError("Overlapping IntervalIndex is not accepted.")
else:
if is_datetime64tz_dtype(bins):
bins = np.asarray(bins, dtype=_NS_DTYPE)
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
# GH 26045: cast to float64 to avoid an overflow
if (np.diff(bins.astype("float64")) < 0).any():
raise ValueError("bins must increase monotonically.")
fac, bins = _bins_to_cuts(
x,
bins,
right=right,
labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype,
duplicates=duplicates,
)
return _postprocess_for_cut(
fac, bins, retbins, x_is_series, series_index, name, dtype
)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates="raise"):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : 1d ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(
x,
bins,
labels=labels,
precision=precision,
include_lowest=True,
dtype=dtype,
duplicates=duplicates,
)
return _postprocess_for_cut(
fac, bins, retbins, x_is_series, series_index, name, dtype
)
def _bins_to_cuts(
x,
bins,
right=True,
labels=None,
precision=3,
include_lowest=False,
dtype=None,
duplicates="raise",
):
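    # Descriptive note: assigns each value of `x` to an interval defined by
    # `bins` (via searchsorted, or a fast path when `bins` is an IntervalIndex),
    # optionally attaches interval or user-supplied labels, and returns the
    # tuple (categorized result, bins actually used). NaNs and values outside
    # all bins come back as NaN in the result.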
if duplicates not in ["raise", "drop"]:
raise ValueError(
"invalid value for 'duplicates' parameter, "
"valid options are: raise, drop"
)
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == "raise":
raise ValueError(
"Bin edges must be unique: {bins!r}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(bins=bins)
)
else:
bins = unique_bins
side = "left" if right else "right"
ids = ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(
bins, precision, right=right, include_lowest=include_lowest, dtype=dtype
)
else:
if len(labels) != len(bins) - 1:
raise ValueError(
"Bin labels must be one fewer than " "the number of bin edges"
)
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
def _coerce_to_type(x):
"""
    If the passed data is of datetime/timedelta type,
    this method converts it to numeric so that the cut
    method can handle it.
"""
dtype = None
if is_datetime64tz_dtype(x):
dtype = x.dtype
elif is_datetime64_dtype(x):
x = to_datetime(x)
dtype = np.dtype("datetime64[ns]")
elif is_timedelta64_dtype(x):
x = to_timedelta(x)
dtype = np.dtype("timedelta64[ns]")
if dtype is not None:
# GH 19768: force NaT to NaN during integer conversion
x = np.where(x.notna(), x.view(np.int64), np.nan)
return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
    If the passed bins are of datetime/timedelta type,
    this method converts them to integer
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Raises
------
ValueError if bins are not of a compat dtype to dtype
"""
bins_dtype = infer_dtype(bins, skipna=False)
if is_timedelta64_dtype(dtype):
if bins_dtype in ["timedelta", "timedelta64"]:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
if bins_dtype in ["datetime", "datetime64"]:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _convert_bin_to_datelike_type(bins, dtype):
"""
Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is
datelike
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Returns
-------
bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is
datelike
"""
if is_datetime64tz_dtype(dtype):
bins = to_datetime(bins.astype(np.int64), utc=True).tz_convert(dtype.tz)
elif is_datetime_or_timedelta_dtype(dtype):
bins = Index(bins.astype(np.int64), dtype=dtype)
return bins
def _format_labels(bins, precision, right=True, include_lowest=False, dtype=None):
""" based on the dtype, return our labels """
closed = "right" if right else "left"
if is_datetime64tz_dtype(dtype):
formatter = partial(Timestamp, tz=dtype.tz)
adjust = lambda x: x - Timedelta("1ns")
elif is_datetime64_dtype(dtype):
formatter = Timestamp
adjust = lambda x: x - Timedelta("1ns")
elif is_timedelta64_dtype(dtype):
formatter = Timedelta
adjust = lambda x: x - Timedelta("1ns")
else:
precision = _infer_precision(precision, bins)
formatter = lambda x: _round_frac(x, precision)
adjust = lambda x: x - 10 ** (-precision)
breaks = [formatter(b) for b in bins]
labels = IntervalIndex.from_breaks(breaks, closed=closed)
if right and include_lowest:
# we will adjust the left hand side by precision to
# account that we are all right closed
v = adjust(labels[0].left)
i = IntervalIndex([Interval(v, labels[0].right, closed="right")])
labels = i.append(labels[1:])
return labels
def _preprocess_for_cut(x):
"""
handles preprocessing for cut where we convert passed
input to array, strip the index information and store it
separately
"""
x_is_series = isinstance(x, Series)
series_index = None
name = None
if x_is_series:
series_index = x.index
name = x.name
# Check that the passed array is a Pandas or Numpy object
# We don't want to strip away a Pandas data-type here (e.g. datetimetz)
ndim = getattr(x, "ndim", None)
if ndim is None:
x = np.asarray(x)
if x.ndim != 1:
raise ValueError("Input array must be 1 dimensional")
return x_is_series, series_index, name, x
def _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype):
"""
handles post processing for the cut method where
we combine the index information if the originally passed
datatype was a series
"""
if x_is_series:
fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
bins = _convert_bin_to_datelike_type(bins, dtype)
return fac, bins
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
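# Illustrative examples: _round_frac(1.23456, 3) -> 1.235 (a whole part is
# present, so round to `precision` decimal places), while
# _round_frac(0.000987, 3) -> 0.000987 (purely fractional values keep
# `precision` significant digits instead).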
def _infer_precision(base_precision, bins):
"""Infer an appropriate precision for _round_frac
"""
for precision in range(base_precision, 20):
levels = [_round_frac(b, precision) for b in bins]
if algos.unique(levels).size == bins.size:
return precision
return base_precision # default
| apache-2.0 |
southpaw94/MachineLearning | HPTuning/SKPipeline.py | 1 | 1446 | import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import StratifiedKFold
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases'+\
'/breast-cancer-wisconsin/wdbc.data', header=None)
X = df.loc[:, 2:].values
y = df.loc[:, 1].values
# All malignant tumors will be represented as class 1, otherwise, class 0
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, \
test_size=0.20, random_state=1)
pipe_lr = Pipeline([('scl', StandardScaler()), ('pca', PCA(n_components=2)), \
('clf', LogisticRegression(random_state=1))])
pipe_lr.fit(X_train, y_train)
print('Test Accuracy: %.3f' % pipe_lr.score(X_test, y_test))
kfold = StratifiedKFold(y = y_train, \
n_folds = 10, \
random_state = 1)
scores = []
for k, (train, test) in enumerate(kfold):
pipe_lr.fit(X_train[train], y_train[train])
score = pipe_lr.score(X_train[test], y_train[test])
scores.append(score)
print('Fold %s, Class dist.: %s, Acc: %.3f' % (k+1, np.bincount(y_train[train]), score))
print('CV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))
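# Equivalent shortcut (illustrative): scikit-learn's cross_val_score runs the
# same stratified 10-fold CV in one call and should reproduce the scores above.
from sklearn.cross_validation import cross_val_score
cv_scores = cross_val_score(estimator=pipe_lr, X=X_train, y=y_train, cv=10, n_jobs=1)
print('CV accuracy (cross_val_score): %.3f +/- %.3f' % (np.mean(cv_scores), np.std(cv_scores)))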
| gpl-2.0 |
fja05680/pinkfish | examples/170.follow-trend/strategy.py | 1 | 3623 | """
A basic long-term trend strategy applied separately to several
securities. Buy when:
1. the S&P 500 index closes above its 200 day moving average, and
2. the stock closes above its upper band.
Sell your long position when:
3. the S&P 500 index closes below its 200 day moving average, or
4. the stock closes below its lower band.
An illustrative usage sketch is included at the bottom of this file.
"""
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from talib.abstract import *
import pinkfish as pf
default_options = {
'use_adj' : False,
'use_cache' : True,
'sma_period': 200,
'percent_band' : 0,
'use_regime_filter' : True
}
class Strategy:
def __init__(self, symbol, capital, start, end, options=default_options):
self.symbol = symbol
self.capital = capital
self.start = start
self.end = end
self.options = options.copy()
self.ts = None
self.tlog = None
self.dbal = None
self.stats = None
def _algo(self):
pf.TradeLog.cash = self.capital
for i, row in enumerate(self.ts.itertuples()):
date = row.Index.to_pydatetime()
high = row.high; low = row.low; close = row.close;
end_flag = pf.is_last_row(self.ts, i)
upper_band = row.sma * (1 + self.options['percent_band'] / 100)
lower_band = row.sma * (1 - self.options['percent_band'] / 100)
# Sell Logic
# First we check if an existing position in symbol
# should be sold
# - Sell if (use_regime_filter and regime < 0)
# - Sell if price closes below lower_band
# - Sell if end of data
if self.tlog.shares > 0:
if ((self.options['use_regime_filter'] and row.regime < 0)
or close < lower_band
or end_flag):
self.tlog.sell(date, close)
# Buy Logic
# First we check to see if there is an existing position,
# if so do nothing
            # - Buy if price closes above upper_band
            #   and (regime > 0 or the regime filter is disabled)
else:
if ((row.regime > 0 or not self.options['use_regime_filter'])
and close > upper_band):
self.tlog.buy(date, close)
# Record daily balance
self.dbal.append(date, high, low, close)
def run(self):
self.ts = pf.fetch_timeseries(self.symbol, use_cache=self.options['use_cache'])
self.ts = pf.select_tradeperiod(self.ts, self.start,
self.end, self.options['use_adj'])
# Add technical indicator: day sma
self.ts['sma'] = SMA(self.ts, timeperiod=self.options['sma_period'])
# add S&P500 200 sma regime filter
ts = pf.fetch_timeseries('^GSPC')
ts = pf.select_tradeperiod(ts, self.start, self.end, use_adj=False)
self.ts['regime'] = \
pf.CROSSOVER(ts, timeperiod_fast=1, timeperiod_slow=200)
self.ts, self.start = pf.finalize_timeseries(self.ts, self.start)
self.tlog = pf.TradeLog(self.symbol)
self.dbal = pf.DailyBal()
self._algo()
self._get_logs()
self._get_stats()
def _get_logs(self):
self.rlog = self.tlog.get_log_raw()
self.tlog = self.tlog.get_log()
self.dbal = self.dbal.get_log(self.tlog)
def _get_stats(self):
self.stats = pf.stats(self.ts, self.tlog, self.dbal, self.capital)
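# Illustrative usage sketch (assumed, not part of the original example, which
# is normally driven from a notebook; the symbol, dates, and capital are made up).
if __name__ == '__main__':
    strategy = Strategy('SPY', capital=10000,
                        start=datetime.datetime(2015, 1, 1),
                        end=datetime.datetime(2020, 1, 1),
                        options=default_options)
    strategy.run()
    print(strategy.stats)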
| mit |
mne-tools/mne-tools.github.io | 0.21/_downloads/16e013a9723e79ad270873c47053f4b5/plot_source_space_snr.py | 1 | 3009 | # -*- coding: utf-8 -*-
"""
===============================
Computing source space SNR
===============================
This example shows how to compute and plot source space SNR as in [1]_.
"""
# Author: Padma Sundaram <[email protected]>
# Kaisu Lankinen <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 2
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
import numpy as np
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
# Read data
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
evoked = mne.read_evokeds(fname_evoked, condition='Left Auditory',
baseline=(None, 0))
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fwd = mne.read_forward_solution(fname_fwd)
cov = mne.read_cov(fname_cov)
# Read inverse operator:
inv_op = make_inverse_operator(evoked.info, fwd, cov, fixed=True, verbose=True)
# Calculate MNE:
snr = 3.0
lambda2 = 1.0 / snr ** 2
stc = apply_inverse(evoked, inv_op, lambda2, 'MNE', verbose=True)
# Calculate SNR in source space:
snr_stc = stc.estimate_snr(evoked.info, fwd, cov)
# Plot an average SNR across source points over time:
ave = np.mean(snr_stc.data, axis=0)
fig, ax = plt.subplots()
ax.plot(evoked.times, ave)
ax.set(xlabel='Time (sec)', ylabel='SNR MEG-EEG')
fig.tight_layout()
# Find time point of maximum SNR:
maxidx = np.argmax(ave)
# Plot SNR on source space at the time point of maximum SNR:
kwargs = dict(initial_time=evoked.times[maxidx], hemi='split',
views=['lat', 'med'], subjects_dir=subjects_dir, size=(600, 600),
clim=dict(kind='value', lims=(-100, -70, -40)),
transparent=True, colormap='viridis')
brain = snr_stc.plot(**kwargs)
###############################################################################
# EEG
# ---
# Next we do the same for EEG and plot the result on the cortex:
evoked_eeg = evoked.copy().pick_types(eeg=True, meg=False)
inv_op_eeg = make_inverse_operator(evoked_eeg.info, fwd, cov, fixed=True,
verbose=True)
stc_eeg = apply_inverse(evoked_eeg, inv_op_eeg, lambda2, 'MNE', verbose=True)
snr_stc_eeg = stc_eeg.estimate_snr(evoked_eeg.info, fwd, cov)
brain = snr_stc_eeg.plot(**kwargs)
###############################################################################
# The same can be done for MEG, which looks more similar to the MEG-EEG case
# than the EEG case does.
#
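# A minimal sketch of the MEG-only variant (illustrative; it simply mirrors
# the EEG block above)::
#
#     evoked_meg = evoked.copy().pick_types(meg=True, eeg=False)
#     inv_op_meg = make_inverse_operator(evoked_meg.info, fwd, cov, fixed=True)
#     stc_meg = apply_inverse(evoked_meg, inv_op_meg, lambda2, 'MNE')
#     snr_stc_meg = stc_meg.estimate_snr(evoked_meg.info, fwd, cov)
#     brain = snr_stc_meg.plot(**kwargs)
#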
# References
# ----------
# .. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon, D.,
# Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009). Mapping the
# Signal-To-Noise-Ratios of Cortical Sources in Magnetoencephalography
# and Electroencephalography. Human Brain Mapping, 30(4), 1077–1086.
# doi:10.1002/hbm.20571
| bsd-3-clause |
PrashntS/scikit-learn | sklearn/linear_model/randomized_l1.py | 68 | 23405 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
    # Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article, which corresponds to ``scaling`` here.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
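    >>> # Illustrative only (not executed here): fit on a feature matrix X and
    >>> # a target vector y, then read the per-feature selection frequencies
    >>> # (values between 0 and 1) from ``scores_``:
    >>> # randomized_lasso.fit(X, y)
    >>> # randomized_lasso.scores_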
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
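# Illustrative usage sketch (X and y stand for a feature matrix and a target
# vector; see examples/linear_model/plot_sparse_recovery.py for a full
# example):
#
#     alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42)
#     # scores_path[i, j] is the selection frequency of feature i at
#     # alpha_grid[j]; stable features keep high scores across the grid.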
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 114 | 11393 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
        # With min_samples=4 it is no longer possible to extract core samples:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/metrics/tests/test_regression.py | 18 | 6065 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
    # Checking for the condition in which both numerator and denominator are
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| mit |
kiryx/pagmo | PyGMO/core/__init__.py | 3 | 20705 | # -*- coding: utf-8 -*-
from PyGMO.core._core import *
import threading as _threading
import signal as _signal
import os as _os
__doc__ = 'PyGMO core module.'
__all__ = [
'archipelago',
'base_island',
'champion',
'distribution_type',
'individual',
'ipy_island',
'island',
'local_island',
'migration_direction',
'population',
'py_island']
_orig_signal = _signal.getsignal(_signal.SIGINT)
_main_pid = _os.getpid()
# Alternative signal handler which ignores sigint if called from a child
# process.
def _sigint_handler(signum, frame):
import os
if os.getpid() == _main_pid:
_orig_signal(signum, frame)
_signal.signal(_signal.SIGINT, _sigint_handler)
# Global lock used when starting processes.
_process_lock = _threading.Lock()
# Raw C++ base island class.
_base_island = _core._base_island
class base_island(_core._base_island):
def __init__(self, *args):
if len(args) == 0:
raise ValueError(
"Cannot initialise base island without parameters for the constructor.")
_core._base_island.__init__(self, *args)
def get_name(self):
return str(type(self))
def __get_deepcopy__(self):
from copy import deepcopy
return deepcopy(self)
def _generic_island_ctor(self, *args, **kwargs):
"""Unnamed arguments:
#. algorithm
#. problem or population
#. number of individuals (optional and valid only if the second argument is a problem, defaults to 0 if not specified)
Keyword arguments:
* *s_policy* -- migration selection policy (defaults to 'best selection' policy)
* *r_policy* -- migration replacement policy (defaults to 'fair replacement' policy)
"""
from PyGMO.algorithm._algorithm import _base as _base_algorithm
from PyGMO.algorithm import base as base_algorithm
from PyGMO.problem._problem import _base as _base_problem
from PyGMO.problem._problem import _base_stochastic as _base_problem_stochastic
from PyGMO.problem import base as base_problem
from PyGMO.problem import base_stochastic as base_problem_stochastic
from PyGMO.migration._migration import best_s_policy, fair_r_policy, _base_s_policy, _base_r_policy
if len(args) < 2 or len(args) > 3:
raise ValueError(
"Unnamed arguments list must have either 2 or three elements, but %d elements were found instead." %
(len(args),))
if not isinstance(args[0], _base_algorithm):
raise TypeError("The first unnamed argument must be an algorithm.")
ctor_args = [args[0]]
if isinstance(args[1], _base_problem) or isinstance(args[1], _base_problem_stochastic):
ctor_args.append(args[1])
if len(args) == 3:
if not isinstance(args[2], int):
raise TypeError(
"Please provide an integer for the number of individuals in the island.")
ctor_args.append(args[2])
else:
ctor_args.append(0)
elif isinstance(args[1], population):
if len(args) == 3:
raise ValueError(
"When the second unnamed argument is a population, there cannot be a third unnamed argument.")
ctor_args.append(args[1])
else:
raise TypeError(
"The second unnamed argument must be either a problem or a population.")
if 's_policy' in kwargs:
ctor_args.append(kwargs['s_policy'])
else:
ctor_args.append(best_s_policy())
if not isinstance(ctor_args[-1], _base_s_policy):
raise TypeError("s_policy must be a migration selection policy.")
if 'r_policy' in kwargs:
ctor_args.append(kwargs['r_policy'])
else:
ctor_args.append(fair_r_policy())
if not isinstance(ctor_args[-1], _base_r_policy):
raise TypeError("r_policy must be a migration replacement policy.")
if isinstance(self, base_island):
super(type(self), self).__init__(*ctor_args)
elif isinstance(self, _base_island):
self.__original_init__(*ctor_args)
else:
assert(self is None)
n_pythonic_items = 0
if isinstance(args[0], base_algorithm):
n_pythonic_items += 1
if isinstance(args[1], base_problem) or isinstance(args[1], base_problem_stochastic):
n_pythonic_items += 1
elif isinstance(args[1], population) and (isinstance(args[1].problem, base_problem) or isinstance(args[1], base_problem_stochastic)):
n_pythonic_items += 1
if n_pythonic_items > 0:
return py_island(*args, **kwargs)
else:
return local_island(*args, **kwargs)
local_island.__original_init__ = local_island.__init__
local_island.__init__ = _generic_island_ctor
# This is the function that will be called by the separate process
# spawned from py_island.
def _process_target(q, a, p):
try:
tmp = a.evolve(p)
q.put(tmp)
except BaseException as e:
q.put(e)
class py_island(base_island):
"""Python island.
This island will launch evolutions using the multiprocessing module, available since Python 2.6.
Each evolution is transparently dispatched to a Python interpreter in a separate process.
"""
__init__ = _generic_island_ctor
def _perform_evolution(self, algo, pop):
try:
import multiprocessing as mp
q = mp.Queue()
# Apparently creating/starting processes is _not_ thread safe:
# http://bugs.python.org/issue1731717
# http://stackoverflow.com/questions/1359795/error-while-using-multiprocessing-module-in-a-python-daemon
# Protect with a global lock.
with _process_lock:
process = mp.Process(
target=_process_target, args=(q, algo, pop))
process.start()
retval = q.get()
with _process_lock:
process.join()
if isinstance(retval, BaseException):
raise retval
return retval
except BaseException as e:
print('Exception caught during evolution:')
print(e)
raise RuntimeError()
def get_name(self):
return "Python multiprocessing island"
# This is the function that will be called by the task client
# in ipy_island.
def _maptask_target(a, p):
try:
return a.evolve(p)
except BaseException as e:
return e
class ipy_island(base_island):
"""Parallel IPython island.
This island will launch evolutions using IPython's MapTask interface. The evolution will be dispatched
to IPython engines that, depending on the configuration of IPython/ipcluster, can reside either on the
local machine or on other remote machines.
See: http://ipython.scipy.org/doc/stable/html/parallel/index.html
"""
# NOTE: when using an IPython island, on quitting IPython there might be a warning message
# reporting an exception being ignored. This seems to be a problem in the foolscap library:
# http://foolscap.lothar.com/trac/ticket/147
# Hopefully it will be fixed in the next versions of the library.
__init__ = _generic_island_ctor
def _perform_evolution(self, algo, pop):
try:
from IPython.kernel.client import TaskClient, MapTask
# Create task client.
tc = TaskClient()
# Create the task.
mt = MapTask(_maptask_target, args=(algo, pop))
# Run the task.
task_id = tc.run(mt)
# Get retval.
retval = tc.get_task_result(task_id, block=True)
if isinstance(retval, BaseException):
raise retval
return retval
except BaseException as e:
print('Exception caught during evolution:')
print(e)
raise RuntimeError()
def get_name(self):
return "Parallel IPython island"
def island(*args, **kwargs):
return _generic_island_ctor(None, *args, **kwargs)
island.__doc__ = '\n'.join(['Island factory function.\n\nThis function will return an instance of an island object\nbuilt according to the following rule: ' +
'if the arguments include\neither a pythonic problem or a pythonic algorithm, then an instance\nof :class:`py_island` will be returned; ' +
'otherwise, an instance of\n:class:`local_island` will be returned.'] +
[s.replace('\t', '') for s in _generic_island_ctor.__doc__.split('\n')])
# The following is necessary for Python 2. s remains in the workspace and will cause the error:
# AttributeError: 'module' object has no attribute 's'
# However in Python 3 s is no longer in the workspace and del s will
# cause an exception!
if 's' in globals():
del s
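# Illustrative factory calls (the algorithm and problem names are examples
# from the standard PyGMO modules; ``my_python_problem`` stands for any
# user-defined problem deriving from ``problem.base``):
#
#     isl1 = island(algorithm.de(), problem.ackley(10), 20)   # -> local_island
#     isl2 = island(algorithm.de(), my_python_problem, 20)    # -> py_island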
def _get_island_list():
from PyGMO import core
names = [n for n in dir(core) if not n.startswith(
'_') and not n.startswith('base') and n.endswith('_island')]
try:
from IPython.kernel.client import TaskClient, MapTask
except ImportError:
names = [n for n in names if n != 'ipy_island']
return [core.__dict__[n] for n in names]
def _generic_archi_ctor(self, *args, **kwargs):
"""
Unnamed arguments (optional):
#. algorithm
#. problem
#. number of islands
    #. number of individuals in the population
Keyword arguments:
* *topology* -- migration topology (defaults to unconnected)
* *distribution_type* -- distribution_type (defaults to distribution_type.point_to_point)
* *migration_direction* -- migration_direction (defaults to migration_direction.destination)
"""
from PyGMO import topology, algorithm, problem
from difflib import get_close_matches
if not((len(args) == 4) or (len(args) == 0)):
raise ValueError(
"Unnamed arguments list, when present, must be of length 4, but %d elements were found instead" %
(len(args),))
# Append everything in the same list of constructor arguments
ctor_args = []
for i in args:
ctor_args.append(i)
# Pop all known keywords out of kwargs and add a default value if not
# provided
# unconnected is default
ctor_args.append(kwargs.pop('topology', topology.unconnected()))
# point-to-point is default
ctor_args.append(
kwargs.pop('distribution_type', distribution_type.point_to_point))
# destination is default
ctor_args.append(
kwargs.pop('migration_direction', migration_direction.destination))
# Check for unknown keywords
kwlist = ['topology', 'distribution_type', 'migration_direction']
if kwargs:
s = "The following unknown keyworded argument was passed to the construtor: "
for kw in kwargs:
s += kw
spam = get_close_matches(kw, kwlist)
if spam:
s += " (Did you mean %s?), " % spam[0]
else:
s += ", "
raise ValueError(s[:-2])
# Constructs an empty archipelago with no islands using the C++ constructor
self.__original_init__(*ctor_args[-3:])
# We now push back the correct island type if required
if (len(args)) == 4:
if not isinstance(args[0], algorithm._base):
raise TypeError("The first unnamed argument must be an algorithm")
if not (isinstance(args[1], problem._base) or isinstance(args[1], problem._base_stochastic)):
raise TypeError("The second unnamed argument must be a problem")
if not isinstance(args[2], int):
raise TypeError(
"The third unnamed argument must be an integer (i.e. number of islands)")
if not isinstance(args[3], int):
raise TypeError(
"The fourth unnamed argument must be an integer (i.e. population size)")
for n in range(args[2]):
self.push_back(island(args[0], args[1], args[3]))
archipelago.__original_init__ = archipelago.__init__
archipelago.__init__ = _generic_archi_ctor
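# Illustrative constructor call (the algorithm, problem and topology names are
# examples):
#
#     archi = archipelago(algorithm.de(), problem.rosenbrock(10), 8, 20,
#                         topology=topology.ring())
#     archi.evolve(5)   # start 5 rounds of evolution (asynchronously)
#     archi.join()      # wait for all islands to finish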
def _archipelago_draw(
self,
layout='spring',
n_color='fitness',
n_size=15,
n_alpha=0.5,
e_alpha=0.1,
e_arrows=False,
scale_by_degree=False,
cmap='default'):
"""
Draw a visualization of the archipelago using networkx.
USAGE: pos = archipelago.draw(layout = 'spring', color = 'fitness', n_size = 15, scale_by_degree = False, n_alpha = 0.5, e_alpha = 0.1, cmap = 'default', e_arrows=False)
* layout: Network layout. Can be 'spring' or 'circular' or a list of values pos returned
      by a previous call of the method (so that positions of the islands can be kept fixed).
    * n_color: Defines the color code for the nodes. Can be one of 'fitness', 'links', 'rank', or a standard matplotlib color such as 'blue'.
* n_size: The size of nodes. Becomes scaling factor when scale_by_degree=True.
* n_alpha: Transparency of nodes. Takes value between 0 and 1.
* e_arrows: Plots arrows on the edges for directed graphs
    * e_alpha: Transparency of edges. Takes value between 0 and 1.
* scale_by_degree: When True, nodes will be sized proportional to their degree.
* cmap: color map. one in matplotlib.pyplot.cm
"""
try:
import networkx as nx
except ImportError:
raise ImportError('Could not import the networkx module.')
try:
import matplotlib.pyplot as pl
except ImportError:
        raise ImportError('Could not import the matplotlib module.')
# We set the graph in networkx
t = self.topology
G = t.to_networkx()
# We scale the node sizes
node_sizes = list(range(nx.number_of_nodes(G)))
for i in range(nx.number_of_nodes(G)):
if scale_by_degree:
node_sizes[i] = nx.degree(G, i) * n_size
else:
node_sizes[i] = n_size
# We compute the layout
if layout == 'spring':
pos = nx.spring_layout(G)
elif layout == "circular":
pos = nx.circular_layout(G)
else:
pos = layout
# We compute the color_code
if n_color == 'fitness':
node_colors = [-isl.population.champion.f[0] for isl in self]
m = min(node_colors)
M = max(node_colors)
    elif n_color == 'links':
        node_colors = [
            t.get_num_adjacent_vertices(i) for i in range(len(self))]
        m = min(node_colors)
        M = max(node_colors)
elif n_color == 'rank':
vec = [-isl.population.champion.f[0] for isl in self]
node_colors = sorted(list(range(len(vec))), key=vec.__getitem__)
M = max(node_colors)
m = min(node_colors)
else:
node_colors = n_color
m = 0
M = 0
if not m == M:
node_colors = [(node_colors[i] - float(m)) / (M - m)
for i in range(len(self))]
# And we draw the archipelago .....
ax = pl.figure()
if cmap == 'default':
cmap = pl.cm.Reds_r
nx.draw_networkx_nodes(
G,
pos,
nodelist=list(
range(
len(self))),
node_color=node_colors,
cmap=cmap,
node_size=node_sizes,
alpha=n_alpha)
nx.draw_networkx_edges(G, pos, alpha=e_alpha, arrows=e_arrows)
pl.axis('off')
pl.show()
return pos
archipelago.draw = _archipelago_draw
def _pop_ctor(self, prob_or_pop, n_individuals=0, seed=None):
"""
Constructs a population.
    A population can be constructed in two ways, specified by prob_or_pop. If
prob_or_pop is a population (see USAGE 2 below), the other two arguments
are ignored, and the population is constructed by performing a deep-copy of
the provided population.
USAGE 1: pop = population(problem.zdt(), n_individuals=100, seed=1234)
    * prob_or_pop: problem to be associated with the population
* n_individuals: number of individuals in the population
* seed: seed used to randomly initialize the individuals
USAGE 2:
from PyGMO import *
pop1 = population(problem.schwefel(50), 10) #population with 10 individuals
pop2 = population(pop1) # pop2 is a copy of pop1
"""
arg_list = []
arg_list.append(prob_or_pop)
# For construction by copying, ignore the rest of the arguments (could be
# the default kwargs).
if not isinstance(prob_or_pop, population):
arg_list.append(n_individuals)
if seed is not None:
arg_list.append(seed)
return self._original_init(*arg_list)
population._original_init = population.__init__
population.__init__ = _pop_ctor
def _pop_plot_pareto_fronts(pop, rgb=(0, 0, 0), comp=[0, 1], symbol='o',
                            size=6, fronts=[]):
"""
Plots the population pareto front in a 2-D graph
    USAGE: pop.plot_pareto_fronts(comp = [0,1], rgb=(0,1,0))
* comp: components of the fitness function to plot in the 2-D window
* rgb: specify the color of the 1st front (use strong colors here)
* symbol: marker for the individual
    * size: size of the marker symbol
* fronts: list of fronts to be plotted (use [0] to only show the first)
"""
from numpy import linspace
import matplotlib.pyplot as plt
if len(comp) != 2:
raise ValueError(
'Invalid components of the objective function selected for plot')
p_dim = pop.problem.f_dimension
if p_dim == 1:
raise ValueError(
'Pareto fronts of a 1-dimensional problem cannot be plotted')
if not all([c in range(0, p_dim) for c in comp]):
raise ValueError(
'You need to select valid components of the objective function')
p_list = pop.compute_pareto_fronts()
if (len(fronts) > 0):
n = len(p_list)
consistent = [d < n for d in fronts]
if consistent.count(False) > 0:
raise ValueError(
                'Check your fronts list: there do not seem to be enough fronts')
p_list = [p_list[idx] for idx in fronts]
cl = list(zip(linspace(0.9 if rgb[0] else 0.1, 0.9, len(p_list)),
linspace(0.9 if rgb[1] else 0.1, 0.9, len(p_list)),
linspace(0.9 if rgb[2] else 0.1, 0.9, len(p_list))))
for id_f, f in enumerate(p_list):
for ind in f:
plt.plot([pop[ind].cur_f[comp[0]]],
[pop[ind].cur_f[comp[1]]],
symbol,
color=cl[id_f], markersize=size)
x = [pop[ind].cur_f[comp[0]] for ind in f]
y = [pop[ind].cur_f[comp[1]] for ind in f]
tmp = [(a, b) for a, b in zip(x, y)]
tmp = sorted(tmp, key=lambda k: k[0])
plt.step([c[0] for c in tmp], [c[1]
for c in tmp], color=cl[id_f], where='post')
return plt.gca()
population.plot_pareto_fronts = _pop_plot_pareto_fronts
def _pop_race(self, n_winners, min_trials=0, max_feval=500,
delta=0.05, racers_idx=[], race_best=True, screen_output=False):
"""
Races individuals in a population
USAGE: pop.race(n_winners, min_trials = 0, max_feval = 500, delta = 0.05, racers_idx = [], race_best=True, screen_output=False)
* n_winners: number of winners in the race
* min_trials: minimum amount of evaluations before an individual can stop racing
* max_feval: budget for objective function evaluation
* delta: Statistical test confidence
* racers_idx: indices of the individuals in pop to be raced
* race_best: when True winners are the best, otherwise winners are the worst
* screen_output: produces some screen output at each iteration of the race
"""
arg_list = []
arg_list.append(n_winners)
arg_list.append(min_trials)
arg_list.append(max_feval)
arg_list.append(delta)
arg_list.append(racers_idx)
arg_list.append(race_best)
arg_list.append(screen_output)
return self._orig_race(*arg_list)
population._orig_race = population.race
population.race = _pop_race
def _pop_repair(self, idx, repair_algorithm):
"""
Repairs the individual at the given position
USAGE: pop.repair(idx, repair_algorithm = _algorithm.jde())
* idx: index of the individual to repair
    * repair_algorithm: optimizer to use as the 'repairing' algorithm. It should be able to deal with a population of size 1.
"""
arg_list = []
arg_list.append(idx)
arg_list.append(repair_algorithm)
return self._orig_repair(*arg_list)
population._orig_repair = population.repair
population.repair = _pop_repair
| gpl-3.0 |
quasiben/bokeh | bokeh/models/sources.py | 6 | 9916 | from __future__ import absolute_import
from ..core import validation
from ..core.validation.errors import COLUMN_LENGTHS
from ..core.properties import abstract
from ..core.properties import Any, Int, String, Instance, List, Dict, Bool, Enum, JSON
from ..model import Model
from ..util.dependencies import import_optional
from ..util.deprecate import deprecated
from ..util.serialization import transform_column_source_data
from .callbacks import Callback
pd = import_optional('pandas')
@abstract
class DataSource(Model):
""" A base class for data source types. ``DataSource`` is
not generally useful to instantiate on its own.
"""
selected = Dict(String, Dict(String, Any), default={
'0d': {'glyph': None, 'indices': []},
'1d': {'indices': []},
'2d': {'indices': []}
}, help="""
    A dict to indicate selected indices on the different dimensions of this DataSource. Keys are:
    - 0d: indicates whether a Line or Patch glyph has been hit. Value is a
      dict with the following keys:
      - flag (boolean): true if the glyph was hit, false otherwise
      - indices (list): indices hit (if applicable)
    - 1d: indicates whether any other glyph (except [multi]line or
      patches) was hit:
      - indices (list): indices that were hit/selected
    - 2d: indicates whether a [multi]line or patches glyph was hit:
      - indices (list(list)): indices of the lines/patches that were
        hit/selected
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the selection is changed.
""")
class ColumnDataSource(DataSource):
""" Maps names of columns to sequences or arrays.
If the ColumnDataSource initializer is called with a single argument that
is a dict or pandas.DataFrame, that argument is used as the value for the
"data" attribute. For example::
ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)
ColumnDataSource(df) # same as ColumnDataSource(data=df)
.. note::
        There is an implicit assumption that all the columns in a
        given ColumnDataSource have the same length.
"""
data = Dict(String, Any, help="""
Mapping of column names to sequences of data. The data can be, e.g,
Python lists or tuples, NumPy arrays, etc.
""")
column_names = List(String, help="""
An list of names for all the columns in this DataSource.
""")
def __init__(self, *args, **kw):
""" If called with a single argument that is a dict or
pandas.DataFrame, treat that implicitly as the "data" attribute.
"""
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
if pd and isinstance(raw_data, pd.DataFrame):
raw_data = self._data_from_df(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
super(ColumnDataSource, self).__init__(**kw)
for name, data in raw_data.items():
self.add(data, name)
@staticmethod
def _data_from_df(df):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
df (DataFrame) : data to convert
Returns:
dict(str, list)
"""
index = df.index
new_data = {}
for colname in df:
new_data[colname] = df[colname].tolist()
if index.name:
new_data[index.name] = index.tolist()
elif index.names and not all([x is None for x in index.names]):
new_data["_".join(index.names)] = index.tolist()
else:
new_data["index"] = index.tolist()
return new_data
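# A hypothetical example of the index handling above (added for illustration;
# the column and index names are made up):
#
# df = pd.DataFrame({'y': [1, 2]}, index=pd.Index(['a', 'b'], name='label'))
# ColumnDataSource._data_from_df(df)
# # -> {'y': [1, 2], 'label': ['a', 'b']}
# # an unnamed index would instead fall back to a column called 'index'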
@classmethod
@deprecated("Bokeh 0.9.3", "ColumnDataSource initializer")
def from_df(cls, data):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
data (DataFrame) : data to convert
Returns:
dict(str, list)
"""
import warnings
warnings.warn("Method deprecated in Bokeh 0.9.3")
return cls._data_from_df(data)
def to_df(self):
""" Convert this data source to pandas dataframe.
If ``column_names`` is set, use those. Otherwise let Pandas
infer the column names. The ``column_names`` property can be
used both to order and filter the columns.
Returns:
DataFrame
"""
if not pd:
raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')
if self.column_names:
return pd.DataFrame(self.data, columns=self.column_names)
else:
return pd.DataFrame(self.data)
def add(self, data, name=None):
""" Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
"""
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.column_names.append(name)
self.data[name] = data
return name
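# Illustrative behaviour of the naming scheme above (added; hypothetical data):
#
# source = ColumnDataSource(data=dict())
# source.add([1, 2, 3]) # returns "Series 0"
# source.add([4, 5, 6]) # returns "Series 1"
# source.add([7, 8, 9], name='z') # returns "z"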
def _to_json_like(self, include_defaults):
attrs = super(ColumnDataSource, self)._to_json_like(include_defaults=include_defaults)
if 'data' in attrs:
attrs['data'] = transform_column_source_data(attrs['data'])
return attrs
def remove(self, name):
""" Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
"""
try:
self.column_names.remove(name)
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
@deprecated("Bokeh 0.11.0", "bokeh.io.push_notebook")
def push_notebook(self):
""" Update a data source for a plot in a Jupyter notebook.
This function can be used to update data in plot data sources
in the Jupyter notebook, without having to use the Bokeh server.
.. warning::
This function has been deprecated. Please use
``bokeh.io.push_notebook()`` which will push all changes
(not just data sources) to the last shown plot in a Jupyter
notebook.
Returns:
None
"""
from bokeh.io import push_notebook
push_notebook()
@validation.error(COLUMN_LENGTHS)
def _check_column_lengths(self):
lengths = set(len(x) for x in self.data.values())
if len(lengths) > 1:
return str(self)
def stream(self, new_data, rollover=None):
import numpy as np
newkeys = set(new_data.keys())
oldkeys = set(self.data.keys())
if newkeys != oldkeys:
missing = oldkeys - newkeys
extra = newkeys - oldkeys
if missing and extra:
raise ValueError("Must stream updates to all existing columns (missing: %s, extra: %s)" % (", ".join(sorted(missing)), ", ".join(sorted(extra))))
elif missing:
raise ValueError("Must stream updates to all existing columns (missing: %s)" % ", ".join(sorted(missing)))
else:
raise ValueError("Must stream updates to all existing columns (extra: %s)" % ", ".join(sorted(extra)))
lengths = set()
for x in new_data.values():
if isinstance(x, np.ndarray):
if len(x.shape) != 1:
raise ValueError("stream(...) only supports 1d sequences, got ndarray with size %r" % (x.shape,))
lengths.add(x.shape[0])
else:
lengths.add(len(x))
if len(lengths) > 1:
raise ValueError("All streaming column updates must be the same length")
self.data._stream(self.document, self, new_data, rollover)
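# A hedged usage sketch for stream() (added; the column names are hypothetical
# and the source is assumed to live in a running Bokeh document):
#
# source = ColumnDataSource(data=dict(t=[0.0], value=[1.0]))
# source.stream(dict(t=[0.1], value=[1.5]), rollover=200)
# # every existing column must appear in the update, and any ndarray
# # passed in must be one-dimensional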
class GeoJSONDataSource(ColumnDataSource):
geojson = JSON(help="""
GeoJSON that contains features for plotting. Currently GeoJSONDataSource can
only process a FeatureCollection or GeometryCollection.
""")
@abstract
class RemoteSource(ColumnDataSource):
data_url = String(help="""
The URL to the endpoint for the data.
""")
polling_interval = Int(help="""
polling interval for updating data source in milliseconds
""")
class AjaxDataSource(RemoteSource):
method = Enum('POST', 'GET', help="http method - GET or POST")
mode = Enum("replace", "append", help="""
Whether to append new data to existing data (up to ``max_size``),
or to replace existing data entirely.
""")
max_size = Int(help="""
Maximum size of the data array to keep after each pull request.
Beyond that size, the data will be right-shifted.
""")
if_modified = Bool(False, help="""
Whether to include an ``If-Modified-Since`` header in AJAX requests
to the server. If this header is supported by the server, then only
new data since the last request will be returned.
""")
content_type = String(default='application/json', help="""
Set the "contentType" parameter for the Ajax request.
""")
http_headers = Dict(String, String, help="""
HTTP headers to set for the Ajax request.
""")
| bsd-3-clause |
michaelpacer/networkx | examples/drawing/chess_masters.py | 54 | 5146 | #!/usr/bin/env python
"""
An example of the MultiDiGraph class
The function chess_pgn_graph reads a collection of chess
matches stored in the specified PGN file
(PGN ="Portable Game Notation")
Here the (compressed) default file ---
chess_masters_WCC.pgn.bz2 ---
contains all 685 World Chess Championship matches
from 1886 - 1985.
(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php)
The chess_pgn_graph() function returns a MultiDiGraph
with multiple edges. Each node is
the last name of a chess master. Each edge is directed
from white to black and contains selected game info.
The key statement in chess_pgn_graph below is
G.add_edge(white, black, game_info)
where game_info is a dict describing each game.
"""
# Copyright (C) 2006-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
# tag names specifying what game info should be
# stored in the dict on each digraph edge
game_details=["Event",
"Date",
"Result",
"ECO",
"Site"]
def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"):
"""Read chess games in pgn format in pgn_file.
Filenames ending in .gz or .bz2 will be uncompressed.
Return the MultiDiGraph of players connected by a chess game.
Edges contain game data in a dict.
"""
import bz2
G=nx.MultiDiGraph()
game={}
datafile = bz2.BZ2File(pgn_file)
lines = (line.decode().rstrip('\r\n') for line in datafile)
for line in lines:
if line.startswith('['):
tag,value=line[1:-1].split(' ',1)
game[str(tag)]=value.strip('"')
else:
# empty line after tag set indicates
# we finished reading game info
if game:
white=game.pop('White')
black=game.pop('Black')
G.add_edge(white, black, **game)
game={}
return G
if __name__ == '__main__':
import networkx as nx
G=chess_pgn_graph()
ngames=G.number_of_edges()
nplayers=G.number_of_nodes()
print("Loaded %d chess games between %d players\n"\
% (ngames,nplayers))
# identify connected components
# of the undirected version
Gcc=list(nx.connected_component_subgraphs(G.to_undirected()))
if len(Gcc)>1:
print("Note the disconnected component consisting of:")
print(Gcc[1].nodes())
# find all games with B97 opening (as described in ECO)
openings=set([game_info['ECO']
for (white,black,game_info) in G.edges(data=True)])
print("\nFrom a total of %d different openings,"%len(openings))
print('the following games used the Sicilian opening')
print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n')
for (white,black,game_info) in G.edges(data=True):
if game_info['ECO']=='B97':
print(white,"vs",black)
for k,v in game_info.items():
print(" ",k,": ",v)
print("\n")
try:
import matplotlib.pyplot as plt
except ImportError:
import sys
print("Matplotlib needed for drawing. Skipping")
sys.exit(0)
# make new undirected graph H without multi-edges
H=nx.Graph(G)
# edge width is proportional number of games played
edgewidth=[]
for (u,v,d) in H.edges(data=True):
edgewidth.append(len(G.get_edge_data(u,v)))
# node size is proportional to number of games won
wins=dict.fromkeys(G.nodes(),0.0)
for (u,v,d) in G.edges(data=True):
r=d['Result'].split('-')
if r[0]=='1':
wins[u]+=1.0
elif r[0]=='1/2':
wins[u]+=0.5
wins[v]+=0.5
else:
wins[v]+=1.0
try:
pos=nx.graphviz_layout(H)
except:
pos=nx.spring_layout(H,iterations=20)
plt.rcParams['text.usetex'] = False
plt.figure(figsize=(8,8))
nx.draw_networkx_edges(H,pos,alpha=0.3,width=edgewidth, edge_color='m')
nodesize=[wins[v]*50 for v in H]
nx.draw_networkx_nodes(H,pos,node_size=nodesize,node_color='w',alpha=0.4)
nx.draw_networkx_edges(H,pos,alpha=0.4,node_size=0,width=1,edge_color='k')
nx.draw_networkx_labels(H,pos,fontsize=14)
font = {'fontname' : 'Helvetica',
'color' : 'k',
'fontweight' : 'bold',
'fontsize' : 14}
plt.title("World Chess Championship Games: 1886 - 1985", font)
# change font and write text (using data coordinates)
font = {'fontname' : 'Helvetica',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 14}
plt.text(0.5, 0.97, "edge width = # games played",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.text(0.5, 0.94, "node size = # games won",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.axis('off')
plt.savefig("chess_masters.png",dpi=75)
print("Wrote chess_masters.png")
plt.show() # display
| bsd-3-clause |
JuanMatSa/PyFME | examples/example_005.py | 2 | 3361 | # -*- coding: utf-8 -*-
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Example
-------
Cessna 310, ISA1976 integrated with Flat Earth (euler angles).
Example with trimmed aircraft: stationary, turn during ascent.
The main purpose of this example is to check if the aircraft trimmed in a given
state maintains the trimmed flight condition.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pyfme.aircrafts import Cessna310
from pyfme.environment.environment import Environment
from pyfme.environment.atmosphere import ISA1976
from pyfme.environment.gravity import VerticalConstant
from pyfme.environment.wind import NoWind
from pyfme.models.systems import EulerFlatEarth
from pyfme.simulator import BatchSimulation
from pyfme.utils.trimmer import steady_state_flight_trimmer
aircraft = Cessna310()
atmosphere = ISA1976()
gravity = VerticalConstant()
wind = NoWind()
environment = Environment(atmosphere, gravity, wind)
# Initial conditions.
TAS = 312.5 * 0.3048 # m/s
h0 = 8000 * 0.3048 # m
psi0 = 1 # rad
x0, y0 = 0, 0 # m
turn_rate = 0.1 # rad/s
gamma0 = 0.05 # rad
system = EulerFlatEarth(lat=0, lon=0, h=h0, psi=psi0, x_earth=x0, y_earth=y0)
not_trimmed_controls = {'delta_elevator': 0.05,
'hor_tail_incidence': 0.00,
'delta_aileron': 0.01 * np.sign(turn_rate),
'delta_rudder': 0.01 * np.sign(turn_rate),
'delta_t': 0.5}
controls2trim = ['delta_elevator', 'delta_aileron', 'delta_rudder', 'delta_t']
trimmed_ac, trimmed_sys, trimmed_env, results = steady_state_flight_trimmer(
aircraft, system, environment, TAS=TAS, controls_0=not_trimmed_controls,
controls2trim=controls2trim, gamma=gamma0, turn_rate=turn_rate, verbose=2)
print(results)
my_simulation = BatchSimulation(trimmed_ac, trimmed_sys, trimmed_env)
tfin = 150 # seconds
N = tfin * 100 + 1
time = np.linspace(0, tfin, N)
initial_controls = trimmed_ac.controls
controls = {}
for control_name, control_value in initial_controls.items():
controls[control_name] = np.ones_like(time) * control_value
my_simulation.set_controls(time, controls)
par_list = ['x_earth', 'y_earth', 'height',
'psi', 'theta', 'phi',
'u', 'v', 'w',
'v_north', 'v_east', 'v_down',
'p', 'q', 'r',
'alpha', 'beta', 'TAS',
'F_xb', 'F_yb', 'F_zb',
'M_xb', 'M_yb', 'M_zb']
my_simulation.set_par_dict(par_list)
my_simulation.run_simulation()
# print(my_simulation.par_dict)
plt.style.use('ggplot')
for ii in range(len(par_list) // 3):
three_params = par_list[3*ii:3*ii+3]
fig, ax = plt.subplots(3, 1, sharex=True)
for jj, par in enumerate(three_params):
ax[jj].plot(time, my_simulation.par_dict[par])
ax[jj].set_ylabel(par)
ax[jj].set_xlabel('time (s)')
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'])
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'] * 0)
ax.set_xlabel('x_earth')
ax.set_ylabel('y_earth')
ax.set_zlabel('z_earth')
plt.show()
| mit |
ExeClim/Isca | src/extra/python/isca/land_generator_fn.py | 4 | 11099 | # Function to allow land to be generated for a range of scenarios
# Land Options:
# 'square' (default) Square block of land with boundaries specified by boundaries keyword, a list of 4 floats in the form [S,N,W,E]
# 'continents_old' Choose continents from the original continent set-up adapted from the Sauliere 2012 paper (Jan 16), including North and South America, Eurasia, and Africa.
# 'continents' Choose continents from a newer continent set-up allowing addition of India, Australia, and South East Asia.
# If continents keyword is set to 'all' (default), then this will include all possibilities for the given set-up
# Alternatively, continents can be set to a list of strings containing options:
# NA - North America
# SA - South America
# EA - Eurasia
# AF - Africa
# OZ - Australia
# IN - India
# SEA - South East Asia
# Topography Options:
#'none' (default) Topography set to zero everywhere
#'sauliere2012' Choose mountains from Sauliere 2012 configuration using mountains keyword. Default is 'all', alternatively only 'rockys' or 'tibet' may be specified
#'gaussian' Use parameters specified in topo_gauss keyword to set up a Gaussian mountain. topo_gauss should be a list in the form: [central_lat,central_lon,radius_degrees,std_dev,height]
# Topography boundary options:
# If waterworld keyword is set to False (default), then topography can only be non-zero on continents - important as topography has a Gaussian structure and tends exponentially to zero.
# If waterworld keyword is set to True, aquamountains are possible - extra work needed here to deal with exponential issues!
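# Hedged usage sketches (added for illustration; 'my_exp' is a hypothetical
# experiment name, and the grid file must exist under GFDL_BASE):
#
# write_land('my_exp', land_mode='continents', continents=['NA', 'EA'])
# write_land('my_exp', land_mode='square', boundaries=[0., 30., 0., 60.],
# topo_mode='gaussian', topo_gauss=[15., 30., 20., 10., 3500.])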
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import os
def write_land(exp,land_mode='square',boundaries=[20.,60.,20.,60.],continents=['all'],topo_mode='none',mountains=['all'],topo_gauss=[40.,40.,20.,10.,3500.],waterworld=False):
# Common features of set-ups
# specify resolution
t_res = 42
#read in grid from approriate file
GFDL_BASE = os.environ['GFDL_BASE']
resolution_file = Dataset(GFDL_BASE + 'src/extra/python/scripts/gfdl_grid_files/t'+str(t_res)+'.nc', 'r', format='NETCDF3_CLASSIC')
lons = resolution_file.variables['lon'][:]
lats = resolution_file.variables['lat'][:]
lonb = resolution_file.variables['lonb'][:]
latb = resolution_file.variables['latb'][:]
nlon=lons.shape[0]
nlat=lats.shape[0]
topo_array = np.zeros((nlat,nlon))
land_array = np.zeros((nlat,nlon))
#make 2d arrays of latitude and longitude
lon_array, lat_array = np.meshgrid(lons,lats)
lonb_array, latb_array = np.meshgrid(lonb,latb)
#create dictionary for continents
cont_dic = {'NA':0, 'SA':1, 'EA':2, 'AF':3, 'OZ':4, 'IN':5, 'SEA':6}
# Firstly determine the land set-up to be used
# 1) Set-up in which a square of land is included
if land_mode=='square':
idx = (boundaries[0] <= lat_array) & (lat_array < boundaries[1]) & (boundaries[2] < lon_array) & (boundaries[3] > lon_array)
land_array[idx] = 1.0
# 2) Set-up in which some or all of 'original' continents are included
elif land_mode=='continents_old': #Older configuration of continents: Addition of India and SE Asia required some restructuring. This may be removed once obsolete.
idx_c = np.zeros((4,nlat,nlon), dtype=bool)
idx_c[0,:,:] = (103.-43./40.*(lon_array-180) < lat_array) & ((lon_array-180)*43./50. -51.8 < lat_array) &( lat_array < 60.) #North America
idx_c[1,:,:] = (737.-7.2*(lon_array-180) < lat_array) & ((lon_array-180)*10./7. + -212.1 < lat_array) &( lat_array < -22./45*(lon_array-180) +65.9) #South America
eurasia_pos = (17. <= lat_array) & (lat_array < 60.) & (-5. < lon_array) & ( 43./40.*lon_array -101.25 < lat_array)
eurasia_neg = (17. <= lat_array) & (lat_array < 60.) & (355. < lon_array)
idx_c[2,:,:] = eurasia_pos + eurasia_neg #Eurasia
africa_pos = (lat_array < 17.) & (-52./27.*lon_array + 7.37 < lat_array) & (52./38.*lon_array -65.1 < lat_array)
africa_neg = (lat_array < 17.) & (-52./27.*(lon_array-360) + 7.37 < lat_array)
idx_c[3,:,:] = africa_pos + africa_neg #Africa
if 'all' in continents:
idx = idx_c[0,:,:] + idx_c[1,:,:] + idx_c[2,:,:] + idx_c[3,:,:]
land_array[idx] = 1.
else:
idx = np.zeros((nlat,nlon), dtype=bool)
for cont in continents:
idx = idx + idx_c[cont_dic[cont],:,:]
land_array[idx] = 1.0
# 2) Set-up in which some or all of 'new' continents are included
elif land_mode=='continents':
idx_c = np.zeros((7,nlat,nlon), dtype=bool)
idx_c[0,:,:] = (103.-43./40.*(lon_array-180) < lat_array) & ((lon_array-180)*43./50. -51.8 < lat_array) &( lat_array < 60.) #North America
idx_c[1,:,:] = (737.-7.2*(lon_array-180) < lat_array) & ((lon_array-180)*10./7. + -212.1 < lat_array) &( lat_array < -22./45*(lon_array-180) +65.9) #South America
eurasia_pos = (23. <= lat_array) & (lat_array < 60.) & (-8. < lon_array) & ( 43./40.*lon_array -101.25 < lat_array)
eurasia_neg = (23. <= lat_array) & (lat_array < 60.) & (352. < lon_array)
idx_c[2,:,:] = eurasia_pos + eurasia_neg #Eurasia
africa_pos = (lat_array < 23.) & (-52./27.*lon_array + 7.59 < lat_array) & (52./38.*lon_array -65.1 < lat_array)
africa_neg = (lat_array < 23.) & (-52./27.*(lon_array-360) + 7.59 < lat_array)
idx_c[3,:,:] = africa_pos + africa_neg #Africa
idx_c[4,:,:] = (lat_array > - 35.) & (lat_array < -17.) & (lon_array > 115.) & (lon_array < 150.) #Australia
idx_c[5,:,:] = (lat_array < 23.) & (-15./8.*lon_array + 152 < lat_array) & (15./13.*lon_array - 81 < lat_array) #India
idx_c[6,:,:] = (lat_array < 23.) & ( 43./40.*lon_array -101.25 < lat_array) & (-14./13.*lon_array +120 < lat_array) #South East Asia
if 'all' in continents:
idx = idx_c[0,:,:] + idx_c[1,:,:] + idx_c[2,:,:] + idx_c[3,:,:] + idx_c[4,:,:] + idx_c[5,:,:] + idx_c[6,:,:]
land_array[idx] = 1.
else:
idx = np.zeros((nlat,nlon), dtype=bool)
for cont in continents:
idx = idx + idx_c[cont_dic[cont],:,:]
land_array[idx] = 1.
elif land_mode=='none':
land_array = np.zeros((nlat,nlon))
# Next produce a topography array
if topo_mode == 'none':
topo_array = np.zeros((nlat,nlon))
elif topo_mode == 'sauliere2012':
# Rockys from Sauliere 2012
h_0 = 2670.
central_lon = 247.5
central_lat = 40.
L_1 = 7.5
L_2 = 20.
gamma_1 = 42.
gamma_2 = 42.
delta_1 = ((lon_array - central_lon)*np.cos(np.radians(gamma_1)) + (lat_array - central_lat)*np.sin(np.radians(gamma_1)))/L_1
delta_2 = (-(lon_array - central_lon)*np.sin(np.radians(gamma_2)) + (lat_array - central_lat)*np.cos(np.radians(gamma_2)))/L_2
h_arr_rockys = h_0 * np.exp(-(delta_1**2. + delta_2**2.))
idx_rockys = (h_arr_rockys / h_0 > 0.05) # make sure exponentials are cut at some point - use the value from p70 of Brayshaw's thesis.
#Tibet from Sauliere 2012
h_0 = 5700.
central_lon = 82.5
central_lat = 28
L_1 = 12.5
L_2 = 12.5
gamma_1 = -49.5
gamma_2 = -18.
delta_1 = ((lon_array - central_lon)*np.cos(np.radians(gamma_1)) + (lat_array - central_lat)*np.sin(np.radians(gamma_1)))/L_1
delta_2 = (-(lon_array - central_lon)*np.sin(np.radians(gamma_2)) + (lat_array - central_lat)*np.cos(np.radians(gamma_2)))/L_2
h_arr_tibet_no_amp = np.exp(-(delta_1**2.))*(1./delta_2)*np.exp(-0.5*(np.log(delta_2))**2.)
maxval = np.nanmax(h_arr_tibet_no_amp) #For some reason my maximum value of h_arr_tibet_no_amp > 1. Renormalise so h_0 sets amplitude.
h_arr_tibet = (h_arr_tibet_no_amp/maxval)*h_0
idx_tibet = (h_arr_tibet / h_0 > 0.05)
if 'all' in mountains:
topo_array[idx_rockys] = h_arr_rockys[idx_rockys]
topo_array[idx_tibet] = h_arr_tibet[idx_tibet]
elif 'rockys' in mountains:
topo_array[idx_rockys] = h_arr_rockys[idx_rockys]
elif 'tibet' in mountains:
topo_array[idx_tibet] = h_arr_tibet[idx_tibet]
else:
print('No valid mountain options detected for Sauliere 2012 topography')
elif topo_mode == 'gaussian':
#Options to define simple Gaussian Mountain
central_lat = topo_gauss[0]
central_lon = topo_gauss[1]
radius_degrees = topo_gauss[2]
std_dev = topo_gauss[3]
height = topo_gauss[4]
rsqd_array = np.sqrt((lon_array - central_lon)**2.+(lat_array - central_lat)**2.)
#generalise to ellipse - needs checking but may be useful later (RG)
#ax_rot = 1. #gradient of new x axis
#ax_rat = 2. #axis ratio a**2/b**2
#rsqd_array = np.sqrt((lon_array - central_lon + ax_rot*(lat_array - central_lat))**2.+ ax_rat*(lat_array - central_lat - ax_rot*(lon_array - central_lon))**2.)*np.cos(np.arctan(ax_rot))
#divide by factor of cos(atan(m)) to account for change in coords
idx = (rsqd_array < radius_degrees)
topo_array[idx] = height* np.exp(-(rsqd_array[idx]**2.)/(2.*std_dev**2.))
else:
print('Invalid topography option given')
if waterworld != True: #Leave flexibility to allow aquamountains!
idx = (land_array == 0.) & (topo_array != 0.)
topo_array[idx] = 0.
#Write land and topography arrays to file
topo_filename = GFDL_BASE + 'exp/' + exp + '/input/land.nc'
topo_file = Dataset(topo_filename, 'w', format='NETCDF3_CLASSIC')
lat = topo_file.createDimension('lat', nlat)
lon = topo_file.createDimension('lon', nlon)
latitudes = topo_file.createVariable('lat','f4',('lat',))
longitudes = topo_file.createVariable('lon','f4',('lon',))
topo_array_netcdf = topo_file.createVariable('zsurf','f4',('lat','lon',))
land_array_netcdf = topo_file.createVariable('land_mask','f4',('lat','lon',))
latitudes[:] = lats
longitudes[:] = lons
topo_array_netcdf[:] = topo_array
land_array_netcdf[:] = land_array
topo_file.close()
print('Output written to: ' + topo_filename)
#Show configuration on screen to allow checking
lon_0 = lons.mean()
lat_0 = lats.mean()
m = Basemap(lat_0=lat_0,lon_0=lon_0)
xi, yi = m(lon_array, lat_array)
plt.figure()
if land_mode != 'none':
m.contour(xi,yi,land_array)
if topo_mode != 'none':
cs = m.contourf(xi,yi,topo_array, cmap=plt.get_cmap('RdBu_r'))
cb = plt.colorbar(cs, shrink=0.5, extend='both')
plt.xticks(np.linspace(0,360,13))
plt.yticks(np.linspace(-90,90,7))
plt.show()
if __name__ == "__main__":
write_land('test',land_mode='continents')
| gpl-3.0 |
Eric89GXL/scikit-learn | sklearn/cross_decomposition/cca_.py | 5 | 3379 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q]
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation should be done on a copy. Leave the default value
(True) unless you don't care about side effects.
Attributes
----------
`x_weights_` : array, [p, n_components]
X block weights vectors.
`y_weights_` : array, [q, n_components]
Y block weights vectors.
`x_loadings_` : array, [p, n_components]
X block loadings vectors.
`y_loadings_` : array, [q, n_components]
Y block loadings vectors.
`x_scores_` : array, [n_samples, n_components]
X scores.
`y_scores_` : array, [n_samples, n_components]
Y scores.
`x_rotations_` : array, [p, n_components]
X block to latents rotations.
`y_rotations_` : array, [q, n_components]
Y block to latents rotations.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
JeanKossaifi/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This examples shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
| bsd-3-clause |
binghongcha08/pyQMD | GWP/QTGB/traj.py | 14 | 1289 | #!/usr/bin/python
import numpy as np
import pylab as plt
import seaborn as sns
sns.set_context("poster")
#with open("traj.dat") as f:
# data = f.read()
#
# data = data.split('\n')
#
# x = [row.split(' ')[0] for row in data]
# y = [row.split(' ')[1] for row in data]
#
# fig = plt.figure()
#
# ax1 = fig.add_subplot(111)
#
# ax1.set_title("Plot title...")
# ax1.set_xlabel('your x label..')
# ax1.set_ylabel('your y label...')
#
# ax1.plot(x,y, c='r', label='the data')
#
# leg = ax1.legend()
#fig = plt.figure()
plt.subplot(121)
#plt.ylim(-8,8)
data = np.genfromtxt(fname='q.dat')
#data = np.loadtxt('traj.dat')
for x in range(1,data.shape[1]):
plt.plot(data[:,0],data[:,x])
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
plt.xlabel('time')
#plt.ylabel('position')
#plt.title('traj')
ax2 = plt.subplot(122)
data = np.genfromtxt(fname='c.dat')
#data = np.loadtxt('traj.dat')
for x in range(1,data.shape[1]):
plt.plot(data[:,0],data[:,x])
plt.xlabel('time')
ax2.yaxis.tick_right()
ax2.yaxis.set_ticks_position('both')
plt.ylim(-0.2,5)
#plt.subplot(2,2,3)
#data = np.genfromtxt(fname='norm')
#plt.plot(data[:,0],data[:,1],'r-',linewidth=2)
#plt.ylabel('Norm')
#plt.ylim(0,2)
plt.legend()
plt.savefig('traj.pdf')
plt.show()
| gpl-3.0 |
chrisfilo/fmriprep | test/workflows/test_confounds.py | 2 | 2506 | ''' Testing module for fmriprep.workflows.confounds '''
import logging
import os
import mock
import pandas as pd
from fmriprep.workflows.confounds import discover_wf, _gather_confounds
from test.workflows.utilities import TestWorkflow
from test.workflows import stub
logging.disable(logging.INFO) # don't print unnecessary msgs
class TestConfounds(TestWorkflow):
''' Testing class for fmriprep.workflows.confounds '''
def test_discover_wf(self):
# run
workflow = discover_wf(stub.settings({'biggest_epi_file_size_gb': 1,
'skip_native': False}))
workflow.write_hierarchical_dotfile()
# assert
# check some key paths
self.assert_circular(workflow, [
('outputnode', 'inputnode', [('confounds_file', 'fmri_file')]),
('DerivConfounds', 'inputnode', [('out_file', 'fmri_file')])
])
# Make sure mandatory inputs are set
self.assert_inputs_set(workflow, {'outputnode': ['confounds_file'],
'ConcatConfounds': ['signals', 'dvars', 'frame_displace',
#'acompcor', See confounds.py
'tcompcor'],
'tCompCor': ['components_file']})
# 'aCompCor': ['components_file', 'mask_file'], }) see ^^
@mock.patch('pandas.read_csv')
@mock.patch.object(pd.DataFrame, 'to_csv', autospec=True)
@mock.patch.object(pd.DataFrame, '__eq__', autospec=True,
side_effect=lambda me, them: me.equals(them))
def test_gather_confounds(self, df_equality, mock_df, mock_csv_reader):
''' asserts that the function for node ConcatConfounds reads and writes
the confounds properly '''
# set up
signals = "signals.tsv"
dvars = "dvars.tsv"
mock_csv_reader.side_effect = [pd.DataFrame({'a': [0.1]}), pd.DataFrame({'b': [0.2]})]
# run
_gather_confounds(signals, dvars)
# assert
calls = [mock.call(confounds, sep="\t") for confounds in [signals, dvars]]
mock_csv_reader.assert_has_calls(calls)
confounds = pd.DataFrame({'a': [0.1], 'b': [0.2]})
mock_df.assert_called_once_with(confounds, os.path.abspath("confounds.tsv"),
na_rep='n/a', index=False, sep="\t")
| bsd-3-clause |
rdhyee/PyTables | doc/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException, e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
| bsd-3-clause |
richardwolny/sms-tools | lectures/05-Sinusoidal-model/plots-code/synthesis-window.py | 22 | 1725 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
from scipy.fftpack import fft, ifft, fftshift
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
M = 601
w = np.blackman(M)
N = 1024
hN = N/2
Ns = 512
hNs = Ns/2
H = Ns/4
pin = 5000
t = -70
x1 = x[pin:pin+w.size]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
freqs = iploc*fs/N
Y = UF.genSpecSines(freqs, ipmag, ipphase, Ns, fs)
mY = 20*np.log10(abs(Y[:hNs]))
pY = np.unwrap(np.angle(Y[:hNs]))
y= fftshift(ifft(Y))*sum(blackmanharris(Ns))
sw = np.zeros(Ns)
ow = triang(2*H);
sw[hNs-H:hNs+H] = ow
bh = blackmanharris(Ns)
bh = bh / sum(bh)
sw[hNs-H:hNs+H] = sw[hNs-H:hNs+H] / bh[hNs-H:hNs+H]
plt.figure(1, figsize=(9, 6))
plt.subplot(3,1,1)
plt.plot(np.arange(hNs), mY, 'r', lw=1.5)
plt.axis([0, hNs,-90,max(mY)+2])
plt.title("mY, Blackman-Harris, Ns = 512")
plt.subplot(3,1,2)
plt.plot(np.arange(-hNs,hNs), y, 'b', lw=1.5)
plt.plot(np.arange(-hNs,hNs), max(y)*bh/max(bh), 'k', alpha=.5,lw=1.5)
plt.axis([-hNs, hNs,min(y),max(y)+.1])
plt.title("y, size = Ns = 512 (Blackman-Harris window)")
yw = y * sw / max(sw)
plt.subplot(3,1,3)
plt.plot(np.arange(-hNs,hNs), yw, 'b',lw=1.5)
plt.plot(np.arange(-hNs/2,hNs/2), max(y)*ow/max(ow), 'k', alpha=.5,lw=1.5)
plt.axis([-hNs, hNs,min(yw),max(yw)+.1])
plt.title("yw = y * triangular / Blackman Harris; size = Ns/2 = 256")
plt.tight_layout()
plt.savefig('synthesis-window.png')
plt.show()
| agpl-3.0 |
gtrensch/nest-simulator | pynest/examples/pulsepacket.py | 8 | 11262 | # -*- coding: utf-8 -*-
#
# pulsepacket.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Pulse packet example
--------------------
This script compares the average and individual membrane potential excursions
in response to a single pulse packet with an analytically acquired voltage
trace (see: Diesmann [1]_)
A pulse packet is a transient spike volley with a Gaussian rate profile.
The user can specify the neural parameters, the parameters of the
pulse-packet and the number of trials.
References
~~~~~~~~~~
.. [1] Diesmann M. 2002. Dissertation. Conditions for stable propagation of
synchronous spiking in cortical neural networks: Single neuron dynamics
and network properties.
http://d-nb.info/968772781/34.
"""
###############################################################################
# First, we import all necessary modules for simulation, analysis and
# plotting.
import scipy.special as sp
import nest
import numpy
import matplotlib.pyplot as plt
# Properties of pulse packet:
a = 100 # number of spikes in one pulse packet
sdev = 10. # width of pulse packet (ms)
weight = 0.1 # PSP amplitude (mV)
pulsetime = 500. # occurrence time (center) of pulse-packet (ms)
# Network and neuron characteristics:
n_neurons = 100 # number of neurons
cm = 200. # membrane capacitance (pF)
tau_s = 0.5 # synaptic time constant (ms)
tau_m = 20. # membrane time constant (ms)
V0 = 0.0 # resting potential (mV)
Vth = numpy.inf # firing threshold, high value to avoid spiking
# Simulation and analysis parameters:
simtime = 1000. # how long we simulate (ms)
simulation_resolution = 0.1 # (ms)
sampling_resolution = 1. # for voltmeter (ms)
convolution_resolution = 1. # for the analytics (ms)
# Some parameters in base units.
Cm = cm * 1e-12 # convert to Farad
Weight = weight * 1e-12 # convert to Ampere
Tau_s = tau_s * 1e-3 # convert to sec
Tau_m = tau_m * 1e-3 # convert to sec
Sdev = sdev * 1e-3 # convert to sec
Convolution_resolution = convolution_resolution * 1e-3 # convert to sec
###############################################################################
# This function calculates the membrane potential excursion in response
# to a single input spike (the equation is given for example in Diesmann [1]_,
# eq.2.3).
# It expects:
#
# * ``Time``: a time array or a single time point (in sec)
# * ``Tau_s`` and ``Tau_m``: the synaptic and the membrane time constant (in sec)
# * ``Cm``: the membrane capacity (in Farad)
# * ``Weight``: the synaptic weight (in Ampere)
#
# It returns the provoked membrane potential (in mV)
def make_psp(Time, Tau_s, Tau_m, Cm, Weight):
term1 = (1 / Tau_s - 1 / Tau_m)
term2 = numpy.exp(-Time / Tau_s)
term3 = numpy.exp(-Time / Tau_m)
PSP = (Weight / Cm * numpy.exp(1) / Tau_s *
(((-Time * term2) / term1) + (term3 - term2) / term1 ** 2))
return PSP * 1e3
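# For reference (added): the expression implemented above is
# PSP(t) = (Weight * e / (Cm * Tau_s)) * [ -t * exp(-t/Tau_s) / k
# + (exp(-t/Tau_m) - exp(-t/Tau_s)) / k**2 ], with k = 1/Tau_s - 1/Tau_m,
# returned in mV via the trailing factor of 1e3.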
###############################################################################
# This function finds the exact location of the maximum of the PSP caused by a
# single input spike. The location is obtained by setting the first derivative
# of the equation for the PSP (see ``make_psp()``) to zero. The resulting
# equation can be expressed in terms of a `LambertW function`.
# This function expects:
#
# * ``Tau_s`` and ``Tau_m``: the synaptic and membrane time constant (in sec)
#
# It returns the location of the maximum (in sec)
def LambertWm1(x):
# Using scipy to mimic the gsl_sf_lambert_Wm1 function.
return sp.lambertw(x, k=-1 if x < 0 else 0).real
def find_loc_pspmax(tau_s, tau_m):
var = tau_m / tau_s
lam = LambertWm1(-numpy.exp(-1 / var) / var)
t_maxpsp = (-var * lam - 1) / var / (1 / tau_s - 1 / tau_m) * 1e-3
return t_maxpsp
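# Sketch of the algebra behind find_loc_pspmax (added for clarity): with
# k = 1/tau_s - 1/tau_m and var = tau_m/tau_s, setting dPSP/dt = 0 gives
# exp(k*t) = 1 + var*k*t. Substituting w = -1/var - k*t turns this into
# w*exp(w) = -exp(-1/var)/var, i.e. w = W_{-1}(-exp(-1/var)/var), so
# k*t_max = -(var*w + 1)/var, which is what the code returns (times 1e-3).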
###############################################################################
# First, we construct a Gaussian kernel for a given standard deviation
# (``sig``) and mean value (``mu``). In this case the standard deviation is
# the width of the pulse packet (see [1]_).
sig = Sdev
mu = 0.0
x = numpy.arange(-4 * sig, 4 * sig, Convolution_resolution)
term1 = 1 / (sig * numpy.sqrt(2 * numpy.pi))
term2 = numpy.exp(-(x - mu)**2 / (sig**2 * 2))
gauss = term1 * term2 * Convolution_resolution
###############################################################################
# Second, we calculate the PSP of a neuron due to a single spiking input.
# (see Diesmann 2002, eq. 2.3).
# Since we do that in discrete time steps, we first construct an array
# (``t_psp``) that contains the time points we want to consider. Then, the
# function ``make_psp()`` (that creates the PSP) takes the time array as its
# first argument.
t_psp = numpy.arange(0, 10 * (Tau_m + Tau_s), Convolution_resolution)
psp = make_psp(t_psp, Tau_s, Tau_m, Cm, Weight)
###############################################################################
# Now, we want to normalize the PSP amplitude to one. We therefore have to
# divide the PSP by its maximum ([1]_ sec 6.1). The function
# ``find_loc_pspmax()`` returns the exact time point (``t_pspmax``) when we
# expect the maximum to occur. The function ``make_psp()`` calculates the
# corresponding PSP value, which is our PSP amplitude (``psp_amp``).
t_pspmax = find_loc_pspmax(Tau_s, Tau_m)
psp_amp = make_psp(t_pspmax, Tau_s, Tau_m, Cm, Weight)
psp_norm = psp / psp_amp
###############################################################################
# Now we have all ingredients to compute the membrane potential excursion
# (`U`). This calculation implies a convolution of the Gaussian with the
# normalized PSP (see [1]_, eq. 6.9). In order to avoid an offset in the
# convolution, we need to add a pad of zeros on the left side of the
# normalized PSP. Later on we want to compare our analytical results with the
# simulation outcome. Therefore we need a time vector (`t_U`) with the correct
# temporal resolution, which places the excursion of the potential at the
# correct time.
psp_norm = numpy.pad(psp_norm, [len(psp_norm) - 1, 1], mode='constant')
U = a * psp_amp * numpy.convolve(gauss, psp_norm)
ulen = len(U)
t_U = (convolution_resolution * numpy.linspace(-ulen / 2., ulen / 2., ulen) +
pulsetime + 1.)
###############################################################################
# In this section we simulate a network of multiple neurons.
# All these neurons receive an individual pulse packet that is drawn from a
# Gaussian distribution.
#
# We reset the Kernel, define the simulation resolution and set the
# verbosity using ``set_verbosity`` to suppress info messages.
nest.ResetKernel()
nest.SetKernelStatus({'resolution': simulation_resolution})
nest.set_verbosity("M_WARNING")
###############################################################################
# Afterwards we create several neurons, the same amount of
# pulse-packet-generators and a voltmeter. All these nodes/devices
# have specific properties that are specified in device specific
# dictionaries (here: `neuron_pars` for the neurons, `ppg_pars`
# for the pulse-packet-generators and `vm_pars` for the voltmeter).
neuron_pars = {
'V_th': Vth,
'tau_m': tau_m,
'tau_syn_ex': tau_s,
'C_m': cm,
'E_L': V0,
'V_reset': V0,
'V_m': V0
}
neurons = nest.Create('iaf_psc_alpha', n_neurons, neuron_pars)
ppg_pars = {
'pulse_times': [pulsetime],
'activity': a,
'sdev': sdev
}
ppgs = nest.Create('pulsepacket_generator', n_neurons, ppg_pars)
vm_pars = {'interval': sampling_resolution}
vm = nest.Create('voltmeter', params=vm_pars)
###############################################################################
# Now, we connect each pulse generator to one neuron via static synapses.
# We use the default static synapse, with specified weight.
# The command ``Connect`` connects all kinds of nodes/devices. Since multiple
# nodes/devices can be connected in different ways e.g., each source connects
# to all targets, each source connects to a subset of targets or each source
# connects to exactly one target, we have to specify the connection. In our
# case we use the ``one_to_one`` connection routine since we connect one pulse
# generator (source) to one neuron (target).
# In addition we also connect the `voltmeter` to the `neurons`.
nest.Connect(ppgs, neurons, 'one_to_one', syn_spec={'weight': weight})
nest.Connect(vm, neurons, syn_spec={'weight': weight})
###############################################################################
# In the next step we run the simulation for a given duration in ms.
nest.Simulate(simtime)
###############################################################################
# Finally, we record the membrane potential, when it occurred and to which
# neuron it belongs. The sender and the time point of a voltage
# data point at position x in the voltage array (``V_m``), can be found at the
# same position x in the sender (`senders`) and the time array (`times`).
Vm = vm.get('events', 'V_m')
times = vm.get('events', 'times')
senders = vm.get('events', 'senders')
###############################################################################
# Here we plot the membrane potential derived from the theory and from the
# simulation. Since we simulate multiple neurons that received slightly
# different pulse packets, we plot the individual and the averaged membrane
# potentials.
#
# We plot the analytical solution U (the resting potential V0 shifts the
# membrane potential up or downwards).
plt.plot(t_U, U + V0, 'r', lw=2, zorder=3, label='analytical solution')
###############################################################################
# Then we plot all individual membrane potentials.
# The time axes is the range of the simulation time in steps of ms.
Vm_single = [Vm[senders == n.global_id] for n in neurons]
simtimes = numpy.arange(1, simtime)
for idn in range(n_neurons):
if idn == 0:
plt.plot(simtimes, Vm_single[idn], 'gray',
zorder=1, label='single potentials')
else:
plt.plot(simtimes, Vm_single[idn], 'gray', zorder=1)
###############################################################################
# Finally, we plot the averaged membrane potential.
Vm_average = numpy.mean(Vm_single, axis=0)
plt.plot(simtimes, Vm_average, 'b', lw=4,
zorder=2, label='averaged potential')
plt.legend()
plt.xlabel('time (ms)')
plt.ylabel('membrane potential (mV)')
plt.xlim((-5 * (tau_m + tau_s) + pulsetime,
10 * (tau_m + tau_s) + pulsetime))
plt.show()
| gpl-2.0 |
kastnerkyle/kaggle-wise2014 | basic_clf.py | 1 | 1692 | """Competition script for Wise2014."""
import numpy as np
from sklearn.datasets import load_svmlight_file
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.cross_validation import cross_val_score, KFold
print("Loading data from svmlight files.")
X_train, y_train = load_svmlight_file(
"data/wise2014-train.libsvm", dtype=np.float32, multilabel=True)
X_test, y_test = load_svmlight_file(
"data/wise2014-test.libsvm", dtype=np.float32, multilabel=True)
print("Binarizing.")
lb = MultiLabelBinarizer()
y_train = lb.fit_transform(y_train)
#http://scikit-learn.org/stable/auto_examples/document_classification_20newsgroups.html
clf = OneVsRestClassifier(LinearSVC(loss='l2', penalty='l2', tol=1e-3,
dual=False), n_jobs=2)
print("Performing cross validation.")
cv = KFold(y_train.shape[0], n_folds=3, shuffle=True, random_state=42)
scores = cross_val_score(clf, X_train, y_train, scoring='f1', cv=cv)
print("CV scores.")
print(scores)
print("F1: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
print("Fitting model.")
clf.fit(X_train, y_train)
print("Predict test set.")
pred_y = clf.predict(X_test)
print("Writing predictions.")
out_file = open("submission.csv", "w")
out_file.write("ArticleId,Labels\n")
nid = 64858
for i in range(pred_y.shape[0]):
label = list(lb.classes_[np.where(pred_y[i, :] == 1)[0]].astype("int"))
label = " ".join(map(str, label))
if label == "": # If the label is empty, populate the most frequent label
label = "103"
out_file.write(str(nid + i) + "," + label + "\n")
out_file.close()
| bsd-3-clause |
awacha/saxsfittool | src/saxsfittool/fitfunction/coreshell/coreshell.py | 1 | 3207 | import numpy as np
from matplotlib.axes import Axes
from matplotlib.patches import Ellipse
from .c_coreshell import F2GaussianCoreShellSphereDistribution
from ..core import FitFunction
class CoreShellSphereGaussianDistribution(FitFunction):
name = "Spherical core-shell particles, Gaussian size distribution"
description = "Intensity weighted distribution of spherical core-shell nanoparticles"
arguments = [('factor', 'Scaling factor'),
('background', 'Constant background'),
('rcore', 'Mean core radius'),
('sigmacore', 'HWHM of the core radius'),
('tshell', 'Shell thickness'),
('rhoshell_relative', 'SLD of the shell: the core is -1')]
def function(self, x, factor, background, rcore, sigmacore, tshell, rhoshell_relative):
return factor * F2GaussianCoreShellSphereDistribution(x, rcore, tshell, sigmacore, -1.0, rhoshell_relative) + background
class CoreShellSphereGaussianDistributionRgI0(FitFunction):
name = "Gaussian distribution of core-shell spheres with Rg and I0"
description = "Spherical core-shell nanoparticles, with Rg and I0"
arguments = [('I0', 'Intensity extrapolated to zero'),
('background', 'Constant background'),
('Rg', 'Radius of gyration'),
('rcore', 'Mean core radius'),
('sigmacore', 'HWHM of the core radius'),
('tshell', 'Shell thickness'),
]
unfittable_parameters = [('Ninteg', 'Number of points for numerical integration', 2,100000,100),
]
def _get_rhos(self, I0, Rg, rcore, tshell):
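        # The algebra below inverts two core-shell sphere relations
        # (with R_out = rcore + tshell):
        #   I0   = (4*pi/3)**2 * (rho_shell*(R_out**3 - R_core**3) + rho_core*R_core**3)**2
        #   Rg^2 = 3/5 * (rho_shell*(R_out**5 - R_core**5) + rho_core*R_core**5)
        #              / (rho_shell*(R_out**3 - R_core**3) + rho_core*R_core**3)
        # and solves them for rho_core and rho_shell.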
R5=rcore**5
R3=rcore**3
Rpt3= (rcore+tshell)**3
Rpt5 = (rcore+tshell)**5
Rg253=5/3*Rg**2
rhocoredivrhoshell=(Rpt5-R5-Rg253*(Rpt3-R3))/(Rg253*R3-R5)
rhoshell = 3*I0**0.5/4/np.pi/(Rpt3-R3+R3*rhocoredivrhoshell)
rhocore = rhoshell*rhocoredivrhoshell
return rhocore, rhoshell
def function(self, x, I0, background, Rg, rcore, sigmacore, tshell, ninteg):
rhocore, rhoshell = self._get_rhos(I0, Rg, rcore, tshell)
return F2GaussianCoreShellSphereDistribution(x, rcore, tshell, sigmacore, rhocore, rhoshell) + background
def draw_representation(self, fig, x, I0, background, Rg, rcore, sigmacore, tshell, ninteg):
rhocore, rhoshell = self._get_rhos(I0, Rg, rcore, tshell)
fig.clear()
ax=fig.add_subplot(1,1,1)
assert isinstance(ax, Axes)
p=Ellipse((0,0),2*(rcore+tshell),2*(rcore+tshell),color='yellow')
ax.add_patch(p)
p=Ellipse((0,0),2*(rcore),2*(rcore), color='green')
ax.add_patch(p)
ax.autoscale_view(True, True, True)
ax.text(0.05,0.95,
'$R_\mathrm{{core}}$: {}\n'
'$T_\mathrm{{shell}}$: {}\n'
'$\\rho_\mathrm{{core}}$: {}\n'
'$\\rho_\mathrm{{shell}}$: {}\n'
'$I_0$: {}\n'
'$R_g$: {}\n'.format(rcore, tshell, rhocore, rhoshell, I0, Rg),
transform=ax.transAxes,ha='left',va='top')
ax.axis('equal')
fig.canvas.draw()
| bsd-3-clause |
jkarnows/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
nguyentu1602/statsmodels | statsmodels/sandbox/examples/example_gam.py | 33 | 2343 | '''original example for checking how far GAM works
Note: uncomment plt.show() to display graphs
'''
example = 2 # 1,2 or 3
import numpy as np
import numpy.random as R
import matplotlib.pyplot as plt
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
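# convenience helpers: ``standardize`` z-scores a series, ``demean`` only
# centers it (``demean`` is not used below)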
nobs = 150
x1 = R.standard_normal(nobs)
x1.sort()
x2 = R.standard_normal(nobs)
x2.sort()
y = R.standard_normal((nobs,))
f1 = lambda x1: (x1 + x1**2 - 3 - 1 * x1**3 + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 + x2**2 - 0.1 * np.exp(x2/4.))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) * 2 # 0.1
y += z
d = np.array([x1,x2]).T
if example == 1:
print("normal")
m = AdditiveModel(d)
m.fit(y)
x = np.linspace(-2,2,50)
print(m)
y_pred = m.results.predict(d)
plt.figure()
plt.plot(y, '.')
plt.plot(z, 'b-', label='true')
plt.plot(y_pred, 'r-', label='AdditiveModel')
plt.legend()
plt.title('gam.AdditiveModel')
import scipy.stats, time
if example == 2:
print("binomial")
f = family.Binomial()
b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
if example == 3:
print("Poisson")
f = family.Poisson()
y = y/y.max() * 3
yp = f.link.inverse(y)
p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)
plt.show()
## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
| bsd-3-clause |
astropy/astropy | examples/io/plot_fits-image.py | 11 | 1898 | # -*- coding: utf-8 -*-
"""
=======================================
Read and plot an image from a FITS file
=======================================
This example opens an image stored in a FITS file and displays it to the screen.
This example uses `astropy.utils.data` to download the file, `astropy.io.fits` to open
the file, and `matplotlib.pyplot` to display the image.
*By: Lia R. Corrales, Adrian Price-Whelan, Kelle Cruz*
*License: BSD*
"""
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Download the example FITS files used by this example:
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
image_file = get_pkg_data_filename('tutorials/FITS-images/HorseHead.fits')
##############################################################################
# Use `astropy.io.fits.info()` to display the structure of the file:
fits.info(image_file)
##############################################################################
# Generally the image information is located in the Primary HDU, also known
# as extension 0. Here, we use `astropy.io.fits.getdata()` to read the image
# data from this first extension using the keyword argument ``ext=0``:
image_data = fits.getdata(image_file, ext=0)
##############################################################################
# The data is now stored as a 2D numpy array. Print the dimensions using the
# shape attribute:
print(image_data.shape)
##############################################################################
# Display the image data:
plt.figure()
plt.imshow(image_data, cmap='gray')
plt.colorbar()
| bsd-3-clause |
michigraber/scikit-learn | sklearn/preprocessing/label.py | 35 | 28877 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
        if len(diff):
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute ``indicator_matrix_`` is deprecated and will be "
"removed in 0.17. Use ``y_type_ == 'multilabel-indicator'`` "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute ``multilabel_`` is deprecated and will be removed "
"in 0.17. Use ``y_type_.startswith('multilabel')`` "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
        raise ValueError("classes {0} mismatch with the labels {1} "
                         "found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
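        # Build the indicator matrix directly in CSR form: each row stores at
        # most one entry (the column of the matched class), so cumulating
        # y_in_classes gives the row pointer and unknown labels yield empty rows.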
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
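        # pos_label == 0 was temporarily encoded as -neg_label above; restore it here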
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
        raise ValueError("The number of classes is not equal to the number of "
                         "dimensions of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
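        # (looking up an unseen label calls __len__ on the mapping, so each new
        # class receives the next free column index on first access)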
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/preprocessing/label.py | 7 | 27529 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import sparse_min_max
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
classes = np.unique(y)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
        if len(diff):
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
    multi-class labels to binary labels (belongs or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : array of shape [n_samples,] or [n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def fit_transform(self, y):
"""Fit label binarizer and transform multi-class labels to binary
labels.
The output of transform is sometimes referred to as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
return self.fit(y).transform(y)
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when ``Y`` contains the output of decision_function
(classifier).
Use 0.5 when ``Y`` contains the output of predict_proba.
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if n_classes == 1:
if sparse_output:
return sp.csr_matrix((n_samples, 1), dtype=int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
        raise ValueError("classes {0} mismatch with the labels {1} "
                         "found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = np.in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = Y.astype(int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = Y.data.astype(int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
        raise ValueError("The number of classes is not equal to the number of "
                         "dimensions of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
return np.repeat(classes[0], len(y))
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> from sklearn.preprocessing import MultiLabelBinarizer
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
# ensure yt.indices keeps its current dtype
yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,
copy=False)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
check_is_fitted(self, 'classes_')
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
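        # Build the indicator matrix in CSR form: each sample appends its
        # (deduplicated) column indices to ``indices`` and records the running
        # length in ``indptr``.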
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
check_is_fitted(self, 'classes_')
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| mit |
dangall/Kaggle-MobileODT-Cancer-Screening | workflow_classes/benchmark_model.py | 1 | 9184 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 3 12:04:33 2017
@author: daniele
"""
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from modules.data_loading import load_training_data
from modules.visualization import display_single_image
from modules.path_munging import count_batches
from modules.image_preprocessing import (batch_load_manipulate,
images_to_percentage_red)
from modules.probablity_manipulation import compute_loss, agnosticize
class BenchmarkModel(object):
"""
    Random forest benchmark trained on the fraction of red pixels in each image.
"""
def __init__(self, model_name="", **kwargs):
self.model_name = model_name
def test_loading(self, batch_and_index=(0, 19), batch_loc=""):
"""
        Tests whether the images in the folder are loaded and displayed as
        expected.
        Parameters:
            batch_and_index: tuple of length 2, each entry is an integer
                             Specifies the batch number and the index within
                             that batch of the image that is to be loaded and
                             visualized.
            batch_loc: string. Path to the folder containing the training
                       batches.
"""
display_single_image(load_training_data(
batch_and_index[0],
batch_loc=batch_loc)[batch_and_index[1]])
def count_training_batches(self, folder):
"""
Convenience function which counts the number of training-batch files
in the specified folder.
Input: string specifying the path to the folder.
"""
return count_batches(folder)
def train(self, training_batches=[], leftright=False, updown=False,
validation_inputarray=[], validation_labels=[],
validation_batchnum=0, agnosticic_average=0, training_folder=""):
"""
        Trains the random forest on images reduced to a single scalar feature
        (the fraction of red pixels in each image).
It is possible to specify the validation set by either giving the
function the number of a data batch, or by giving it the array of the
validation data and the array of its labels.
Parameters:
training_batches: list of ints. Specifies the number-labels of the
batches to be used for training.
leftright: boolean. Specifies whether we should also train on
images that have been flipped left-to-right.
updown: boolean. Specifies whether we should also train on images
that have been flipped upside-down.
validation_inputarray: 4-d array. Only needs to be specified if
validation_batchnum is not. The array is the
input-data of our validation set.
validation_labels: 2-d array. Only needs to be specified if
validation_batchnum is not. The array is the
one-hot-encoded labels of our validation set.
validation_batchnum: int. Only needs to be specified if both
validation_inputarray and validation_labels
are not. Specifies the number-label of the
data batch we want to use as our validation
set.
Returns:
accuracy: validation-set accuracy.
train_loss: training-set loss.
val_loss: validation-set loss.
"""
# Load the training batches (oversampling to make sure they're
# balanced) and concatenate them together
all_train_data = []
all_train_labels = []
for batch_i in training_batches:
(train_data,
train_labels) = batch_load_manipulate(batch_i,
leftright=leftright,
updown=updown,
batch_loc=training_folder)
all_train_data.append(train_data)
all_train_labels.append(train_labels)
all_train_data = np.concatenate(all_train_data)
labels_train = np.concatenate(all_train_labels)
# Load the validation set (again in a balanced way)
if len(validation_inputarray) > 0:
val_data = validation_inputarray
labels_val = validation_labels
else:
(val_data,
labels_val) = batch_load_manipulate(validation_batchnum,
leftright=False, updown=False,
batch_loc=training_folder)
# Turn the input data for each image into a single number, which equals
# the percentage of red pixels in the image
input_train = images_to_percentage_red(all_train_data)
input_val = images_to_percentage_red(val_data)
# Fit random forest
rand_forest = RandomForestClassifier(n_estimators=1000)
rand_forest.fit(input_train, labels_train)
self.trained_model = rand_forest
# Compute accuracy, training loss and validation loss
(accuracy,
train_loss,
val_loss) = self.get_stats(input_train, labels_train, input_val,
labels_val,
agnosticic_average=agnosticic_average)
return (accuracy, train_loss, val_loss)
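# Hedged usage sketch (added; batch numbers and folder path are hypothetical
# placeholders and assume batches prepared with the modules imported above):
#
#     model = BenchmarkModel(model_name="red-fraction-benchmark")
#     acc, train_loss, val_loss = model.train(training_batches=[0, 1, 2],
#                                             validation_batchnum=3,
#                                             training_folder="./train_batches/")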
def compute_probas(self, clf, inputdata):
"""
Takes a classifier and an input and predicts one-hot-encoded
probabilities.
"""
probabilities = np.transpose(np.array(
clf.predict_proba(inputdata))[:, :, 1])
return probabilities
def compute_score(self, clf, inputdata, correctlabels):
"""
Computes the class probabilities for the input data, picks the likeliest
class for each sample, and returns the fraction of samples for which that
choice agrees with correctlabels.
"""
predicted_probas = self.compute_probas(clf, inputdata)
argmax_probas = np.array([np.argmax(pp) for pp in predicted_probas])
argmax_truelabels = np.array([np.argmax(pp) for pp in correctlabels])
score = np.mean(argmax_probas == argmax_truelabels)
return score
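# Worked mini-example (added; numbers are hypothetical): with predicted
# probabilities [[0.7, 0.2, 0.1], [0.2, 0.5, 0.3]] and one-hot labels
# [[1, 0, 0], [0, 0, 1]], the argmax predictions are [0, 1] while the true
# argmax labels are [0, 2], so the returned score is 1/2 = 0.5.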
def get_stats(self, training_inputarray, training_labels,
validation_inputarray, validation_labels,
agnosticic_average=0):
"""
Obtain information about loss and validation accuracy
: training_inputarray: Batch of Numpy image data
: training_labels: Batch of Numpy label data
: validation_inputarray: Batch of Numpy image data
: validation_labels: Batch of Numpy label data
"""
# Predict probabilites
train_probas = self.compute_probas(self.trained_model,
training_inputarray)
val_probas = self.compute_probas(self.trained_model,
validation_inputarray)
if agnosticic_average > 0:
train_probas = agnosticize(train_probas, agnosticic_average)
val_probas = agnosticize(val_probas, agnosticic_average)
# Compute accuracy, training loss and validation loss
accuracy = self.compute_score(self.trained_model,
validation_inputarray,
validation_labels)
train_loss = compute_loss(train_probas, training_labels)
val_loss = compute_loss(val_probas, validation_labels)
return accuracy, train_loss, val_loss
def test(self, load_test_set="", test_set=[], agnosticic_average=0):
"""
Makes predictions on a given set of data. Optionally averages the
predicted probabilities with an agnostic probability to obtain less
confident estimates.
Parameters:
load_test_set: string. Only needs to be specified if test_set is
not. Full path to the .npy data containing the test
set arrays to be fed into the network.
test_set: array. Only needs to be specified if load_test_set is
not. This is the test-set aray to be fed into the neural
network to obtain predicted probabilities for each label.
agnosticic_average: int. Specifies how many times we average the
predicted probabilities with an agnostic
probability. Default is 0.
Returns: probabilities.
"""
if len(test_set) == 0:
testing_inputarray = np.load(load_test_set)
else:
testing_inputarray = test_set
input_test = images_to_percentage_red(testing_inputarray)
probabilities = self.compute_probas(self.trained_model, input_test)
if agnosticic_average > 0:
probabilities = agnosticize(probabilities, agnosticic_average)
return probabilities
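# Hedged usage sketch (added; the .npy path is a hypothetical placeholder):
#
#     probs = model.test(load_test_set="./test_batch.npy", agnosticic_average=2)
#     predicted_classes = probs.argmax(axis=1)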
| mit |
chinageology/GeoPython | geopytool/__init__.py | 1 | 71038 | #!/usr/bin/python3
# coding:utf-8
from geopytool.ImportDependence import *
from geopytool.CustomClass import *
LocationOfMySelf=os.path.dirname(__file__)
#print(LocationOfMySelf,' init')
sign = '''
created on Sat Dec 17 22:28:24 2016
@author: cycleuser
# Create Date: 2015-07-13
# Modify Date: 2018-02-09
a tool set for daily geology-related tasks.
# prerequisite:
# based on Python 3.x
# need math,numpy,pandas,matplotlib,xlrd,pyqt5,BeautifulSoup4,pyopengl,pyqtgraph
Any issues or improvements please contact [email protected]
or Open An Issue at GitHub:https://github.com/GeoPyTool/GeoPyTool/issues
Website For Chinese Users:https://zhuanlan.zhihu.com/p/28908475
'''
t = 'You are using GeoPyTool ' + version + ', released on ' + date + '\n' + sign
_translate = QtCore.QCoreApplication.translate
from geopytool.CustomClass import TableViewer
from geopytool.CIPW import CIPW
from geopytool.Niggli import Niggli
from geopytool.Cluster import Cluster
from geopytool.Harker import Harker
from geopytool.HarkerOld import HarkerOld
#from geopytool.Magic import Magic
from geopytool.Clastic import Clastic
from geopytool.CIA import CIA
from geopytool.IsoTope import IsoTope
from geopytool.KArIsoTope import KArIsoTope
from geopytool.MultiDimension import MultiDimension
from geopytool.Combine import MyCombine
from geopytool.Flatten import MyFlatten
from geopytool.MyFA import MyFA
from geopytool.MyPCA import MyPCA
from geopytool.Trans import MyTrans
from geopytool.Dist import MyDist
from geopytool.Sta import MySta
from geopytool.ThreeD import MyThreeD
from geopytool.TwoD import MyTwoD
from geopytool.TwoD_Grey import MyTwoD_Grey
from geopytool.Pearce import Pearce
from geopytool.QAPF import QAPF
from geopytool.QFL import QFL
from geopytool.QmFLt import QmFLt
from geopytool.REE import REE
from geopytool.Rose import Rose
from geopytool.Stereo import Stereo
from geopytool.TAS import TAS
from geopytool.K2OSiO2 import K2OSiO2
from geopytool.Saccani import Saccani
from geopytool.Raman import Raman
from geopytool.FluidInclusion import FluidInclusion
from geopytool.MyHist import MyHist
from geopytool.Temp import *
from geopytool.TraceNew import TraceNew
from geopytool.Trace import Trace
from geopytool.XY import XY
from geopytool.XYZ import XYZ
from geopytool.ZirconCe import ZirconCe
from geopytool.ZirconCeOld import ZirconCeOld
from geopytool.Magic import Magic
# Create a custom "QProxyStyle" to enlarge the QMenu icons
#-----------------------------------------------------------
class MyProxyStyle(QProxyStyle):
pass
def pixelMetric(self, QStyle_PixelMetric, option=None, widget=None):
if QStyle_PixelMetric == QStyle.PM_SmallIconSize:
return 24
else:
return QProxyStyle.pixelMetric(self, QStyle_PixelMetric, option, widget)
class Ui_MainWindow(QtWidgets.QMainWindow):
raw = pd.DataFrame(index=[], columns=[]) # raw is initialized as a blank DataFrame
Standard = {}# Standard is initialized as a blank Dict
Language = ''
app = QtWidgets.QApplication(sys.argv)
myStyle = MyProxyStyle('Fusion') # The proxy style should be based on an existing style,
# like 'Windows', 'Motif', 'Plastique', 'Fusion', ...
app.setStyle(myStyle)
trans = QtCore.QTranslator()
talk=''
targetversion = '0'
DataLocation =''
ChemResult=pd.DataFrame()
AutoResult=pd.DataFrame()
TotalResult=[]
def __init__(self):
super(Ui_MainWindow, self).__init__()
self.setObjectName('MainWindow')
self.resize(800, 600)
self.setAcceptDrops(True)
_translate = QtCore.QCoreApplication.translate
self.setWindowTitle(_translate('MainWindow', u'GeoPyTool'))
self.setWindowIcon(QIcon(LocationOfMySelf+'/geopytool.png'))
self.talk= _translate('MainWindow','You are using GeoPyTool ') + version +'\n'+ _translate('MainWindow','released on ') + date
self.model = PandasModel(self.raw)
self.main_widget = QWidget(self)
self.tableView = CustomQTableView(self.main_widget)
self.tableView.setObjectName('tableView')
self.tableView.setSortingEnabled(True)
self.vbox = QVBoxLayout()
self.vbox.addWidget(self.tableView)
self.main_widget.setLayout(self.vbox)
self.setCentralWidget(self.main_widget)
self.menubar = QtWidgets.QMenuBar(self)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1000, 22))
self.menubar.setNativeMenuBar(False)
self.menubar.setObjectName('menubar')
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName('menuFile')
self.menuGeoChem = QtWidgets.QMenu(self.menubar)
self.menuGeoChem.setObjectName('menuGeoChem')
self.menuStructure = QtWidgets.QMenu(self.menubar)
self.menuStructure.setObjectName('menuStructure')
self.menuSedimentary = QtWidgets.QMenu(self.menubar)
self.menuSedimentary.setObjectName('menuSedimentary')
self.menuGeoCalc = QtWidgets.QMenu(self.menubar)
self.menuGeoCalc.setObjectName('menuGeoCalc')
self.menuAdditional = QtWidgets.QMenu(self.menubar)
self.menuAdditional.setObjectName('menuAdditional')
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName('menuHelp')
self.menuLanguage = QtWidgets.QMenu(self.menubar)
self.menuLanguage.setObjectName('menuLanguage')
self.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(self)
self.statusbar.setObjectName('statusbar')
self.setStatusBar(self.statusbar)
self.actionOpen = QtWidgets.QAction(QIcon(LocationOfMySelf+'/open.png'), u'Open',self)
self.actionOpen.setObjectName('actionOpen')
self.actionOpen.setShortcut('Ctrl+O')
self.actionClose = QtWidgets.QAction(QIcon(LocationOfMySelf+'/close.png'), u'Close',self)
self.actionClose.setObjectName('actionClose')
self.actionClose.setShortcut('Ctrl+N')
self.actionSet = QtWidgets.QAction(QIcon(LocationOfMySelf + '/set.png'), u'Set', self)
self.actionSet.setObjectName('actionSet')
self.actionSet.setShortcut('Ctrl+F')
self.actionSave = QtWidgets.QAction(QIcon(LocationOfMySelf+'/save.png'), u'Save',self)
self.actionSave.setObjectName('actionSave')
self.actionSave.setShortcut('Ctrl+S')
self.actionCombine = QtWidgets.QAction(QIcon(LocationOfMySelf+'/combine.png'),u'Combine',self)
self.actionCombine.setObjectName('actionCombine')
self.actionCombine.setShortcut('Alt+C')
self.actionCombine_transverse = QtWidgets.QAction(QIcon(LocationOfMySelf+'/combine.png'),u'Combine_transverse',self)
self.actionCombine_transverse.setObjectName('actionCombine_transverse')
self.actionCombine_transverse.setShortcut('Alt+T')
self.actionFlatten = QtWidgets.QAction(QIcon(LocationOfMySelf+'/flatten.png'),u'Flatten',self)
self.actionFlatten.setObjectName('actionFlatten')
self.actionFlatten.setShortcut('Alt+F')
self.actionTrans = QtWidgets.QAction(QIcon(LocationOfMySelf+'/trans.png'),u'Trans',self)
self.actionTrans.setObjectName('actionTrans')
self.actionTrans.setShortcut('Ctrl+T')
self.actionReFormat = QtWidgets.QAction(QIcon(LocationOfMySelf+'/trans.png'),u'ReFormat',self)
self.actionReFormat.setObjectName('actionReFormat')
self.actionReFormat.setShortcut('Alt+R')
self.actionQuit = QtWidgets.QAction(QIcon(LocationOfMySelf+'/quit.png'), u'Quit',self)
self.actionQuit.setObjectName('actionQuit')
self.actionQuit.setShortcut('Ctrl+Q')
self.actionWeb = QtWidgets.QAction(QIcon(LocationOfMySelf+'/forum.png'), u'English Forum',self)
self.actionWeb.setObjectName('actionWeb')
self.actionGoGithub = QtWidgets.QAction(QIcon(LocationOfMySelf+'/github.png'), u'GitHub',self)
self.actionGoGithub.setObjectName('actionGoGithub')
self.actionVersionCheck = QtWidgets.QAction(QIcon(LocationOfMySelf+'/update.png'), u'Version',self)
self.actionVersionCheck.setObjectName('actionVersionCheck')
self.actionCnS = QtWidgets.QAction(QIcon(LocationOfMySelf+'/cns.png'), u'Simplified Chinese',self)
self.actionCnS.setObjectName('actionCnS')
self.actionCnT = QtWidgets.QAction(QIcon(LocationOfMySelf+'/cnt.png'), u'Traditional Chinese',self)
self.actionCnT.setObjectName('actionCnT')
self.actionEn = QtWidgets.QAction(QIcon(LocationOfMySelf+'/en.png'), u'English',self)
self.actionEn.setObjectName('actionEn')
self.actionLoadLanguage = QtWidgets.QAction(QIcon(LocationOfMySelf+'/lang.png'), u'Load Language',self)
self.actionLoadLanguage.setObjectName('actionLoadLanguage')
self.actionTAS = QtWidgets.QAction(QIcon(LocationOfMySelf+'/xy.png'), u'TAS',self)
self.actionTAS.setObjectName('actionTAS')
self.actionTrace = QtWidgets.QAction(QIcon(LocationOfMySelf+'/spider2.png'), u'Trace',self)
self.actionTrace.setObjectName('actionTrace')
self.actionTraceNew = QtWidgets.QAction(QIcon(LocationOfMySelf+'/spider2.png'), u'TraceNew',self)
self.actionTraceNew.setObjectName('actionTraceNew')
self.actionRee = QtWidgets.QAction(QIcon(LocationOfMySelf+'/spider2.png'), u'Ree',self)
self.actionRee.setObjectName('actionRee')
self.actionPearce = QtWidgets.QAction(QIcon(LocationOfMySelf+'/spider.png'),u'Pearce',self)
self.actionPearce.setObjectName('actionPearce')
self.actionHarker = QtWidgets.QAction(QIcon(LocationOfMySelf+'/spider.png'),u'Harker',self)
self.actionHarker.setObjectName('actionHarker')
self.actionHarkerOld = QtWidgets.QAction(QIcon(LocationOfMySelf+'/spider.png'),u'HarkerOld',self)
self.actionHarkerOld.setObjectName('actionHarkerOld')
self.actionRemoveLOI= QtWidgets.QAction(QIcon(LocationOfMySelf+'/fire.png'),u'RemoveLOI',self)
self.actionRemoveLOI.setObjectName('actionRemoveLOI')
self.actionK2OSiO2 = QtWidgets.QAction(QIcon(LocationOfMySelf+'/xy.png'), u'K2OSiO2',self)
self.actionK2OSiO2.setObjectName('actionK2OSiO2')
self.actionStereo = QtWidgets.QAction(QIcon(LocationOfMySelf+'/structure.png'),u'Stereo',self)
self.actionStereo.setObjectName('actionStereo')
self.actionRose = QtWidgets.QAction(QIcon(LocationOfMySelf+'/rose.png'),u'Rose',self)
self.actionRose.setObjectName('actionRose')
self.actionQFL = QtWidgets.QAction(QIcon(LocationOfMySelf+'/triangular.png'),u'QFL',self)
self.actionQFL.setObjectName('actionQFL')
self.actionQmFLt = QtWidgets.QAction(QIcon(LocationOfMySelf+'/triangular.png'),u'QmFLt',self)
self.actionQmFLt.setObjectName('actionQmFLt')
self.actionCIPW = QtWidgets.QAction(QIcon(LocationOfMySelf+'/calc.png'),u'CIPW',self)
self.actionCIPW.setObjectName('actionCIPW')
self.actionNiggli = QtWidgets.QAction(QIcon(LocationOfMySelf+'/calc.png'),u'Niggli',self)
self.actionNiggli.setObjectName('actionNiggli')
self.actionZirconCe = QtWidgets.QAction(QIcon(LocationOfMySelf+'/calc.png'),u'ZirconCe',self)
self.actionZirconCe.setObjectName('actionZirconCe')
self.actionZirconCeOld = QtWidgets.QAction(QIcon(LocationOfMySelf+'/calc.png'),u'ZirconCeOld',self)
self.actionZirconCeOld.setObjectName('actionZirconCeOldOld')
self.actionZirconTiTemp = QtWidgets.QAction(QIcon(LocationOfMySelf+'/temperature.png'),u'ZirconTiTemp',self)
self.actionZirconTiTemp.setObjectName('actionZirconTiTemp')
self.actionRutileZrTemp = QtWidgets.QAction(QIcon(LocationOfMySelf+'/temperature.png'),u'RutileZrTemp',self)
self.actionRutileZrTemp.setObjectName('actionRutileZrTemp')
self.actionCluster = QtWidgets.QAction(QIcon(LocationOfMySelf+'/cluster.png'),u'Cluster',self)
self.actionCluster.setObjectName('actionCluster')
self.actionAuto = QtWidgets.QAction(QIcon(LocationOfMySelf+'/auto.png'),u'Auto',self)
self.actionAuto.setObjectName('actionAuto')
self.actionMultiDimension = QtWidgets.QAction(QIcon(LocationOfMySelf+'/multiple.png'),u'MultiDimension',self)
self.actionMultiDimension.setObjectName('actionMultiDimension')
self.actionThreeD = QtWidgets.QAction(QIcon(LocationOfMySelf+'/multiple.png'),u'ThreeD',self)
self.actionThreeD.setObjectName('actionThreeD')
self.actionTwoD = QtWidgets.QAction(QIcon(LocationOfMySelf+'/qapf.png'),u'TwoD',self)
self.actionTwoD.setObjectName('actionTwoD')
self.actionTwoD_Grey = QtWidgets.QAction(QIcon(LocationOfMySelf+'/qapf.png'),u'TwoD Grey',self)
self.actionTwoD_Grey.setObjectName('actionTwoD_Grey')
self.actionDist = QtWidgets.QAction(QIcon(LocationOfMySelf+'/dist.png'),u'Dist',self)
self.actionDist.setObjectName('actionDist')
self.actionStatistics = QtWidgets.QAction(QIcon(LocationOfMySelf+'/statistics.png'), u'Statistics',self)
self.actionStatistics.setObjectName('actionStatistics')
self.actionMyHist = QtWidgets.QAction(QIcon(LocationOfMySelf+'/h.png'), u'Histogram',self)
self.actionMyHist.setObjectName('actionMyHist')
self.actionFA = QtWidgets.QAction(QIcon(LocationOfMySelf+'/fa.png'),u'FA',self)
self.actionFA.setObjectName('actionFA')
self.actionPCA = QtWidgets.QAction(QIcon(LocationOfMySelf+'/pca.png'),u'PCA',self)
self.actionPCA.setObjectName('actionPCA')
self.actionQAPF = QtWidgets.QAction(QIcon(LocationOfMySelf+'/qapf.png'),u'QAPF',self)
self.actionQAPF.setObjectName('actionQAPF')
self.actionSaccani = QtWidgets.QAction(QIcon(LocationOfMySelf + '/s.png'), u'Saccani Plot', self)
self.actionSaccani.setObjectName('actionSaccani')
self.actionRaman = QtWidgets.QAction(QIcon(LocationOfMySelf + '/r.png'), u'Raman Strength', self)
self.actionRaman.setObjectName('actionRaman')
self.actionFluidInclusion = QtWidgets.QAction(QIcon(LocationOfMySelf + '/f.png'), u'Fluid Inclusion', self)
self.actionFluidInclusion.setObjectName('actionFluidInclusion')
self.actionClastic = QtWidgets.QAction(QIcon(LocationOfMySelf+'/mud.png'),u'Clastic',self)
self.actionClastic.setObjectName("actionClastic")
self.actionCIA = QtWidgets.QAction(QIcon(LocationOfMySelf+'/mud.png'),u'CIA/ICV/PIA/CIW/CIW\'',self)
self.actionCIA.setObjectName("actionCIA")
self.actionXY = QtWidgets.QAction(QIcon(LocationOfMySelf+'/xy.png'), u'X-Y',self)
self.actionXY.setObjectName('actionXY')
self.actionXYZ = QtWidgets.QAction(QIcon(LocationOfMySelf+'/triangular.png'),u'Ternary',self)
self.actionXYZ.setObjectName('actionXYZ')
self.actionRbSrIsoTope = QtWidgets.QAction(QIcon(LocationOfMySelf+'/magic.png'),u'Rb-Sr IsoTope',self)
self.actionRbSrIsoTope.setObjectName('actionRbSrIsoTope')
self.actionSmNdIsoTope = QtWidgets.QAction(QIcon(LocationOfMySelf+'/magic.png'),u'Sm-Nd IsoTope',self)
self.actionSmNdIsoTope.setObjectName('actionSmNdIsoTope')
self.actionKArIsoTope = QtWidgets.QAction(QIcon(LocationOfMySelf+'/magic.png'),u'K-Ar IsoTope',self)
self.actionKArIsoTope.setObjectName('actionKArIsoTope')
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.actionClose)
self.menuFile.addAction(self.actionSet)
self.menuFile.addAction(self.actionSave)
self.menuFile.addAction(self.actionCombine)
self.menuFile.addAction(self.actionCombine_transverse)
self.menuFile.addAction(self.actionFlatten)
self.menuFile.addAction(self.actionTrans)
#self.menuFile.addAction(self.actionReFormat)
self.menuFile.addAction(self.actionQuit)
self.menuGeoChem.addAction(self.actionRemoveLOI)
self.menuGeoChem.addAction(self.actionAuto)
self.menuGeoChem.addAction(self.actionTAS)
self.menuGeoChem.addAction(self.actionTrace)
self.menuGeoChem.addAction(self.actionRee)
self.menuGeoChem.addAction(self.actionPearce)
self.menuGeoChem.addAction(self.actionHarker)
self.menuGeoChem.addAction(self.actionCIPW)
#self.menuGeoChem.addAction(self.actionNiggli)
self.menuGeoChem.addAction(self.actionQAPF)
self.menuGeoChem.addAction(self.actionSaccani)
self.menuGeoChem.addAction(self.actionK2OSiO2)
self.menuGeoChem.addAction(self.actionRaman)
self.menuGeoChem.addAction(self.actionFluidInclusion)
self.menuGeoChem.addAction(self.actionHarkerOld)
self.menuGeoChem.addAction(self.actionTraceNew)
self.menuStructure.addAction(self.actionStereo)
self.menuStructure.addAction(self.actionRose)
self.menuSedimentary.addAction(self.actionQFL)
self.menuSedimentary.addAction(self.actionQmFLt)
self.menuSedimentary.addAction(self.actionClastic)
self.menuSedimentary.addAction(self.actionCIA)
self.menuGeoCalc.addAction(self.actionZirconCe)
self.menuGeoCalc.addAction(self.actionZirconCeOld)
self.menuGeoCalc.addAction(self.actionZirconTiTemp)
self.menuGeoCalc.addAction(self.actionRutileZrTemp)
self.menuGeoCalc.addAction(self.actionRbSrIsoTope)
self.menuGeoCalc.addAction(self.actionSmNdIsoTope)
#self.menuGeoCalc.addAction(self.actionKArIsoTope)
self.menuAdditional.addAction(self.actionXY)
self.menuAdditional.addAction(self.actionXYZ)
self.menuAdditional.addAction(self.actionCluster)
self.menuAdditional.addAction(self.actionMultiDimension)
self.menuAdditional.addAction(self.actionFA)
self.menuAdditional.addAction(self.actionPCA)
self.menuAdditional.addAction(self.actionDist)
self.menuAdditional.addAction(self.actionStatistics)
self.menuAdditional.addAction(self.actionThreeD)
self.menuAdditional.addAction(self.actionTwoD)
self.menuAdditional.addAction(self.actionTwoD_Grey)
self.menuAdditional.addAction(self.actionMyHist)
self.menuHelp.addAction(self.actionWeb)
self.menuHelp.addAction(self.actionGoGithub)
self.menuHelp.addAction(self.actionVersionCheck)
self.menuLanguage.addAction(self.actionCnS)
self.menuLanguage.addAction(self.actionCnT)
self.menuLanguage.addAction(self.actionEn)
self.menuLanguage.addAction(self.actionLoadLanguage)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addSeparator()
self.menubar.addAction(self.menuGeoChem.menuAction())
self.menubar.addSeparator()
self.menubar.addAction(self.menuStructure.menuAction())
self.menubar.addSeparator()
self.menubar.addAction(self.menuSedimentary.menuAction())
self.menubar.addSeparator()
self.menubar.addAction(self.menuGeoCalc.menuAction())
self.menubar.addSeparator()
self.menubar.addAction(self.menuAdditional.menuAction())
self.menubar.addSeparator()
self.menubar.addAction(self.menuHelp.menuAction())
self.menubar.addSeparator()
self.menubar.addAction(self.menuLanguage.menuAction())
self.menubar.addSeparator()
self.actionCombine.triggered.connect(self.Combine)
self.actionCombine_transverse.triggered.connect(self.Combine_transverse)
self.actionFlatten.triggered.connect(self.Flatten)
self.actionTrans.triggered.connect(self.Trans)
self.actionReFormat.triggered.connect(self.ReFormat)
self.actionTAS.triggered.connect(self.TAS)
self.actionTrace.triggered.connect(self.Trace)
self.actionTraceNew.triggered.connect(self.TraceNew)
self.actionRee.triggered.connect(self.REE)
self.actionPearce.triggered.connect(self.Pearce)
self.actionHarker.triggered.connect(self.Harker)
self.actionHarkerOld.triggered.connect(self.HarkerOld)
self.actionQAPF.triggered.connect(self.QAPF)
self.actionSaccani.triggered.connect(self.Saccani)
self.actionK2OSiO2.triggered.connect(self.K2OSiO2)
self.actionRaman.triggered.connect(self.Raman)
self.actionFluidInclusion.triggered.connect(self.FluidInclusion)
self.actionStereo.triggered.connect(self.Stereo)
self.actionRose.triggered.connect(self.Rose)
self.actionQFL.triggered.connect(self.QFL)
self.actionQmFLt.triggered.connect(self.QmFLt)
self.actionClastic.triggered.connect(self.Clastic)
self.actionCIA.triggered.connect(self.CIA)
self.actionCIPW.triggered.connect(self.CIPW)
self.actionNiggli.triggered.connect(self.Niggli)
self.actionZirconCe.triggered.connect(self.ZirconCe)
self.actionZirconCeOld.triggered.connect(self.ZirconCeOld)
self.actionZirconTiTemp.triggered.connect(self.ZirconTiTemp)
self.actionRutileZrTemp.triggered.connect(self.RutileZrTemp)
self.actionCluster.triggered.connect(self.Cluster)
self.actionAuto.triggered.connect(self.Auto)
self.actionMultiDimension.triggered.connect(self.MultiDimension)
self.actionRemoveLOI.triggered.connect(self.RemoveLOI)
self.actionFA.triggered.connect(self.FA)
self.actionPCA.triggered.connect(self.PCA)
self.actionDist.triggered.connect(self.Dist)
self.actionStatistics.triggered.connect(self.Sta)
self.actionThreeD.triggered.connect(self.ThreeD)
self.actionTwoD.triggered.connect(self.TwoD)
self.actionTwoD_Grey.triggered.connect(self.TwoD_Grey)
self.actionMyHist.triggered.connect(self.MyHist)
#self.actionICA.triggered.connect(self.ICA)
#self.actionSVM.triggered.connect(self.SVM)
self.actionOpen.triggered.connect(self.getDataFile)
self.actionClose.triggered.connect(self.clearDataFile)
self.actionSet.triggered.connect(self.SetUpDataFile)
self.actionSave.triggered.connect(self.saveDataFile)
self.actionQuit.triggered.connect(qApp.quit)
self.actionWeb.triggered.connect(self.goIssue)
self.actionGoGithub.triggered.connect(self.goGitHub)
self.actionVersionCheck.triggered.connect(self.checkVersion)
self.actionCnS.triggered.connect(self.to_ChineseS)
self.actionCnT.triggered.connect(self.to_ChineseT)
self.actionEn.triggered.connect(self.to_English)
self.actionLoadLanguage.triggered.connect(self.to_LoadLanguage)
self.actionXY.triggered.connect(self.XY)
self.actionXYZ.triggered.connect(self.XYZ)
self.actionRbSrIsoTope.triggered.connect(self.RbSrIsoTope)
self.actionSmNdIsoTope.triggered.connect(self.SmNdIsoTope)
self.actionKArIsoTope.triggered.connect(self.KArIsoTope)
self.ReadConfig()
self.trans.load(LocationOfMySelf+'/'+self.Language)
self.app.installTranslator(self.trans)
self.retranslateUi()
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
self.talk= _translate('MainWindow','You are using GeoPyTool ') + version +'\n'+ _translate('MainWindow','released on ') + date + '\n'
self.menuFile.setTitle(_translate('MainWindow', u'Data File'))
self.menuGeoChem.setTitle(_translate('MainWindow', u'Geochemistry'))
self.menuGeoCalc.setTitle(_translate('MainWindow',u'Calculation'))
self.menuStructure.setTitle(_translate('MainWindow', u'Structure'))
self.menuSedimentary.setTitle(_translate('MainWindow', u'Sedimentary'))
self.menuAdditional.setTitle(_translate('MainWindow', u'Additional Functions'))
self.menuHelp.setTitle(_translate('MainWindow', u'Help'))
self.menuLanguage.setTitle(_translate('MainWindow', u'Language'))
self.actionCombine.setText(_translate('MainWindow', u'Combine'))
self.actionCombine_transverse.setText(_translate('MainWindow', u'Combine_transverse'))
self.actionFlatten.setText(_translate('MainWindow',u'Flatten'))
self.actionTrans.setText(_translate('MainWindow',u'Trans'))
self.actionReFormat.setText(_translate('MainWindow',u'ReFormat'))
self.actionOpen.setText(_translate('MainWindow', u'Open Data'))
self.actionClose.setText(_translate('MainWindow', u'Close Data'))
self.actionSet.setText(_translate('MainWindow', u'Set Format'))
self.actionSave.setText(_translate('MainWindow', u'Save Data'))
self.actionQuit.setText(_translate('MainWindow', u'Quit App'))
self.actionRemoveLOI.setText('1-0 '+_translate('MainWindow',u'Remove LOI'))
self.actionAuto.setText('1-1 '+_translate('MainWindow', u'Auto'))
self.actionTAS.setText('1-2 '+ _translate('MainWindow',u'TAS'))
self.actionTrace.setText('1-3 '+_translate('MainWindow',u'Trace'))
self.actionRee.setText('1-4 '+_translate('MainWindow',u'REE'))
self.actionPearce.setText('1-5 '+_translate('MainWindow',u'Pearce'))
self.actionHarker.setText('1-6 '+_translate('MainWindow',u'Harker'))
self.actionCIPW.setText('1-7 '+_translate('MainWindow',u'CIPW'))
#self.actionNiggli.setText('1-8 '+_translate('MainWindow',u'Niggli'))
self.actionQAPF.setText('1-9 '+_translate('MainWindow',u'QAPF'))
self.actionSaccani.setText('1-10 '+_translate('MainWindow',u'Saccani Plot'))
self.actionK2OSiO2.setText('1-11 '+_translate('MainWindow',u'K2O-SiO2'))
self.actionRaman.setText('1-12 '+_translate('MainWindow',u'Raman Strength'))
self.actionFluidInclusion.setText('1-13 '+_translate('MainWindow',u'Fluid Inclusion'))
self.actionHarkerOld.setText('1-14 '+_translate('MainWindow',u'Harker Classical'))
self.actionTraceNew.setText('1-15 '+_translate('MainWindow',u'TraceNew'))
self.actionStereo.setText('2-1 '+_translate('MainWindow',u'Stereo'))
self.actionRose.setText('2-2 '+_translate('MainWindow',u'Rose'))
self.actionQFL.setText('3-1 '+_translate('MainWindow',u'QFL'))
self.actionQmFLt.setText('3-2 '+_translate('MainWindow',u'QmFLt'))
self.actionClastic.setText('3-3 '+_translate('MainWindow',u'Clastic'))
self.actionCIA.setText('3-4 '+ _translate('MainWindow',u'CIA/ICV/PIA/CIW/CIW\''))
self.actionZirconCe.setText('4-1 '+ _translate('MainWindow',u'ZirconCe'))
self.actionZirconCeOld.setText('4-2 '+ _translate('MainWindow', u'ZirconCeOld'))
self.actionZirconTiTemp.setText('4-3 '+ _translate('MainWindow',u'ZirconTiTemp'))
self.actionRutileZrTemp.setText('4-4 '+_translate('MainWindow',u'RutileZrTemp'))
self.actionRbSrIsoTope.setText('4-5 '+_translate('MainWindow',u'Rb-Sr IsoTope'))
self.actionSmNdIsoTope.setText('4-6 '+_translate('MainWindow',u'Sm-Nd IsoTope'))
#self.actionKArIsoTope.setText(_translate('MainWindow',u'K-Ar IsoTope'))
self.actionXY.setText('5-1 '+_translate('MainWindow',u'X-Y plot'))
self.actionXYZ.setText('5-2 '+_translate('MainWindow',u'X-Y-Z plot'))
self.actionCluster.setText('5-3 '+_translate('MainWindow',u'Cluster'))
self.actionMultiDimension.setText('5-4 '+_translate('MainWindow',u'MultiDimension'))
self.actionFA.setText('5-5 '+_translate('MainWindow',u'FA'))
self.actionPCA.setText('5-6 '+_translate('MainWindow',u'PCA'))
self.actionDist.setText('5-7 '+_translate('MainWindow',u'Distance'))
self.actionStatistics.setText('5-8 '+_translate('MainWindow',u'Statistics'))
self.actionThreeD.setText('5-9 '+_translate('MainWindow',u'ThreeD'))
self.actionTwoD.setText('5-10 '+_translate('MainWindow',u'TwoD'))
self.actionTwoD_Grey.setText('5-11 '+_translate('MainWindow',u'TwoD Grey'))
self.actionMyHist.setText('5-12 '+_translate('MainWindow',u'Histogram + KDE Curve'))
self.actionVersionCheck.setText(_translate('MainWindow', u'Check Update'))
self.actionWeb.setText(_translate('MainWindow', u'English Forum'))
self.actionGoGithub.setText(_translate('MainWindow', u'Github'))
'''
self.actionCnS.setText(_translate('MainWindow',u'Simplified Chinese'))
self.actionCnT.setText(_translate('MainWindow', u'Traditional Chinese'))
self.actionEn.setText(_translate('MainWindow',u'English'))
'''
self.actionCnS.setText(u'简体中文')
self.actionCnT.setText(u'繁體中文')
self.actionEn.setText(u'English')
self.actionLoadLanguage.setText(_translate('MainWindow',u'Load Language'))
def goGitHub(self):
webbrowser.open('https://github.com/GeoPyTool/GeoPyTool')
def goIssue(self):
webbrowser.open('https://github.com/GeoPyTool/GeoPyTool/issues')
def checkVersion(self):
#reply = QMessageBox.information(self, 'Version', self.talk)
_translate = QtCore.QCoreApplication.translate
url = 'https://raw.githubusercontent.com/GeoPyTool/GeoPyTool/master/geopytool/CustomClass.py'
r= 0
try:
r = requests.get(url, allow_redirects=True)
r.raise_for_status()
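# Added note: the first line of the remote CustomClass.py is expected to look
# like "version = '...'", so prefixing it with 'self.target' and passing the
# result to exec() sets self.targetversion to the version published online.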
NewVersion = 'self.target' + r.text.splitlines()[0]
except requests.exceptions.ConnectionError as err:
print(err)
r=0
buttonReply = QMessageBox.information(self, _translate('MainWindow', u'NetWork Error'),_translate('MainWindow', u'Net work unavailable.'))
NewVersion ="targetversion = '0'"
except requests.exceptions.HTTPError as err:
print(err)
r=0
buttonReply = QMessageBox.information(self, _translate('MainWindow', u'NetWork Error'),_translate('MainWindow', u'Net work unavailable.'))
NewVersion ="targetversion = '0'"
exec(NewVersion)
print('web is', self.targetversion)
print(NewVersion)
self.talk= _translate('MainWindow','Version Online is ') + self.targetversion +'\n'+_translate('MainWindow','You are using GeoPyTool ') + version +'\n'+ _translate('MainWindow','released on ') + date + '\n'
if r != 0:
print('now is',version)
if (version < self.targetversion):
buttonReply = QMessageBox.question(self, _translate('MainWindow', u'Version'),
self.talk + _translate('MainWindow',
'New version available.\n Download and update?'),
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if buttonReply == QMessageBox.Yes:
print('Yes clicked.')
#qApp.quit
#pip.main(['install', 'geopytool', '--upgrade --no-cache-dir'])
#self.UpDate
webbrowser.open('https://github.com/chinageology/GeoPyTool/blob/master/Download.md')
else:
print('No clicked.')
else:
buttonReply = QMessageBox.information(self, _translate('MainWindow', u'Version'),
self.talk + _translate('MainWindow',
'This is the latest version.'))
def Update(self):
#webbrowser.open('https://github.com/chinageology/GeoPyTool/wiki/Download')
pip.main(['install', 'geopytool','--upgrade'])
def ReadConfig(self):
if(os.path.isfile('config.ini')):
try:
with open('config.ini', 'rt') as f:
try:
data = f.read()
except:
data = 'Language = \'en\''
pass
print(data)
try:
print("self." + data)
exec("self." + data)
except:
pass
print(self.Language)
except Exception:
pass
def WriteConfig(self,text=LocationOfMySelf+'/en'):
try:
with open('config.ini', 'wt') as f:
f.write(text)
except Exception:
pass
def to_ChineseS(self):
self.trans.load(LocationOfMySelf+'/cns')
self.app.installTranslator(self.trans)
self.retranslateUi()
self.WriteConfig('Language = \'cns\'')
def to_ChineseT(self):
self.trans.load(LocationOfMySelf+'/cnt')
self.app.installTranslator(self.trans)
self.retranslateUi()
self.WriteConfig('Language = \'cnt\'')
def to_English(self):
self.trans.load(LocationOfMySelf+'/en')
self.app.installTranslator(self.trans)
self.retranslateUi()
self.WriteConfig('Language = \'en\'')
def to_LoadLanguage(self):
_translate = QtCore.QCoreApplication.translate
fileName, filetype = QFileDialog.getOpenFileName(self,_translate('MainWindow', u'Choose Language File'),
'~/',
'Language Files (*.qm)')  # file-extension filter; entries are separated by double semicolons
print(fileName)
self.trans.load(fileName)
self.app.installTranslator(self.trans)
self.retranslateUi()
def ErrorEvent(self,text=''):
if(text==''):
reply = QMessageBox.information(self, _translate('MainWindow', 'Warning'), _translate('MainWindow',
'Your Data mismatch this Function.\n Some Items missing?\n Or maybe there are blanks in items names?\n Or there are nonnumerical value?'))
else:
reply = QMessageBox.information(self, _translate('MainWindow', 'Warning'), _translate('MainWindow',
'Your Data mismatch this Function.\n Error infor is:') + text)
def SetUpDataFile(self):
flag = 0
ItemsAvalibale = self.model._df.columns.values.tolist()
ItemsToTest = ['Label', 'Marker', 'Color', 'Size', 'Alpha', 'Style', 'Width','Index']
LabelList = []
MarkerList = []
ColorList = []
SizeList = []
AlphaList = []
StyleList = []
WidthList = []
IndexList = []
for i in range(len(self.model._df)):
LabelList.append('Group1')
MarkerList.append('o')
ColorList.append('red')
SizeList.append(10)
AlphaList.append(0.6)
StyleList.append('-')
WidthList.append(1)
IndexList.append(i+1)
data = {'Label': LabelList,
'Index': IndexList,
'Marker': MarkerList,
'Color': ColorList,
'Size': SizeList,
'Alpha': AlphaList,
'Style': StyleList,
'Width': WidthList}
for i in ItemsToTest:
if i not in ItemsAvalibale:
# print(i)
flag = flag + 1
tmpdftoadd = pd.DataFrame({i: data[i]})
self.model._df = pd.concat([tmpdftoadd, self.model._df], axis=1)
self.model = PandasModel(self.model._df)
self.tableView.setModel(self.model)
if flag == 0:
reply = QMessageBox.information(self, _translate('MainWindow','Ready'),
_translate('MainWindow','Everything fine and no need to set up.'))
else:
reply = QMessageBox.information(self, _translate('MainWindow','Ready'),
_translate('MainWindow','Items added, Modify in the Table to set up details.'))
def clearDataFile(self):
self.raw = pd.DataFrame()
self.model = PandasModel(self.raw)
self.tableView.setModel(self.model)
def getDataFiles(self,limit=6):
print('get Multiple Data Files called \n')
DataFilesInput, filetype = QFileDialog.getOpenFileNames(self, u'Choose Data File',
'~/',
'Excel Files (*.xlsx);;Excel 2003 Files (*.xls);;CSV Files (*.csv)')  # file-extension filter; entries are separated by double semicolons
# print(DataFileInput,filetype)
DataFramesList = []
if len(DataFilesInput) >= 1 :
for i in range(len(DataFilesInput)):
if i < limit:
if ('csv' in DataFilesInput[i]):
DataFramesList.append(pd.read_csv(DataFilesInput[i], engine='python'))
elif ('xls' in DataFilesInput[i]):
DataFramesList.append(pd.read_excel(DataFilesInput[i]))
else:
#self.ErrorEvent(text='You can only open up to 6 Data Files at a time.')
pass
return(DataFramesList,DataFilesInput)
def getDataFile(self,CleanOrNot=True):
_translate = QtCore.QCoreApplication.translate
DataFileInput, filetype = QFileDialog.getOpenFileName(self,_translate('MainWindow', u'Choose Data File'),
'~/',
'Excel Files (*.xlsx);;Excel 2003 Files (*.xls);;CSV Files (*.csv)')  # file-extension filter; entries are separated by double semicolons
# #print(DataFileInput,filetype)
self.DataLocation = DataFileInput
print(self.DataLocation )
if ('csv' in DataFileInput):
self.raw = pd.read_csv(DataFileInput, engine='python')
elif ('xls' in DataFileInput):
self.raw = pd.read_excel(DataFileInput)
# #print(self.raw)
if len(self.raw)>0:
self.model = PandasModel(self.raw)
#print(self.model._df)
self.tableView.setModel(self.model)
self.model = PandasModel(self.raw)
#print(self.model._df)
flag = 0
ItemsAvalibale = self.model._df.columns.values.tolist()
ItemsToTest = ['Label', 'Marker', 'Color', 'Size', 'Alpha', 'Style', 'Width']
for i in ItemsToTest:
if i not in ItemsAvalibale:
# print(i)
flag = flag + 1
if flag == 0:
pass
#reply = QMessageBox.information(self, _translate('MainWindow', 'Ready'), _translate('MainWindow', 'Everything fine and no need to set up.'))
else:
pass
#self.SetUpDataFile()
def getFileName(self,list=['C:/Users/Fred/Documents/GitHub/Writing/元素数据/Ag.xlsx']):
result=[]
for i in list:
result.append(i.split("/")[-1].split(".")[0])
return(result)
def saveDataFile(self):
# if self.model._changed == True:
# print('changed')
# #print(self.model._df)
DataFileOutput, ok2 = QFileDialog.getSaveFileName(self,_translate('MainWindow', u'Save Data File'),
'C:/',
'Excel Files (*.xlsx);;CSV Files (*.csv)')  # save the data file
dftosave = self.model._df
#self.model._df.reset_index(drop=True)
if "Label" in dftosave.columns.values.tolist():
dftosave = dftosave.set_index('Label')
if (DataFileOutput != ''):
dftosave.reset_index(drop=True)
if ('csv' in DataFileOutput):
dftosave.to_csv(DataFileOutput, sep=',', encoding='utf-8')
elif ('xls' in DataFileOutput):
dftosave.to_excel(DataFileOutput, encoding='utf-8')
def OldCombine(self):
print('Combine called \n')
pass
DataFilesInput, filetype = QFileDialog.getOpenFileNames(self, _translate('MainWindow', u'Choose Data File'),
'~/',
'Excel Files (*.xlsx);;Excel 2003 Files (*.xls);;CSV Files (*.csv)')  # file-extension filter; entries are separated by double semicolons
# #print(DataFileInput,filetype)
DataFramesList=[]
if len(DataFilesInput)>1:
for i in DataFilesInput:
if ('csv' in i):
DataFramesList.append(pd.read_csv(i, engine='python'))
elif ('xls' in i):
DataFramesList.append(pd.read_excel(i))
pass
#result = pd.concat(DataFramesList,axis=1,sort=False)
result = pd.concat(DataFramesList, ignore_index=True, sort=False)
DataFileOutput, ok2 = QFileDialog.getSaveFileName(self,_translate('MainWindow', u'Save Data File'),
'C:/',
'Excel Files (*.xlsx);;CSV Files (*.csv)')  # save the data file
dftosave = result
if (DataFileOutput != ''):
dftosave.reset_index(drop=True)
if ('csv' in DataFileOutput):
dftosave.to_csv(DataFileOutput, sep=',', encoding='utf-8')
elif ('xls' in DataFileOutput):
dftosave.to_excel(DataFileOutput, encoding='utf-8')
def Combine(self):
print('Combine called \n')
pass
DataFilesInput, filetype = QFileDialog.getOpenFileNames(self, _translate('MainWindow', u'Choose Data File'),
'~/',
'Excel Files (*.xlsx);;Excel 2003 Files (*.xls);;CSV Files (*.csv)')  # file-extension filter; entries are separated by double semicolons
# #print(DataFileInput,filetype)
DataFramesList = []
if len(DataFilesInput) > 1:
for i in DataFilesInput:
if ('csv' in i):
DataFramesList.append(pd.read_csv(i, engine='python'))
elif ('xls' in i):
DataFramesList.append(pd.read_excel(i))
pass
# result = pd.concat(DataFramesList,axis=1,sort=False)
result = pd.concat(DataFramesList, ignore_index=True, sort=False)
print('self.model._df length: ', len(result))
if (len(result) > 0):
self.Combinepop = MyCombine(df=result)
self.Combinepop.Combine()
def getFileName(self,list=['C:/Users/Fred/Documents/GitHub/Writing/元素数据/Ag.xlsx']):
result=[]
for i in list:
result.append(i.split("/")[-1].split(".")[0])
#print(result)
return(result)
def Combine_transverse(self):
print('Combine called \n')
DataFilesInput, filetype = QFileDialog.getOpenFileNames(self, _translate('MainWindow', u'Choose Data File'),
'~/',
'Excel Files (*.xlsx);;Excel 2003 Files (*.xls);;CSV Files (*.csv)')  # file-extension filter; entries are separated by double semicolons
DataFramesList = []
filenamelist = self.getFileName(DataFilesInput)
if len(DataFilesInput) > 1:
for i in range(len(DataFilesInput)):
tmpdf=pd.DataFrame()
if ('csv' in DataFilesInput[i]):
tmpdf=pd.read_csv(DataFilesInput[i], engine='python')
elif ('xls' in DataFilesInput[i]):
tmpdf=pd.read_excel(DataFilesInput[i])
#name_list = tmpdf.columns.values.tolist()
tmpname_dic={}
tmpname_list =[]
oldname_list=tmpdf.columns.values.tolist()
for k in oldname_list:
tmpname_list.append(filenamelist[i]+k)
tmpname_dic[k]=filenamelist[i]+' '+k
print(tmpname_dic)
tmpdf = tmpdf.rename(index=str, columns=tmpname_dic)
DataFramesList.append(tmpdf)
# result = pd.concat(DataFramesList,axis=1,sort=False)
result = pd.concat(DataFramesList,axis=1, ignore_index=False, sort=False)
print('self.model._df length: ', len(result))
if (len(result) > 0):
self.Combinepop = MyCombine(df=result)
self.Combinepop.Combine()
def Flatten(self):
print('Flatten called \n')
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
pass
if (len(self.model._df) > 0):
self.flattenpop = MyFlatten(df=self.model._df)
self.flattenpop.Flatten()
def Trans(self):
print('Trans called \n')
if (len(self.model._df)<=0):
self.getDataFile()
pass
if (len(self.model._df) > 0):
self.transpop = MyTrans(df=self.model._df)
self.transpop.Trans()
def ReFormat(self):
print('ReFormat called \n')
Datas= self.getDataFiles()
def RemoveLOI(self):
_translate = QtCore.QCoreApplication.translate
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
self.model._df_back= self.model._df
if (len(self.model._df) > 0):
loi_list = ['LOI', 'loi', 'Loi']
all_list = ['Total', 'total', 'TOTAL', 'ALL', 'All', 'all']
itemstocheck = ['Total', 'total', 'TOTAL', 'ALL', 'All', 'all','Al2O3', 'MgO', 'FeO', 'Fe2O3', 'CaO', 'Na2O', 'K2O', 'TiO2', 'P2O5', 'SiO2','TFe2O3','MnO','TFeO']
for i in range(len(self.model._df)):
Loi_flag = False
for k in all_list:
if k in self.model._df.columns.values:
de_loi = self.model._df.iloc[i][k]
for m in itemstocheck:
if m in self.model._df.columns.values:
self.model._df.at[i,m]= 100* self.model._df.at[i,m]/de_loi
Loi_flag= True
break
else:
Loi_flag = False
for j in loi_list:
if Loi_flag == False:
if j in self.model._df.columns.values:
de_loi = 100 - self.model._df.iloc[i][j]
for m in itemstocheck:
if m in self.model._df.columns.values:
self.model._df.at[i,m] = 100 * self.model._df.at[i,m] / de_loi
Loi_flag = True
break
else:
Loi_flag = False
if Loi_flag == False:
tmp_all=0
for m in itemstocheck:
if m in self.model._df.columns.values:
tmp_all = tmp_all+ self.model._df.at[i,m]
if round(tmp_all) != 100:
print(tmp_all)
for m in itemstocheck:
if m in self.model._df.columns.values:
self.model._df.at[i, m] = 100 * self.model._df.at[i, m] / tmp_all
reply = QMessageBox.information(self, _translate('MainWindow', 'Done'), _translate('MainWindow',
'LOI has been removed!:'))
def TAS(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.taspop = TAS(df=self.model._df)
try:
self.taspop.TAS()
self.taspop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def Saccani(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df) <= 0):
self.getDataFile()
if (len(self.model._df) > 0):
self.sacpop = Saccani(df=self.model._df)
try:
self.sacpop.Saccani()
self.sacpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def Raman(self):
print('self.model._df length: ',len(self.raw))
if (len(self.raw) <= 0):
self.getDataFile()
if (len(self.raw) > 0):
self.ramanpop = Raman(df=self.raw,filename= self.DataLocation)
try:
self.ramanpop.Raman()
self.ramanpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def FluidInclusion(self):
print('self.model._df length: ',len(self.raw))
if (len(self.raw) <= 0):
self.getDataFile()
if (len(self.raw) > 0):
self.FluidInclusionpop = FluidInclusion(df=self.raw,filename= self.DataLocation)
try:
self.FluidInclusionpop.FluidInclusion()
self.FluidInclusionpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def MyHist(self):
print('self.model._df length: ',len(self.raw))
if (len(self.raw) <= 0):
self.getDataFile()
if (len(self.raw) > 0):
self.MyHistpop = MyHist(df=self.raw,filename= self.DataLocation)
try:
self.MyHistpop.MyHist()
self.MyHistpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def REE(self):
print('self.model._df length: ',len(self.model._df))
if len(self.Standard)>0:
print('self.Standard length: ', len(self.Standard))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.reepop = REE(df=self.model._df,Standard=self.Standard)
try:
self.reepop.REE()
self.reepop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def Trace(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.tracepop = Trace(df=self.model._df,Standard=self.Standard)
try:
self.tracepop.Trace()
self.tracepop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def TraceNew(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.TraceNewpop = TraceNew(df=self.model._df,Standard=self.Standard)
try:
self.TraceNewpop.Trace()
self.TraceNewpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def Pearce(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.pearcepop = Pearce(df=self.model._df)
try:
self.pearcepop.Pearce()
self.pearcepop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def Harker(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.harkerpop = Harker(df=self.model._df)
try:
self.harkerpop.Harker()
self.harkerpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def HarkerOld(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.harkeroldpop = HarkerOld(df=self.model._df)
try:
self.harkeroldpop.HarkerOld()
self.harkeroldpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def CIPW(self):
print('self.model._df length: ', len(self.model._df))
if (len(self.model._df) <= 0):
self.getDataFile()
if (len(self.model._df) > 0):
self.cipwpop = CIPW(df=self.model._df)
try:
self.cipwpop.CIPW()
self.cipwpop.show()
except Exception:
self.ErrorEvent()
def Niggli(self):
print('self.model._df length: ', len(self.model._df))
if (len(self.model._df) <= 0):
self.getDataFile()
if (len(self.model._df) > 0):
self.Nigglipop = Niggli(df=self.model._df)
try:
self.Nigglipop.Niggli()
self.Nigglipop.show()
except Exception:
self.ErrorEvent()
def ZirconTiTemp(self):
print('self.model._df length: ', len(self.model._df))
if (len(self.model._df) <= 0):
self.getDataFile()
if (len(self.model._df) > 0):
self.ztpop = ZirconTiTemp(df=self.model._df)
try:
self.ztpop.ZirconTiTemp()
self.ztpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def RutileZrTemp(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.rzpop = RutileZrTemp(df=self.model._df)
try:
self.rzpop.RutileZrTemp()
self.rzpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def Cluster(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
pass
if (len(self.model._df) > 0):
try:
self.clusterpop = Cluster(df=self.model._df)
self.clusterpop.Cluster()
except Exception as e:
self.ErrorEvent(text=repr(e))
def Stereo(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.stereopop = Stereo(df=self.model._df)
try:
self.stereopop.Stereo()
self.stereopop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def Rose(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.rosepop = Rose(df=self.model._df)
try:
self.rosepop.Rose()
self.rosepop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def QFL(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.qflpop = QFL(df=self.model._df)
try:
self.qflpop.Tri()
self.qflpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def QmFLt(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.qmfltpop = QmFLt(df=self.model._df)
try:
self.qmfltpop.Tri()
self.qmfltpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def Clastic(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.clusterpop = Clastic(df=self.model._df)
try:
self.clusterpop.Tri()
self.clusterpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def CIA(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.ciapop = CIA(df=self.model._df)
try:
self.ciapop.CIA()
self.ciapop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def QAPF(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
ItemsAvalibale = self.model._df.columns.values.tolist()
if 'Q' in ItemsAvalibale and 'A' in ItemsAvalibale and 'P' in ItemsAvalibale and 'F' in ItemsAvalibale:
self.qapfpop = QAPF(df=self.model._df)
try:
self.qapfpop.QAPF()
self.qapfpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
else:
reply = QMessageBox.information(self, _translate('MainWindow', 'Warning'), _translate('MainWindow',
'Your data contain no Q/A/P/F data.\n Maybe you need to run CIPW first?'))
def ZirconCe(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile(CleanOrNot=False)
# print('Opening a new popup window...')
if (len(self.model._df) > 0):
self.zirconpop = ZirconCe(df=self.model._df)
try:
self.zirconpop.MultiBallard()
self.zirconpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def ZirconCeOld(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile(CleanOrNot=False)
# print('Opening a new popup window...')
if (len(self.model._df) > 0):
self.zirconoldpop = ZirconCeOld(df=self.model._df)
try:
self.zirconoldpop.MultiBallard()
self.zirconoldpop.show()
except(KeyError,ValueError,TypeError):
self.ErrorEvent()
def RbSrIsoTope(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.rbsrisotopepop = IsoTope(df=self.model._df,description='Rb-Sr IsoTope diagram', xname='87Rb/86Sr',
yname='87Sr/86Sr', lambdaItem=1.42e-11, xlabel=r'$^{87}Rb/^{86}Sr$', ylabel=r'$^{87}Sr/^{86}Sr$')
try:
self.rbsrisotopepop.Magic()
self.rbsrisotopepop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def SmNdIsoTope(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.smndisotopepop = IsoTope(df=self.model._df,description='Sm-Nd IsoTope diagram', xname='147Sm/144Nd',
yname= '143Nd/144Nd', lambdaItem=6.54e-12, xlabel=r'$^{147}Sm/^{144}Nd$', ylabel=r'$^{143}Nd/^{144}Nd$')
try:
self.smndisotopepop.Magic()
self.smndisotopepop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def KArIsoTope(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.karisotopepop = KArIsoTope(df=self.model._df)
try:
self.karisotopepop.Magic()
self.karisotopepop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def K2OSiO2(self):
print('self.model._df length: ', len(self.model._df))
if (len(self.model._df) <= 0):
self.getDataFile()
if (len(self.model._df) > 0):
self.taspop = K2OSiO2(df=self.model._df)
try:
self.taspop.K2OSiO2()
self.taspop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def XY(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.xypop = XY(df=self.model._df,Standard=self.Standard)
try:
self.xypop.Magic()
self.xypop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def XYZ(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.xyzpop = XYZ(df=self.model._df,Standard=self.Standard)
try:
self.xyzpop.Magic()
self.xyzpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def MultiDimension(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
self.mdpop = MultiDimension(df=self.model._df)
try:
self.mdpop.Magic()
self.mdpop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def ThreeD(self):
print('ThreeD called \n')
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df) > 0):
self.ThreeDpop = MyThreeD( DataFiles = [self.model._df],DataLocation= [self.DataLocation])
self.ThreeDpop.ThreeD()
else:
DataFiles, DataLocation = self.getDataFiles()
print(len(DataFiles),len(DataLocation))
if len(DataFiles)>0:
self.ThreeDpop = MyThreeD( DataFiles = DataFiles,DataLocation= DataLocation)
self.ThreeDpop.ThreeD()
def TwoD(self):
print('TwoD called \n')
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df) > 0):
self.TwoDpop = MyTwoD( DataFiles = [self.model._df],DataLocation= [self.DataLocation])
self.TwoDpop.TwoD()
else:
DataFiles, DataLocation = self.getDataFiles()
print(len(DataFiles),len(DataLocation))
if len(DataFiles)>0:
self.TwoDpop = MyTwoD( DataFiles = DataFiles,DataLocation= DataLocation)
self.TwoDpop.TwoD()
def TwoD_Grey(self):
print('TwoD_Grey called \n')
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df) > 0):
self.TwoDpop = MyTwoD_Grey( DataFiles = [self.model._df],DataLocation= [self.DataLocation])
self.TwoDpop.TwoD()
else:
DataFiles, DataLocation = self.getDataFiles()
print(len(DataFiles),len(DataLocation))
if len(DataFiles)>0:
self.TwoDpop = MyTwoD_Grey( DataFiles = DataFiles,DataLocation= DataLocation)
self.TwoDpop.TwoD()
def Dist(self):
print('Dist called \n')
if (len(self.model._df)<=0):
self.getDataFile()
pass
if (len(self.model._df) > 0):
try:
self.Distpop = MyDist(df=self.model._df)
self.Distpop.Dist()
except Exception as e:
self.ErrorEvent(text=repr(e))
def Sta(self):
#Sta on Calculated Distance
print('Sta called \n')
pass
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
pass
if (len(self.model._df) > 0):
try:
self.stapop = MySta(df=self.model._df)
self.stapop.Sta()
except Exception as e:
tmp_msg='\n This is to do Sta on Calculated Distance.\n'
self.ErrorEvent(text=tmp_msg+repr(e))
def PCA(self):
print('PCA called \n')
pass
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
pass
if (len(self.model._df) > 0):
self.pcapop = MyPCA(df=self.model._df)
try:
self.pcapop.Key_Func()
self.pcapop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def FA(self):
print('FA called \n')
pass
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
pass
if (len(self.model._df) > 0):
self.fapop = MyFA(df=self.model._df)
try:
self.fapop.Key_Func()
self.fapop.show()
except Exception as e:
self.ErrorEvent(text=repr(e))
def Tri(self):
pass
def Auto(self):
print('self.model._df length: ',len(self.model._df))
if (len(self.model._df)<=0):
self.getDataFile()
if (len(self.model._df) > 0):
TotalResult=[]
df = self.model._df
AutoResult = 0
FileOutput, ok1 = QFileDialog.getSaveFileName(self,
'Save File',
'C:/',
'PDF Files (*.pdf)')  # save the data file
if (FileOutput != ''):
AutoResult = pd.DataFrame()
pdf = matplotlib.backends.backend_pdf.PdfPages(FileOutput)
cipwsilent = CIPW(df=df)
cipwsilent.CIPW()
cipwsilent.QAPFsilent()
# TotalResult.append(cipwsilent.OutPutFig)
pdf.savefig(cipwsilent.OutPutFig)
# AutoResult = pd.concat([cipwsilent.OutPutData, AutoResult], axis=1)
tassilent = TAS(df=df)
tassilent.TAS()
tassilent.GetResult()
# TotalResult.append(tassilent.OutPutFig)
pdf.savefig(tassilent.OutPutFig)
AutoResult = pd.concat([tassilent.OutPutData, AutoResult], axis=1)
reesilent = REE(df=df,Standard=self.Standard)
if (reesilent.Check() == True):
reesilent.REE()
reesilent.GetResult()
# TotalResult.append(reesilent.OutPutFig)
pdf.savefig(reesilent.OutPutFig)
AutoResult = pd.concat([reesilent.OutPutData, AutoResult], axis=1)
tracesilent = Trace(df=df,Standard=self.Standard)
if (tracesilent.Check() == True):
tracesilent.Trace()
tracesilent.GetResult()
TotalResult.append(tracesilent.OutPutFig)
pdf.savefig(tracesilent.OutPutFig)
harkersilent = Harker(df=df)
harkersilent.Harker()
harkersilent.GetResult()
TotalResult.append(harkersilent.OutPutFig)
pdf.savefig(harkersilent.OutPutFig)
pearcesilent = Pearce(df=df)
pearcesilent.Pearce()
pearcesilent.GetResult()
TotalResult.append(pearcesilent.OutPutFig)
pdf.savefig(pearcesilent.OutPutFig)
AutoResult = AutoResult.T.groupby(level=0).first().T
pdf.close()
AutoResult = AutoResult.set_index('Label')
AutoResult=AutoResult.drop_duplicates()
print(AutoResult.shape, cipwsilent.newdf3.shape)
try:
AutoResult = pd.concat([cipwsilent.newdf3, AutoResult], axis=1)
except(ValueError):
pass
if ('pdf' in FileOutput):
FileOutput = FileOutput[0:-4]
AutoResult.to_csv(FileOutput + '-chemical-info.csv', sep=',', encoding='utf-8')
cipwsilent.newdf.to_csv(FileOutput + '-cipw-mole.csv', sep=',', encoding='utf-8')
cipwsilent.newdf1.to_csv(FileOutput + '-cipw-mass.csv', sep=',', encoding='utf-8')
cipwsilent.newdf2.to_csv(FileOutput + '-cipw-volume.csv', sep=',', encoding='utf-8')
cipwsilent.newdf3.to_csv(FileOutput + '-cipw-index.csv', sep=',', encoding='utf-8')
else:
pass
def main():
import sys
app = QtWidgets.QApplication(sys.argv)
trans = QtCore.QTranslator()
# trans.load('cn')  # the '.qm' suffix is not needed here
app.installTranslator(trans)
mainWin = Ui_MainWindow()
mainWin.retranslateUi()
mainWin.show()
sys.exit(app.exec_())
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| gpl-3.0 |