repo_name (stringlengths 7–79) | path (stringlengths 4–179) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 959–798k) | license (stringclasses 15 values)
---|---|---|---|---|---|
Bismarrck/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py
|
25
|
13554
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), 'base_dir')
file_io.create_dir(self._base_dir)
def tearDown(self):
file_io.delete_recursively(self._base_dir)
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for v in list(feeder.input_dtype.values()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for v in list(inp.values()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_dtype(np.uint32, dtypes.uint32, data)
self._assert_dtype(np.uint32, dtypes.uint32, self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_dtype(np.uint64, dtypes.uint64, data)
self._assert_dtype(np.uint64, dtypes.uint64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.cached_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.cached_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
# TODO(rohanj): Fix this test by fixing data_feeder. Currently, h5py doesn't
# support permutation based indexing lookups (More documentation at
# http://docs.h5py.org/en/latest/high/dataset.html#fancy-indexing)
def DISABLED_test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      self._assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
file_path = os.path.join(self._base_dir, 'test_hdf5.h5')
h5f = h5py.File(file_path, 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File(file_path, 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
|
apache-2.0
|
simpeg/simpeg
|
examples/05-mag/plot_analytic.py
|
1
|
1990
|
"""
PF: Magnetics: Analytics
========================
Comparing the magnetic field in Vancouver to Seoul
"""
import numpy as np
from SimPEG import PF
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def run(plotIt=True):
xr = np.linspace(-300, 300, 41)
yr = np.linspace(-300, 300, 41)
X, Y = np.meshgrid(xr, yr)
Z = np.ones((np.size(xr), np.size(yr)))*150
# Bz component in Korea
inckr = -8. + 3./60
deckr = 54. + 9./60
btotkr = 50898.6
Bokr = PF.MagAnalytics.IDTtoxyz(inckr, deckr, btotkr)
bx, by, bz = PF.MagAnalytics.MagSphereAnaFunA(
X, Y, Z, 100., 0., 0., 0., 0.01, Bokr, 'secondary'
)
Bzkr = np.reshape(bz, (np.size(xr), np.size(yr)), order='F')
# Bz component in Canada
incca = 16. + 49./60
decca = 70. + 19./60
btotca = 54692.1
Boca = PF.MagAnalytics.IDTtoxyz(incca, decca, btotca)
bx, by, bz = PF.MagAnalytics.MagSphereAnaFunA(
X, Y, Z, 100., 0., 0., 0., 0.01, Boca, 'secondary'
)
Bzca = np.reshape(bz, (np.size(xr), np.size(yr)), order='F')
if plotIt:
plt.figure(figsize=(14, 5))
ax1 = plt.subplot(121)
dat1 = plt.imshow(Bzkr, extent=[min(xr), max(xr), min(yr), max(yr)])
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.05)
ax1.set_xlabel('East-West (m)')
ax1.set_ylabel('South-North (m)')
plt.colorbar(dat1, cax=cax1)
ax1.set_title('$B_z$ field at Seoul, South Korea')
ax2 = plt.subplot(122)
dat2 = plt.imshow(Bzca, extent=[min(xr), max(xr), min(yr), max(yr)])
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.05)
ax2.set_xlabel('East-West (m)')
ax2.set_ylabel('South-North (m)')
plt.colorbar(dat2, cax=cax2)
ax2.set_title('$B_z$ field at Vancouver, Canada')
if __name__ == '__main__':
run()
plt.show()
|
mit
|
jmschrei/scikit-learn
|
sklearn/cluster/bicluster.py
|
66
|
19850
|
"""Spectral biclustering algorithms.
Authors: Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
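# A brief summary of _scale_normalize above (a restatement of the code, not an
# extra step): with D_r = diag(X.sum(axis=1)) and D_c = diag(X.sum(axis=0)),
# the returned matrix is an = D_r^{-1/2} * X * D_c^{-1/2}, with any NaN
# scaling factors arising from empty rows or columns replaced by zero.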
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
    # According to the paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
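# In index notation, the log-interaction transform above computes
# K[i, j] = L[i, j] - mean(L[i, :]) - mean(L[:, j]) + mean(L), where
# L = log(X) after X has been shifted to be >= 1 (again a restatement of the
# code above, for clarity).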
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
        self.rows_ = np.vstack([self.row_labels_ == c
                                for c in range(self.n_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == c
                                   for c in range(self.n_clusters)])
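# A minimal, illustrative usage sketch for SpectralCoclustering (not part of
# the library; the toy block-structured matrix below is invented, and `fit`
# here returns None, so the call is not chained):
#
#   >>> import numpy as np
#   >>> from sklearn.cluster import SpectralCoclustering
#   >>> X = np.array([[5, 5, 0, 0],
#   ...               [4, 6, 0, 0],
#   ...               [0, 0, 3, 7],
#   ...               [0, 0, 6, 4]])
#   >>> model = SpectralCoclustering(n_clusters=2, random_state=0)
#   >>> model.fit(X)
#   >>> model.row_labels_.shape, model.column_labels_.shape
#   ((4,), (4,))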
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
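# A minimal, illustrative usage sketch for SpectralBiclustering (not part of
# the library; the random data below is invented). With n_clusters=(2, 3),
# every row belongs to 3 biclusters and every column to 2, so rows_ and
# columns_ each have 2 * 3 = 6 rows, matching the checkerboard description in
# the class docstring:
#
#   >>> import numpy as np
#   >>> from sklearn.cluster import SpectralBiclustering
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.uniform(size=(20, 30))
#   >>> model = SpectralBiclustering(n_clusters=(2, 3), method='log',
#   ...                              random_state=0)
#   >>> model.fit(X)
#   >>> model.rows_.shape, model.columns_.shape
#   ((6, 20), (6, 30))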
|
bsd-3-clause
|
florian-f/sklearn
|
sklearn/datasets/tests/test_lfw.py
|
1
|
6867
|
"""This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by runnning the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
# PIL is not properly installed, skip those tests
raise SkipTest
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
load_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_people():
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
    # the target is an array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and with no limit on the number of pictures per person
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
load_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100)
@raises(IOError)
def test_load_empty_lfw_pairs():
load_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
bsd-3-clause
|
tgsmith61591/skutil
|
skutil/preprocessing/impute.py
|
1
|
27111
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin, is_classifier
from sklearn.ensemble import BaggingRegressor, BaggingClassifier
from sklearn.externals import six
from sklearn.utils.validation import check_is_fitted
from abc import ABCMeta
from skutil.base import SelectiveMixin, BaseSkutil
from ..utils import is_entirely_numeric, get_numeric, validate_is_pd, is_numeric
from ..utils.fixes import is_iterable
__all__ = [
'BaggedImputer',
'BaggedCategoricalImputer',
'ImputerMixin',
'SelectiveImputer'
]
def _validate_all_numeric(X):
"""Validate that all columns in X
are numeric types. If not, raises a
``ValueError``
Parameters
----------
X : Pandas ``DataFrame``, shape=(n_samples, n_features)
The dataframe to validate
Raises
------
``ValueError`` if not all columns are numeric
"""
if not is_entirely_numeric(X):
        raise ValueError('provided columns must all be numeric')
def _col_mode(col):
"""Get the mode from a series.
Returns
-------
com : int, float
The column's most common value.
"""
vals = col.value_counts()
com = vals.index[0] if not np.isnan(vals.index[0]) else vals.index[1]
return com
def _val_values(vals):
"""Validate that all values in the iterable
are either numeric, or in ('mode', 'median', 'mean').
If not, will raise a TypeError
Raises
------
``TypeError`` if not all values are numeric or
in valid values.
"""
if not all([
        (is_numeric(i) or (isinstance(i, six.string_types) and i in ('mode', 'mean', 'median')))
for i in vals
]):
raise TypeError('All values in self.fill must be numeric or in ("mode", "mean", "median"). '
'Got: %s' % ', '.join(vals))
class ImputerMixin:
"""A mixin for all imputer classes. Contains the default fill value.
This mixin is used for the H2O imputer, as well.
Attributes
----------
_def_fill : int (default=-999999)
The default fill value for NaN values
"""
_def_fill = -999999
class _BaseImputer(six.with_metaclass(ABCMeta, BaseSkutil, TransformerMixin, ImputerMixin)):
"""A base class for all imputers. Handles assignment of the fill value.
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation. Note that since
this transformer can only operate on numeric columns, not explicitly
setting the ``cols`` parameter may result in errors for categorical data.
as_df : bool, optional (default=True)
Whether to return a Pandas DataFrame in the ``transform``
method. If False, will return a NumPy ndarray instead.
Since most skutil transformers depend on explicitly-named
DataFrame features, the ``as_df`` parameter is True by default.
fill : int, float, string or array_like, optional (default=None)
The fill values to use for missing values in columns
Attributes
----------
fill : float, int, None or str
The fill
"""
def __init__(self, cols=None, as_df=True, fill=None):
super(_BaseImputer, self).__init__(cols=cols, as_df=as_df)
self.fill = fill if fill is not None else self._def_fill
class SelectiveImputer(_BaseImputer):
"""A more customizable form on sklearn's ``Imputer`` class. This class
can handle more than mean, median or most common... it will also take
numeric values. Moreover, it will take a vector of strategies or values
with which to impute corresponding columns.
Parameters
----------
cols : array_like, optional (default=None)
The columns on which the transformer will be ``fit``. In
the case that ``cols`` is None, the transformer will be fit
on all columns. Note that since this transformer can only operate
on numeric columns, not explicitly setting the ``cols`` parameter
may result in errors for categorical data.
as_df : bool, optional (default=True)
Whether to return a Pandas DataFrame in the ``transform``
method. If False, will return a NumPy ndarray instead.
Since most skutil transformers depend on explicitly-named
DataFrame features, the ``as_df`` parameter is True by default.
fill : int, float, string or array_like, optional (default=None)
the fill to use for missing values in the training matrix
when fitting a ``SelectiveImputer``. If None, will default to 'mean'
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from skutil.preprocessing import SelectiveImputer
>>>
>>> nan = np.nan
>>> X = pd.DataFrame.from_records(data=np.array([
... [1.0, nan, 3.1],
... [nan, 2.3, nan],
... [2.1, 2.1, 3.1]]),
... columns=['a','b','c'])
>>> imputer = SelectiveImputer(fill=['mean', -999, 'mode'])
>>> imputer.fit_transform(X)
a b c
0 1.00 -999.0 3.1
1 1.55 2.3 3.1
2 2.10 2.1 3.1
Attributes
----------
fills_ : iterable, int or float
The imputer fill-values
"""
def __init__(self, cols=None, as_df=True, fill='mean'):
super(SelectiveImputer, self).__init__(cols, as_df, fill)
def fit(self, X, y=None):
"""Fit the imputer and return the
transformed matrix or frame.
Parameters
----------
X : Pandas ``DataFrame``, shape=(n_samples, n_features)
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
# check on state of X and cols
X, self.cols = validate_is_pd(X, self.cols)
cols = self.cols if self.cols is not None else X.columns.values
# validate the fill, do fit
fill = self.fill
if isinstance(fill, six.string_types):
fill = str(fill)
if fill not in ('mode', 'mean', 'median'):
raise TypeError('self.fill must be either "mode", "mean", "median", None, '
'a number, or an iterable. Got %s' % fill)
if fill == 'mode':
# for each column to impute, we go through and get the value counts
# of each, sorting by the max...
self.fills_ = dict(zip(cols, X[cols].apply(lambda x: _col_mode(x))))
elif fill == 'median':
self.fills_ = dict(zip(cols, X[cols].apply(lambda x: np.nanmedian(x.values))))
else:
self.fills_ = dict(zip(cols, X[cols].apply(lambda x: np.nanmean(x.values))))
# if the fill is an iterable, we have to get a bit more stringent on our validation
elif is_iterable(fill):
# if fill is a dictionary
if isinstance(fill, dict):
# if it's a dict, we can assume that these are the cols...
cols, fill = zip(*fill.items())
self.cols = cols # we reset self.cols in this case!!!
# we need to get the length of the iterable,
# make sure it matches the len of cols
if not len(fill) == len(cols):
raise ValueError('len of fill does not match that of cols')
# make sure they're all ints
_val_values(fill)
d = {}
for ind, c in enumerate(cols):
f = fill[ind]
if is_numeric(f):
d[c] = f
else:
the_col = X[c]
if f == 'mode':
d[c] = _col_mode(the_col)
elif f == 'median':
d[c] = np.nanmedian(the_col.values)
else:
d[c] = np.nanmean(the_col.values)
self.fills_ = d
else:
if not is_numeric(fill):
raise TypeError('self.fill must be either "mode", "mean", "median", None, '
'a number, or an iterable. Got %s' % str(fill))
            # either the fill is an int, or it's a numeric value the user
            # provided explicitly; it has already passed validation above,
            # so use it as-is.
self.fills_ = fill
return self
def transform(self, X):
"""Transform a dataframe given the fit imputer.
Parameters
----------
X : Pandas ``DataFrame``, shape=(n_samples, n_features)
The Pandas frame to transform.
Returns
-------
X : pd.DataFrame or np.ndarray
The imputed matrix
"""
check_is_fitted(self, 'fills_')
# check on state of X and cols
X, _ = validate_is_pd(X, self.cols)
cols = self.cols if self.cols is not None else X.columns.values
# get the fills
modes = self.fills_
# if it's a single int, easy:
if isinstance(modes, int):
X[cols] = X[cols].fillna(modes)
else:
# it's a dict
for nm in cols:
X[nm] = X[nm].fillna(modes[nm])
return X if self.as_df else X.as_matrix()
class _BaseBaggedImputer(_BaseImputer):
"""Base class for all bagged imputers. See subclasses
``BaggedCategoricalImputer`` and ``BaggedImputer`` for specifics.
"""
def __init__(self, cols=None, base_estimator=None, n_estimators=10,
max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=True,
oob_score=False, n_jobs=1, random_state=None, verbose=0, as_df=True,
fill=None, is_classification=False):
super(_BaseBaggedImputer, self).__init__(cols=cols, as_df=as_df, fill=fill)
# set self attributes
self.base_estimator = base_estimator
self.n_estimators = n_estimators
self.max_samples = max_samples
self.max_features = max_features
self.bootstrap = bootstrap
self.bootstrap_features = bootstrap_features
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.is_classification = is_classification
def fit(self, X, y=None):
"""Fit the bagged imputer.
Parameters
----------
X : Pandas ``DataFrame``, shape=(n_samples, n_features)
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
self.fit_transform(X, y)
return self
def fit_transform(self, X, y=None):
"""Fit the bagged imputer and return the
transformed (imputed) matrix.
Parameters
----------
X : Pandas ``DataFrame``, shape=(n_samples, n_features)
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
X : pd.DataFrame or np.ndarray
The imputed matrix.
"""
# check on state of X and cols
X, self.cols = validate_is_pd(X, self.cols)
cols = self.cols if self.cols is not None else X.columns.values
# subset, validate
# we have to validate that all of the columns we're going to impute
# are numeric (this could be float, or int...).
_validate_all_numeric(X[cols])
# we need to get all of the numerics out of X, because these are
# the features we'll be modeling on.
numeric_cols = get_numeric(X)
numerics = X[numeric_cols]
# if is_classification and our estimator is NOT, then we need to raise
if self.base_estimator is not None:
if self.is_classification and not is_classifier(self.base_estimator):
raise TypeError('self.is_classification=True, '
'but base_estimator is not a classifier')
# set which estimator type to fit:
_model = BaggingRegressor if not self.is_classification else BaggingClassifier
        # if there's only one numeric, we know at this point it's the one
        # we're imputing. In that case, there are too few columns to model on
if numerics.shape[1] == 1:
raise ValueError('too few numeric columns on which to model')
# the core algorithm:
# - for each col to impute
# - subset to all numeric columns except the col to impute
# - retain only the complete observations, separate the missing observations
# - build a bagging regressor model to predict for observations with missing values
# - fill in missing values in a copy of the dataframe
models = {}
for col in cols:
x = numerics.copy() # get copy of numerics for this model iteration
y_missing = pd.isnull(x[col]) # boolean vector of which are missing in the current y
y = x.pop(col) # pop off the y vector from the matrix
# if y_missing is all of the rows, we need to bail
if y_missing.sum() == x.shape[0]:
raise ValueError('%s has all missing values, cannot train model' % col)
# at this point we've identified which y values we need to predict, however, we still
# need to prep our x matrix... There are a few corner cases we need to account for:
#
# 1. there are no complete rows in the X matrix
# - we can eliminate some columns to model on in this case, but there's no silver bullet
# 2. the cols selected for model building are missing in the rows needed to impute.
# - this is a hard solution that requires even more NA imputation...
#
# the most "catch-all" solution is going to be to fill all missing values with some val, say -999999
x = x.fillna(self.fill)
X_train = x[~y_missing] # the rows that don't correspond to missing y values
X_test = x[y_missing] # the rows to "predict" on
y_train = y[~y_missing] # the training y vector
# define the model
model = _model(
base_estimator=self.base_estimator,
n_estimators=self.n_estimators,
max_samples=self.max_samples,
max_features=self.max_features,
bootstrap=self.bootstrap,
bootstrap_features=self.bootstrap_features,
oob_score=self.oob_score,
n_jobs=self.n_jobs,
random_state=self.random_state,
verbose=self.verbose)
# fit the model
model.fit(X_train, y_train)
# predict on the missing values, stash the model and the features used to train it
if X_test.shape[0] != 0: # only do this step if there are actually any missing
y_pred = model.predict(X_test)
X.loc[y_missing, col] = y_pred # fill the y vector missing slots and reassign back to X
models[col] = {
'model': model,
'feature_names': X_train.columns.values
}
# assign the model dict to self -- this is the "fit" portion
self.models_ = models
return X if self.as_df else X.as_matrix()
def transform(self, X):
"""Impute the test data after fit.
Parameters
----------
X : Pandas ``DataFrame``, shape=(n_samples, n_features)
The Pandas frame to transform.
Returns
-------
        X : Pandas DataFrame or NumPy ndarray
            The imputed test frame
"""
check_is_fitted(self, 'models_')
# check on state of X and cols
X, _ = validate_is_pd(X, self.cols)
# perform the transformations for missing vals
models = self.models_
for col, kv in six.iteritems(models):
features, model = kv['feature_names'], kv['model']
y = X[col] # the y we're predicting
# this will throw a key error if one of the features isn't there
X_test = X[features] # we need another copy
# if col is in the features, there's something wrong internally
assert col not in features, 'predictive column should not be in fit features (%s)' % col
# since this is a copy, we can add the missing vals where needed
X_test = X_test.fillna(self.fill)
# generate predictions, subset where y was null
y_null = pd.isnull(y)
pred_y = model.predict(X_test.loc[y_null])
# fill where necessary:
if y_null.sum() > 0:
y[y_null] = pred_y # fill where null
X[col] = y # set back to X
return X if self.as_df else X.as_matrix()
class BaggedCategoricalImputer(_BaseBaggedImputer):
"""Performs imputation on select columns by using BaggingRegressors
on the provided columns.
cols : array_like, optional (default=None)
The columns on which the transformer will be ``fit``. In
the case that ``cols`` is None, the transformer will be fit
on all columns. Note that since this transformer can only operate
on numeric columns, not explicitly setting the ``cols`` parameter
may result in errors for categorical data.
base_estimator : object or None, optional (default=None)
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a decision tree.
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default=1.0)
The number of samples to draw from X to train each base estimator.
If int, then draw max_samples samples.
If float, then draw max_samples * X.shape[0] samples.
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
If int, then draw max_features features.
If float, then draw max_features * X.shape[1] features.
bootstrap : boolean, optional (default=True)
Whether samples are drawn with replacement.
bootstrap_features : boolean, optional (default=False)
Whether features are drawn with replacement.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the generalization error.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both fit and predict. If -1,
then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator; If
RandomState instance, random_state is the random number generator; If None,
the random number generator is the RandomState instance used by np.random.
verbose : int, optional (default=0)
Controls the verbosity of the building process.
as_df : bool, optional (default=True)
Whether to return a Pandas DataFrame in the ``transform``
method. If False, will return a NumPy ndarray instead.
Since most skutil transformers depend on explicitly-named
DataFrame features, the ``as_df`` parameter is True by default.
fill : int, optional (default=None)
the fill to use for missing values in the training matrix
when fitting a BaggingClassifier. If None, will default to -999999
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from skutil.preprocessing import BaggedCategoricalImputer
>>>
>>> nan = np.nan
>>> X = pd.DataFrame.from_records(data=np.array([
... [1.0, nan, 4.0],
... [nan, 1.0, nan],
... [2.0, 2.0, 3.0]]),
... columns=['a','b','c'])
>>> imputer = BaggedCategoricalImputer(random_state=42)
>>> imputer.fit_transform(X)
a b c
0 1.0 2.0 4.0
1 2.0 1.0 4.0
2 2.0 2.0 3.0
Attributes
----------
models_ : dict, (string : ``sklearn.base.BaseEstimator``)
A dictionary mapping column names to the fit
bagged estimator.
"""
def __init__(self, cols=None, base_estimator=None, n_estimators=10,
max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=True,
oob_score=False, n_jobs=1, random_state=None, verbose=0, as_df=True, fill=None):
# categorical imputer needs to be classification
super(BaggedCategoricalImputer, self).__init__(
cols=cols, as_df=as_df, fill=fill,
base_estimator=base_estimator, n_estimators=n_estimators,
max_samples=max_samples, max_features=max_features, bootstrap=bootstrap,
bootstrap_features=bootstrap_features, oob_score=oob_score,
n_jobs=n_jobs, random_state=random_state, verbose=verbose,
is_classification=True)
class BaggedImputer(_BaseBaggedImputer):
"""Performs imputation on select columns by using BaggingRegressors
on the provided columns.
cols : array_like, optional (default=None)
The columns on which the transformer will be ``fit``. In
the case that ``cols`` is None, the transformer will be fit
on all columns. Note that since this transformer can only operate
on numeric columns, not explicitly setting the ``cols`` parameter
may result in errors for categorical data.
base_estimator : object or None, optional (default=None)
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a decision tree.
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default=1.0)
The number of samples to draw from X to train each base estimator.
If int, then draw max_samples samples.
If float, then draw max_samples * X.shape[0] samples.
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
If int, then draw max_features features.
If float, then draw max_features * X.shape[1] features.
bootstrap : boolean, optional (default=True)
Whether samples are drawn with replacement.
bootstrap_features : boolean, optional (default=False)
Whether features are drawn with replacement.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the generalization error.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both fit and predict. If -1,
then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator; If
RandomState instance, random_state is the random number generator; If None,
the random number generator is the RandomState instance used by np.random.
verbose : int, optional (default=0)
Controls the verbosity of the building process.
as_df : bool, optional (default=True)
Whether to return a Pandas DataFrame in the ``transform``
method. If False, will return a NumPy ndarray instead.
Since most skutil transformers depend on explicitly-named
DataFrame features, the ``as_df`` parameter is True by default.
fill : int, optional (default=None)
the fill to use for missing values in the training matrix
when fitting a BaggingRegressor. If None, will default to -999999
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from skutil.preprocessing import BaggedImputer
>>>
>>> nan = np.nan
>>> X = pd.DataFrame.from_records(data=np.array([
... [1.0, nan, 3.1],
... [nan, 2.3, nan],
... [2.1, 2.1, 3.1]]),
... columns=['a','b','c'])
>>> imputer = BaggedImputer(random_state=42)
>>> imputer.fit_transform(X)
a b c
0 1.000 2.16 3.1
1 1.715 2.30 3.1
2 2.100 2.10 3.1
Attributes
----------
models_ : dict, (string : ``sklearn.base.BaseEstimator``)
A dictionary mapping column names to the fit
bagged estimator.
"""
def __init__(self, cols=None, base_estimator=None, n_estimators=10,
max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=True,
oob_score=False, n_jobs=1, random_state=None, verbose=0, as_df=True, fill=None):
# invoke super constructor
super(BaggedImputer, self).__init__(
cols=cols, as_df=as_df, fill=fill,
base_estimator=base_estimator, n_estimators=n_estimators,
max_samples=max_samples, max_features=max_features, bootstrap=bootstrap,
bootstrap_features=bootstrap_features, oob_score=oob_score,
n_jobs=n_jobs, random_state=random_state, verbose=verbose,
is_classification=False)
|
bsd-3-clause
|
selective-inference/selective-inference
|
doc/learning_examples/BH/logit_targets_BH_marginal.py
|
3
|
3050
|
import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.utils import full_model_inference, pivot_plot
from selection.learning.core import normal_sampler, logit_fit
from selection.learning.learners import mixture_learner
mixture_learner.scales = [1]*10 + [1.5,2,3,4,5,10]
def BHfilter(pval, q=0.2):
"""Benjamini-Hochberg step-up selection: return the indices of the
p-values that fall at or below the BH threshold at level ``q``
(an empty list if none pass)."""
pval = np.asarray(pval)
pval_sort = np.sort(pval)
comparison = q * np.arange(1, pval.shape[0] + 1.) / pval.shape[0]
passing = pval_sort < comparison
if passing.sum():
thresh = comparison[np.nonzero(passing)[0].max()]
return np.nonzero(pval <= thresh)[0]
return []
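# Illustrative check of BHfilter (values hypothetical, not part of the original
# script): with q=0.2 and p-values [0.001, 0.04, 0.3, 0.9], the BH comparison
# levels q*i/m are [0.05, 0.1, 0.15, 0.2]; the largest sorted p-value below its
# level is 0.04 < 0.1, so the threshold is 0.1 and the call
# BHfilter(np.array([0.001, 0.04, 0.3, 0.9]), q=0.2)
# returns array([0, 1]).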
def simulate(n=200, p=30, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=1000):
# description of statistical problem
X, y, truth = gaussian_instance(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.5,
sigma=sigma,
signal=signal,
random_signs=True,
scale=False)[:3]
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
smooth_sampler = normal_sampler(S, covS)
def meta_algorithm(XTX, XTXi, dispersion, sampler):
p = XTX.shape[0]
success = np.zeros(p)
scale = 0.
noisy_S = sampler(scale=scale)
solnZ = noisy_S / (np.sqrt(np.diag(XTX)) * np.sqrt(dispersion))
pval = ndist.cdf(solnZ)
pval = 2 * np.minimum(pval, 1 - pval)
return set(BHfilter(pval, q=0.2))
selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, dispersion)
# run selection algorithm
return full_model_inference(X,
y,
truth,
selection_algorithm,
smooth_sampler,
success_params=(1, 1),
B=B,
fit_probability=logit_fit,
fit_args={'df':20})
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
for i in range(500):
df = simulate(B=5000)
csvfile = 'logit_targets_BH_marginal.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try: # concatenate to disk
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, length_ax = pivot_plot(df, outbase)
|
bsd-3-clause
|
ycaihua/scikit-learn
|
sklearn/mixture/tests/test_dpgmm.py
|
34
|
2573
|
import unittest
import nose
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less
from .test_gmm import GMMTester
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
if __name__ == '__main__':
nose.runmodule()
|
bsd-3-clause
|
kaichogami/scikit-learn
|
sklearn/linear_model/tests/test_omp.py
|
272
|
7752
|
# Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
|
bsd-3-clause
|
drammock/mne-python
|
examples/simulation/simulate_raw_data.py
|
19
|
2830
|
"""
===========================
Generate simulated raw data
===========================
This example generates raw data by repeating a desired source activation
multiple times.
"""
# Authors: Yousra Bekhti <[email protected]>
# Mark Wronkiewicz <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import find_events, Epochs, compute_covariance, make_ad_hoc_cov
from mne.datasets import sample
from mne.simulation import (simulate_sparse_stc, simulate_raw,
add_noise, add_ecg, add_eog)
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
# Load real data as the template
raw = mne.io.read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True)
##############################################################################
# Generate dipole time series
n_dipoles = 4 # number of dipoles to create
epoch_duration = 2. # duration of each epoch/event
n = 0 # harmonic number
rng = np.random.RandomState(0) # random state (make reproducible)
def data_fun(times):
"""Generate time-staggered sinusoids at harmonics of 10Hz"""
global n
n_samp = len(times)
window = np.zeros(n_samp)
start, stop = [int(ii * float(n_samp) / (2 * n_dipoles))
for ii in (2 * n, 2 * n + 1)]
window[start:stop] = 1.
n += 1
data = 25e-9 * np.sin(2. * np.pi * 10. * n * times)
data *= window
return data
times = raw.times[:int(raw.info['sfreq'] * epoch_duration)]
fwd = mne.read_forward_solution(fwd_fname)
src = fwd['src']
stc = simulate_sparse_stc(src, n_dipoles=n_dipoles, times=times,
data_fun=data_fun, random_state=rng)
# look at our source data
fig, ax = plt.subplots(1)
ax.plot(times, 1e9 * stc.data.T)
ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
mne.viz.utils.plt_show()
##############################################################################
# Simulate raw data
raw_sim = simulate_raw(raw.info, [stc] * 10, forward=fwd, verbose=True)
cov = make_ad_hoc_cov(raw_sim.info)
add_noise(raw_sim, cov, iir_filter=[0.2, -0.2, 0.04], random_state=rng)
add_ecg(raw_sim, random_state=rng)
add_eog(raw_sim, random_state=rng)
raw_sim.plot()
##############################################################################
# Plot evoked data
events = find_events(raw_sim) # only 1 pos, so event number == 1
epochs = Epochs(raw_sim, events, 1, tmin=-0.2, tmax=epoch_duration)
cov = compute_covariance(epochs, tmax=0., method='empirical',
verbose='error') # quick calc
evoked = epochs.average()
evoked.plot_white(cov, time_unit='s')
|
bsd-3-clause
|
guedou/scapy-codecov
|
scapy/layers/inet.py
|
1
|
61293
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
IPv4 (Internet Protocol v4).
"""
import os,time,struct,re,socket,new
from select import select
from collections import defaultdict
from scapy.utils import checksum,inet_aton,inet_ntoa
from scapy.base_classes import Gen
from scapy.data import *
from scapy.layers.l2 import *
from scapy.config import conf
from scapy.fields import *
from scapy.packet import *
from scapy.volatile import *
from scapy.sendrecv import sr,sr1,srp1
from scapy.plist import PacketList,SndRcvList
from scapy.automaton import Automaton,ATMT
from scapy.error import warning
import scapy.as_resolvers
from scapy.arch import plt, MATPLOTLIB_INLINED, MATPLOTLIB_DEFAULT_PLOT_KARGS
####################
## IP Tools class ##
####################
class IPTools(object):
"""Add more powers to a class with an "src" attribute."""
__slots__ = []
def whois(self):
os.system("whois %s" % self.src)
def ottl(self):
t = [32,64,128,255]+[self.ttl]
t.sort()
return t[t.index(self.ttl)+1]
def hops(self):
return self.ottl()-self.ttl-1
_ip_options_names = { 0: "end_of_list",
1: "nop",
2: "security",
3: "loose_source_route",
4: "timestamp",
5: "extended_security",
6: "commercial_security",
7: "record_route",
8: "stream_id",
9: "strict_source_route",
10: "experimental_measurement",
11: "mtu_probe",
12: "mtu_reply",
13: "flow_control",
14: "access_control",
15: "encode",
16: "imi_traffic_descriptor",
17: "extended_IP",
18: "traceroute",
19: "address_extension",
20: "router_alert",
21: "selective_directed_broadcast_mode",
23: "dynamic_packet_state",
24: "upstream_multicast_packet",
25: "quick_start",
30: "rfc4727_experiment",
}
class _IPOption_HDR(Packet):
fields_desc = [ BitField("copy_flag",0, 1),
BitEnumField("optclass",0,2,{0:"control",2:"debug"}),
BitEnumField("option",0,5, _ip_options_names) ]
class IPOption(Packet):
name = "IP Option"
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B", # Only option 0 and 1 have no length and value
length_of="value", adjust=lambda pkt,l:l+2),
StrLenField("value", "",length_from=lambda pkt:pkt.length-2) ]
def extract_padding(self, p):
return "",p
registered_ip_options = {}
@classmethod
def register_variant(cls):
cls.registered_ip_options[cls.option.default] = cls
@classmethod
def dispatch_hook(cls, pkt=None, *args, **kargs):
if pkt:
opt = ord(pkt[0])&0x1f
if opt in cls.registered_ip_options:
return cls.registered_ip_options[opt]
return cls
class IPOption_EOL(IPOption):
name = "IP Option End of Options List"
option = 0
fields_desc = [ _IPOption_HDR ]
class IPOption_NOP(IPOption):
name = "IP Option No Operation"
option=1
fields_desc = [ _IPOption_HDR ]
class IPOption_Security(IPOption):
name = "IP Option Security"
copy_flag = 1
option = 2
fields_desc = [ _IPOption_HDR,
ByteField("length", 11),
ShortField("security",0),
ShortField("compartment",0),
ShortField("handling_restrictions",0),
StrFixedLenField("transmission_control_code","xxx",3),
]
class IPOption_RR(IPOption):
name = "IP Option Record Route"
option = 7
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="routers", adjust=lambda pkt,l:l+3),
ByteField("pointer",4), # 4 is first IP
FieldListField("routers",[],IPField("","0.0.0.0"),
length_from=lambda pkt:pkt.length-3)
]
def get_current_router(self):
return self.routers[self.pointer/4-1]
class IPOption_LSRR(IPOption_RR):
name = "IP Option Loose Source and Record Route"
copy_flag = 1
option = 3
class IPOption_SSRR(IPOption_RR):
name = "IP Option Strict Source and Record Route"
copy_flag = 1
option = 9
class IPOption_Stream_Id(IPOption):
name = "IP Option Stream ID"
copy_flag = 1
option = 8
fields_desc = [ _IPOption_HDR,
ByteField("length", 4),
ShortField("security",0), ]
class IPOption_MTU_Probe(IPOption):
name = "IP Option MTU Probe"
option = 11
fields_desc = [ _IPOption_HDR,
ByteField("length", 4),
ShortField("mtu",0), ]
class IPOption_MTU_Reply(IPOption_MTU_Probe):
name = "IP Option MTU Reply"
option = 12
class IPOption_Traceroute(IPOption):
name = "IP Option Traceroute"
option = 18
fields_desc = [ _IPOption_HDR,
ByteField("length", 12),
ShortField("id",0),
ShortField("outbound_hops",0),
ShortField("return_hops",0),
IPField("originator_ip","0.0.0.0") ]
class IPOption_Address_Extension(IPOption):
name = "IP Option Address Extension"
copy_flag = 1
option = 19
fields_desc = [ _IPOption_HDR,
ByteField("length", 10),
IPField("src_ext","0.0.0.0"),
IPField("dst_ext","0.0.0.0") ]
class IPOption_Router_Alert(IPOption):
name = "IP Option Router Alert"
copy_flag = 1
option = 20
fields_desc = [ _IPOption_HDR,
ByteField("length", 4),
ShortEnumField("alert",0, {0:"router_shall_examine_packet"}), ]
class IPOption_SDBM(IPOption):
name = "IP Option Selective Directed Broadcast Mode"
copy_flag = 1
option = 21
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="addresses", adjust=lambda pkt,l:l+2),
FieldListField("addresses",[],IPField("","0.0.0.0"),
length_from=lambda pkt:pkt.length-2)
]
TCPOptions = (
{ 0 : ("EOL",None),
1 : ("NOP",None),
2 : ("MSS","!H"),
3 : ("WScale","!B"),
4 : ("SAckOK",None),
5 : ("SAck","!"),
8 : ("Timestamp","!II"),
14 : ("AltChkSum","!BH"),
15 : ("AltChkSumOpt",None),
25 : ("Mood","!p"),
28 : ("UTO", "!H"),
34 : ("TFO", "!II"),
},
{ "EOL":0,
"NOP":1,
"MSS":2,
"WScale":3,
"SAckOK":4,
"SAck":5,
"Timestamp":8,
"AltChkSum":14,
"AltChkSumOpt":15,
"Mood":25,
"UTO":28,
"TFO":34,
} )
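# Illustrative usage (not part of the original module): TCP options are given
# as a list of (name, value) tuples that TCPOptionsField below serializes with
# the formats registered above, e.g.
# TCP(dport=80, flags="S", options=[("MSS", 1460), ("WScale", 2), ("SAckOK", "")])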
class TCPOptionsField(StrField):
islist=1
def getfield(self, pkt, s):
opsz = (pkt.dataofs-5)*4
if opsz < 0:
warning("bad dataofs (%i). Assuming dataofs=5"%pkt.dataofs)
opsz = 0
return s[opsz:],self.m2i(pkt,s[:opsz])
def m2i(self, pkt, x):
opt = []
while x:
onum = ord(x[0])
if onum == 0:
opt.append(("EOL",None))
x=x[1:]
break
if onum == 1:
opt.append(("NOP",None))
x=x[1:]
continue
olen = ord(x[1])
if olen < 2:
warning("Malformed TCP option (announced length is %i)" % olen)
olen = 2
oval = x[2:olen]
if TCPOptions[0].has_key(onum):
oname, ofmt = TCPOptions[0][onum]
if onum == 5: #SAck
ofmt += "%iI" % (len(oval)/4)
if ofmt and struct.calcsize(ofmt) == len(oval):
oval = struct.unpack(ofmt, oval)
if len(oval) == 1:
oval = oval[0]
opt.append((oname, oval))
else:
opt.append((onum, oval))
x = x[olen:]
return opt
def i2m(self, pkt, x):
opt = ""
for oname,oval in x:
if type(oname) is str:
if oname == "NOP":
opt += "\x01"
continue
elif oname == "EOL":
opt += "\x00"
continue
elif TCPOptions[1].has_key(oname):
onum = TCPOptions[1][oname]
ofmt = TCPOptions[0][onum][1]
if onum == 5: #SAck
ofmt += "%iI" % len(oval)
if ofmt is not None and (type(oval) is not str or "s" in ofmt):
if type(oval) is not tuple:
oval = (oval,)
oval = struct.pack(ofmt, *oval)
else:
warning("option [%s] unknown. Skipped."%oname)
continue
else:
onum = oname
if type(oval) is not str:
warning("option [%i] is not string."%onum)
continue
opt += chr(onum)+chr(2+len(oval))+oval
return opt+"\x00"*(3-((len(opt)+3)%4))
def randval(self):
return [] # XXX
class ICMPTimeStampField(IntField):
re_hmsm = re.compile("([0-2]?[0-9])[Hh:](([0-5]?[0-9])([Mm:]([0-5]?[0-9])([sS:.]([0-9]{0,3}))?)?)?$")
def i2repr(self, pkt, val):
if val is None:
return "--"
else:
sec, milli = divmod(val, 1000)
min, sec = divmod(sec, 60)
hour, min = divmod(min, 60)
return "%d:%d:%d.%d" %(hour, min, sec, int(milli))
def any2i(self, pkt, val):
if type(val) is str:
hmsms = self.re_hmsm.match(val)
if hmsms:
h,_,m,_,s,_,ms = hmsms = hmsms.groups()
ms = int(((ms or "")+"000")[:3])
val = ((int(h)*60+int(m or 0))*60+int(s or 0))*1000+ms
else:
val = 0
elif val is None:
val = int((time.time()%(24*60*60))*1000)
return val
class DestIPField(IPField, DestField):
bindings = {}
def __init__(self, name, default):
IPField.__init__(self, name, None)
DestField.__init__(self, name, default)
def i2m(self, pkt, x):
if x is None:
x = self.dst_from_pkt(pkt)
return IPField.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
x = self.dst_from_pkt(pkt)
return IPField.i2h(self, pkt, x)
class IP(Packet, IPTools):
__slots__ = ["_defrag_pos"]
name = "IP"
fields_desc = [ BitField("version" , 4 , 4),
BitField("ihl", None, 4),
XByteField("tos", 0),
ShortField("len", None),
ShortField("id", 1),
FlagsField("flags", 0, 3, ["MF","DF","evil"]),
BitField("frag", 0, 13),
ByteField("ttl", 64),
ByteEnumField("proto", 0, IP_PROTOS),
XShortField("chksum", None),
#IPField("src", "127.0.0.1"),
Emph(SourceIPField("src","dst")),
Emph(DestIPField("dst", "127.0.0.1")),
PacketListField("options", [], IPOption, length_from=lambda p:p.ihl*4-20) ]
def post_build(self, p, pay):
ihl = self.ihl
p += "\0"*((-len(p))%4) # pad IP options if needed
if ihl is None:
ihl = len(p)/4
p = chr(((self.version&0xf)<<4) | ihl&0x0f)+p[1:]
if self.len is None:
l = len(p)+len(pay)
p = p[:2]+struct.pack("!H", l)+p[4:]
if self.chksum is None:
ck = checksum(p)
p = p[:10]+chr(ck>>8)+chr(ck&0xff)+p[12:]
return p+pay
def extract_padding(self, s):
l = self.len - (self.ihl << 2)
return s[:l],s[l:]
def send(self, s, slp=0):
for p in self:
try:
s.sendto(str(p), (p.dst,0))
except socket.error, msg:
log_runtime.error(msg)
if slp:
time.sleep(slp)
def route(self):
dst = self.dst
if isinstance(dst,Gen):
dst = iter(dst).next()
if conf.route is None:
# unused import, only to initialize conf.route
import scapy.route
return conf.route.route(dst)
def hashret(self):
if ( (self.proto == socket.IPPROTO_ICMP)
and (isinstance(self.payload, ICMP))
and (self.payload.type in [3,4,5,11,12]) ):
return self.payload.payload.hashret()
if not conf.checkIPinIP and self.proto in [4, 41]: # IP, IPv6
return self.payload.hashret()
if self.dst == "224.0.0.251": # mDNS
return struct.pack("B", self.proto) + self.payload.hashret()
if conf.checkIPsrc and conf.checkIPaddr:
return (strxor(inet_aton(self.src), inet_aton(self.dst))
+ struct.pack("B",self.proto) + self.payload.hashret())
return struct.pack("B", self.proto) + self.payload.hashret()
def answers(self, other):
if not conf.checkIPinIP: # skip IP in IP and IPv6 in IP
if self.proto in [4, 41]:
return self.payload.answers(other)
if isinstance(other, IP) and other.proto in [4, 41]:
return self.answers(other.payload)
if conf.ipv6_enabled \
and isinstance(other, scapy.layers.inet6.IPv6) \
and other.nh in [4, 41]:
return self.answers(other.payload)
if not isinstance(other,IP):
return 0
if conf.checkIPaddr:
if other.dst == "224.0.0.251" and self.dst == "224.0.0.251": # mDNS
return self.payload.answers(other.payload)
elif (self.dst != other.src):
return 0
if ( (self.proto == socket.IPPROTO_ICMP) and
(isinstance(self.payload, ICMP)) and
(self.payload.type in [3,4,5,11,12]) ):
# ICMP error message
return self.payload.payload.answers(other)
else:
if ( (conf.checkIPaddr and (self.src != other.dst)) or
(self.proto != other.proto) ):
return 0
return self.payload.answers(other.payload)
def mysummary(self):
s = self.sprintf("%IP.src% > %IP.dst% %IP.proto%")
if self.frag:
s += " frag:%i" % self.frag
return s
def fragment(self, fragsize=1480):
"""Fragment IP datagrams"""
fragsize = (fragsize+7)/8*8
lst = []
fnb = 0
fl = self
while fl.underlayer is not None:
fnb += 1
fl = fl.underlayer
for p in fl:
s = str(p[fnb].payload)
nb = (len(s)+fragsize-1)/fragsize
for i in xrange(nb):
q = p.copy()
del(q[fnb].payload)
del(q[fnb].chksum)
del(q[fnb].len)
if i != nb - 1:
q[fnb].flags |= 1
q[fnb].frag += i * fragsize / 8
r = conf.raw_layer(load=s[i*fragsize:(i+1)*fragsize])
r.overload_fields = p[fnb].payload.overload_fields.copy()
q.add_payload(r)
lst.append(q)
return lst
class TCP(Packet):
name = "TCP"
fields_desc = [ ShortEnumField("sport", 20, TCP_SERVICES),
ShortEnumField("dport", 80, TCP_SERVICES),
IntField("seq", 0),
IntField("ack", 0),
BitField("dataofs", None, 4),
BitField("reserved", 0, 3),
FlagsField("flags", 0x2, 9, "FSRPAUECN"),
ShortField("window", 8192),
XShortField("chksum", None),
ShortField("urgptr", 0),
TCPOptionsField("options", {}) ]
def post_build(self, p, pay):
p += pay
dataofs = self.dataofs
if dataofs is None:
dataofs = 5+((len(self.get_field("options").i2m(self,self.options))+3)/4)
p = p[:12]+chr((dataofs << 4) | ord(p[12])&0x0f)+p[13:]
if self.chksum is None:
if isinstance(self.underlayer, IP):
if self.underlayer.len is not None:
if self.underlayer.ihl is None:
olen = sum(len(x) for x in self.underlayer.options)
ihl = 5 + olen / 4 + (1 if olen % 4 else 0)
else:
ihl = self.underlayer.ihl
ln = self.underlayer.len - 4 * ihl
else:
ln = len(p)
psdhdr = struct.pack("!4s4sHH",
inet_aton(self.underlayer.src),
inet_aton(self.underlayer.dst),
self.underlayer.proto,
ln)
ck=checksum(psdhdr+p)
p = p[:16]+struct.pack("!H", ck)+p[18:]
elif conf.ipv6_enabled and isinstance(self.underlayer, scapy.layers.inet6.IPv6) or isinstance(self.underlayer, scapy.layers.inet6._IPv6ExtHdr):
ck = scapy.layers.inet6.in6_chksum(socket.IPPROTO_TCP, self.underlayer, p)
p = p[:16]+struct.pack("!H", ck)+p[18:]
else:
warning("No IP underlayer to compute checksum. Leaving null.")
return p
def hashret(self):
if conf.checkIPsrc:
return struct.pack("H",self.sport ^ self.dport)+self.payload.hashret()
else:
return self.payload.hashret()
def answers(self, other):
if not isinstance(other, TCP):
return 0
if conf.checkIPsrc:
if not ((self.sport == other.dport) and
(self.dport == other.sport)):
return 0
if (abs(other.seq-self.ack) > 2+len(other.payload)):
return 0
return 1
def mysummary(self):
if isinstance(self.underlayer, IP):
return self.underlayer.sprintf("TCP %IP.src%:%TCP.sport% > %IP.dst%:%TCP.dport% %TCP.flags%")
elif conf.ipv6_enabled and isinstance(self.underlayer, scapy.layers.inet6.IPv6):
return self.underlayer.sprintf("TCP %IPv6.src%:%TCP.sport% > %IPv6.dst%:%TCP.dport% %TCP.flags%")
else:
return self.sprintf("TCP %TCP.sport% > %TCP.dport% %TCP.flags%")
class UDP(Packet):
name = "UDP"
fields_desc = [ ShortEnumField("sport", 53, UDP_SERVICES),
ShortEnumField("dport", 53, UDP_SERVICES),
ShortField("len", None),
XShortField("chksum", None), ]
def post_build(self, p, pay):
p += pay
l = self.len
if l is None:
l = len(p)
p = p[:4]+struct.pack("!H",l)+p[6:]
if self.chksum is None:
if isinstance(self.underlayer, IP):
if self.underlayer.len is not None:
if self.underlayer.ihl is None:
olen = sum(len(x) for x in self.underlayer.options)
ihl = 5 + olen / 4 + (1 if olen % 4 else 0)
else:
ihl = self.underlayer.ihl
ln = self.underlayer.len - 4 * ihl
else:
ln = len(p)
psdhdr = struct.pack("!4s4sHH",
inet_aton(self.underlayer.src),
inet_aton(self.underlayer.dst),
self.underlayer.proto,
ln)
ck = checksum(psdhdr+p)
# According to RFC768 if the result checksum is 0, it should be set to 0xFFFF
if ck == 0:
ck = 0xFFFF
p = p[:6]+struct.pack("!H", ck)+p[8:]
elif isinstance(self.underlayer, scapy.layers.inet6.IPv6) or isinstance(self.underlayer, scapy.layers.inet6._IPv6ExtHdr):
ck = scapy.layers.inet6.in6_chksum(socket.IPPROTO_UDP, self.underlayer, p)
# According to RFC2460 if the result checksum is 0, it should be set to 0xFFFF
if ck == 0:
ck = 0xFFFF
p = p[:6]+struct.pack("!H", ck)+p[8:]
else:
warning("No IP underlayer to compute checksum. Leaving null.")
return p
def extract_padding(self, s):
l = self.len - 8
return s[:l],s[l:]
def hashret(self):
return self.payload.hashret()
def answers(self, other):
if not isinstance(other, UDP):
return 0
if conf.checkIPsrc:
if self.dport != other.sport:
return 0
return self.payload.answers(other.payload)
def mysummary(self):
if isinstance(self.underlayer, IP):
return self.underlayer.sprintf("UDP %IP.src%:%UDP.sport% > %IP.dst%:%UDP.dport%")
elif isinstance(self.underlayer, scapy.layers.inet6.IPv6):
return self.underlayer.sprintf("UDP %IPv6.src%:%UDP.sport% > %IPv6.dst%:%UDP.dport%")
else:
return self.sprintf("UDP %UDP.sport% > %UDP.dport%")
icmptypes = { 0 : "echo-reply",
3 : "dest-unreach",
4 : "source-quench",
5 : "redirect",
8 : "echo-request",
9 : "router-advertisement",
10 : "router-solicitation",
11 : "time-exceeded",
12 : "parameter-problem",
13 : "timestamp-request",
14 : "timestamp-reply",
15 : "information-request",
16 : "information-response",
17 : "address-mask-request",
18 : "address-mask-reply" }
icmpcodes = { 3 : { 0 : "network-unreachable",
1 : "host-unreachable",
2 : "protocol-unreachable",
3 : "port-unreachable",
4 : "fragmentation-needed",
5 : "source-route-failed",
6 : "network-unknown",
7 : "host-unknown",
9 : "network-prohibited",
10 : "host-prohibited",
11 : "TOS-network-unreachable",
12 : "TOS-host-unreachable",
13 : "communication-prohibited",
14 : "host-precedence-violation",
15 : "precedence-cutoff", },
5 : { 0 : "network-redirect",
1 : "host-redirect",
2 : "TOS-network-redirect",
3 : "TOS-host-redirect", },
11 : { 0 : "ttl-zero-during-transit",
1 : "ttl-zero-during-reassembly", },
12 : { 0 : "ip-header-bad",
1 : "required-option-missing", }, }
class ICMP(Packet):
name = "ICMP"
fields_desc = [ ByteEnumField("type",8, icmptypes),
MultiEnumField("code",0, icmpcodes, depends_on=lambda pkt:pkt.type,fmt="B"),
XShortField("chksum", None),
ConditionalField(XShortField("id",0), lambda pkt:pkt.type in [0,8,13,14,15,16,17,18]),
ConditionalField(XShortField("seq",0), lambda pkt:pkt.type in [0,8,13,14,15,16,17,18]),
ConditionalField(ICMPTimeStampField("ts_ori", None), lambda pkt:pkt.type in [13,14]),
ConditionalField(ICMPTimeStampField("ts_rx", None), lambda pkt:pkt.type in [13,14]),
ConditionalField(ICMPTimeStampField("ts_tx", None), lambda pkt:pkt.type in [13,14]),
ConditionalField(IPField("gw","0.0.0.0"), lambda pkt:pkt.type==5),
ConditionalField(ByteField("ptr",0), lambda pkt:pkt.type==12),
ConditionalField(ByteField("reserved",0), lambda pkt:pkt.type in [3,11]),
ConditionalField(ByteField("length",0), lambda pkt:pkt.type in [3,11,12]),
ConditionalField(IPField("addr_mask","0.0.0.0"), lambda pkt:pkt.type in [17,18]),
ConditionalField(ShortField("nexthopmtu",0), lambda pkt:pkt.type==3),
ConditionalField(ShortField("unused",0), lambda pkt:pkt.type in [11,12]),
ConditionalField(IntField("unused",0), lambda pkt:pkt.type not in [0,3,5,8,11,12,13,14,15,16,17,18])
]
def post_build(self, p, pay):
p += pay
if self.chksum is None:
ck = checksum(p)
p = p[:2]+chr(ck>>8)+chr(ck&0xff)+p[4:]
return p
def hashret(self):
if self.type in [0,8,13,14,15,16,17,18]:
return struct.pack("HH",self.id,self.seq)+self.payload.hashret()
return self.payload.hashret()
def answers(self, other):
if not isinstance(other,ICMP):
return 0
if ( (other.type,self.type) in [(8,0),(13,14),(15,16),(17,18)] and
self.id == other.id and
self.seq == other.seq ):
return 1
return 0
def guess_payload_class(self, payload):
if self.type in [3,4,5,11,12]:
return IPerror
else:
return None
def mysummary(self):
if isinstance(self.underlayer, IP):
return self.underlayer.sprintf("ICMP %IP.src% > %IP.dst% %ICMP.type% %ICMP.code%")
else:
return self.sprintf("ICMP %ICMP.type% %ICMP.code%")
class IPerror(IP):
name = "IP in ICMP"
def answers(self, other):
if not isinstance(other, IP):
return 0
if not ( ((conf.checkIPsrc == 0) or (self.dst == other.dst)) and
(self.src == other.src) and
( ((conf.checkIPID == 0)
or (self.id == other.id)
or (conf.checkIPID == 1 and self.id == socket.htons(other.id)))) and
(self.proto == other.proto) ):
return 0
return self.payload.answers(other.payload)
def mysummary(self):
return Packet.mysummary(self)
class TCPerror(TCP):
name = "TCP in ICMP"
def answers(self, other):
if not isinstance(other, TCP):
return 0
if conf.checkIPsrc:
if not ((self.sport == other.sport) and
(self.dport == other.dport)):
return 0
if conf.check_TCPerror_seqack:
if self.seq is not None:
if self.seq != other.seq:
return 0
if self.ack is not None:
if self.ack != other.ack:
return 0
return 1
def mysummary(self):
return Packet.mysummary(self)
class UDPerror(UDP):
name = "UDP in ICMP"
def answers(self, other):
if not isinstance(other, UDP):
return 0
if conf.checkIPsrc:
if not ((self.sport == other.sport) and
(self.dport == other.dport)):
return 0
return 1
def mysummary(self):
return Packet.mysummary(self)
class ICMPerror(ICMP):
name = "ICMP in ICMP"
def answers(self, other):
if not isinstance(other,ICMP):
return 0
if not ((self.type == other.type) and
(self.code == other.code)):
return 0
if self.code in [0,8,13,14,17,18]:
if (self.id == other.id and
self.seq == other.seq):
return 1
else:
return 0
else:
return 1
def mysummary(self):
return Packet.mysummary(self)
bind_layers( Ether, IP, type=2048)
bind_layers( CookedLinux, IP, proto=2048)
bind_layers( GRE, IP, proto=2048)
bind_layers( SNAP, IP, code=2048)
bind_layers( Loopback, IP, type=2)
bind_layers( IPerror, IPerror, frag=0, proto=4)
bind_layers( IPerror, ICMPerror, frag=0, proto=1)
bind_layers( IPerror, TCPerror, frag=0, proto=6)
bind_layers( IPerror, UDPerror, frag=0, proto=17)
bind_layers( IP, IP, frag=0, proto=4)
bind_layers( IP, ICMP, frag=0, proto=1)
bind_layers( IP, TCP, frag=0, proto=6)
bind_layers( IP, UDP, frag=0, proto=17)
bind_layers( IP, GRE, frag=0, proto=47)
conf.l2types.register(101, IP)
conf.l2types.register_num2layer(12, IP)
conf.l3types.register(ETH_P_IP, IP)
conf.l3types.register_num2layer(ETH_P_ALL, IP)
def inet_register_l3(l2, l3):
return getmacbyip(l3.dst)
conf.neighbor.register_l3(Ether, IP, inet_register_l3)
conf.neighbor.register_l3(Dot3, IP, inet_register_l3)
###################
## Fragmentation ##
###################
@conf.commands.register
def fragment(pkt, fragsize=1480):
"""Fragment a big IP datagram"""
fragsize = (fragsize+7)/8*8
lst = []
for p in pkt:
s = str(p[IP].payload)
nb = (len(s)+fragsize-1)/fragsize
for i in xrange(nb):
q = p.copy()
del(q[IP].payload)
del(q[IP].chksum)
del(q[IP].len)
if i != nb - 1:
q[IP].flags |= 1
q[IP].frag += i * fragsize / 8
r = conf.raw_layer(load=s[i*fragsize:(i+1)*fragsize])
r.overload_fields = p[IP].payload.overload_fields.copy()
q.add_payload(r)
lst.append(q)
return lst
def overlap_frag(p, overlap, fragsize=8, overlap_fragsize=None):
"""Build a fragment list in which the fragments of the decoy payload
``overlap`` come first and cover the same offsets as the fragments of the
real packet ``p``, producing overlapping fragments at reassembly."""
if overlap_fragsize is None:
overlap_fragsize = fragsize
q = p.copy()
del(q[IP].payload)
q[IP].add_payload(overlap)
qfrag = fragment(q, overlap_fragsize)
qfrag[-1][IP].flags |= 1
return qfrag+fragment(p, fragsize)
@conf.commands.register
def defrag(plist):
"""defrag(plist) -> ([not fragmented], [defragmented],
[ [bad fragments], [bad fragments], ... ])"""
frags = defaultdict(PacketList)
nofrag = PacketList()
for p in plist:
if IP not in p:
nofrag.append(p)
continue
ip = p[IP]
if ip.frag == 0 and ip.flags & 1 == 0:
nofrag.append(p)
continue
uniq = (ip.id,ip.src,ip.dst,ip.proto)
frags[uniq].append(p)
defrag = []
missfrag = []
for lst in frags.itervalues():
lst.sort(key=lambda x: x.frag)
p = lst[0]
lastp = lst[-1]
if p.frag > 0 or lastp.flags & 1 != 0: # first or last fragment missing
missfrag.append(lst)
continue
p = p.copy()
if conf.padding_layer in p:
del(p[conf.padding_layer].underlayer.payload)
ip = p[IP]
if ip.len is None or ip.ihl is None:
clen = len(ip.payload)
else:
clen = ip.len - (ip.ihl<<2)
txt = conf.raw_layer()
for q in lst[1:]:
if clen != q.frag<<3: # Wrong fragmentation offset
if clen > q.frag<<3:
warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag<<3, p,txt,q))
missfrag.append(lst)
break
if q[IP].len is None or q[IP].ihl is None:
clen += len(q[IP].payload)
else:
clen += q[IP].len - (q[IP].ihl<<2)
if conf.padding_layer in q:
del(q[conf.padding_layer].underlayer.payload)
txt.add_payload(q[IP].payload.copy())
else:
ip.flags &= ~1 # !MF
del(ip.chksum)
del(ip.len)
p = p/txt
defrag.append(p)
defrag2=PacketList()
for p in defrag:
defrag2.append(p.__class__(str(p)))
return nofrag,defrag2,missfrag
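# Sketch of typical use (addresses hypothetical): build fragments with
# fragment() and reassemble them with defrag().
# frags = fragment(IP(dst="192.0.2.1")/UDP()/("X"*3000), fragsize=500)
# notfrag, reassembled, badfrag = defrag(frags)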
@conf.commands.register
def defragment(plist):
"""defrag(plist) -> plist defragmented as much as possible """
frags = defaultdict(lambda:[])
final = []
pos = 0
for p in plist:
p._defrag_pos = pos
pos += 1
if IP in p:
ip = p[IP]
if ip.frag != 0 or ip.flags & 1:
ip = p[IP]
uniq = (ip.id,ip.src,ip.dst,ip.proto)
frags[uniq].append(p)
continue
final.append(p)
defrag = []
missfrag = []
for lst in frags.itervalues():
lst.sort(key=lambda x: x.frag)
p = lst[0]
lastp = lst[-1]
if p.frag > 0 or lastp.flags & 1 != 0: # first or last fragment missing
missfrag += lst
continue
p = p.copy()
if conf.padding_layer in p:
del(p[conf.padding_layer].underlayer.payload)
ip = p[IP]
if ip.len is None or ip.ihl is None:
clen = len(ip.payload)
else:
clen = ip.len - (ip.ihl<<2)
txt = conf.raw_layer()
for q in lst[1:]:
if clen != q.frag<<3: # Wrong fragmentation offset
if clen > q.frag<<3:
warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag<<3, p,txt,q))
missfrag += lst
break
if q[IP].len is None or q[IP].ihl is None:
clen += len(q[IP].payload)
else:
clen += q[IP].len - (q[IP].ihl<<2)
if conf.padding_layer in q:
del(q[conf.padding_layer].underlayer.payload)
txt.add_payload(q[IP].payload.copy())
else:
ip.flags &= ~1 # !MF
del(ip.chksum)
del(ip.len)
p = p/txt
p._defrag_pos = max(x._defrag_pos for x in lst)
defrag.append(p)
defrag2=[]
for p in defrag:
q = p.__class__(str(p))
q._defrag_pos = p._defrag_pos
defrag2.append(q)
final += defrag2
final += missfrag
final.sort(key=lambda x: x._defrag_pos)
for p in final:
del(p._defrag_pos)
if hasattr(plist, "listname"):
name = "Defragmented %s" % plist.listname
else:
name = "Defragmented"
return PacketList(final, name=name)
### Add timeskew_graph() method to PacketList
def _packetlist_timeskew_graph(self, ip, **kargs):
"""Tries to graph the timeskew between the timestamps and real time for a given ip"""
# Keep only the TCP segments whose source address is 'ip'
res = map(lambda x: self._elt2pkt(x), self.res)
b = filter(lambda x:x.haslayer(IP) and x.getlayer(IP).src == ip and x.haslayer(TCP), res)
# Build a list of tuples (creation_time, replied_timestamp)
c = []
for p in b:
opts = p.getlayer(TCP).options
for o in opts:
if o[0] == "Timestamp":
c.append((p.time,o[1][0]))
# Stop if the list is empty
if not c:
warning("No timestamps found in packet list")
return []
# Prepare the data that will be plotted
first_creation_time = c[0][0]
first_replied_timestamp = c[0][1]
def _wrap_data(ts_tuple, wrap_seconds=2000):
"""Wrap the list of tuples."""
ct,rt = ts_tuple # (creation_time, replied_timestamp)
X = ct % wrap_seconds
Y = ((ct-first_creation_time) - ((rt-first_replied_timestamp)/1000.0))
return X, Y
data = map(_wrap_data, c)
# Mimic the default gnuplot output
if kargs == {}:
kargs = MATPLOTLIB_DEFAULT_PLOT_KARGS
lines = plt.plot(data, **kargs)
# Call show() if matplotlib is not inlined
if not MATPLOTLIB_INLINED:
plt.show()
return lines
PacketList.timeskew_graph = new.instancemethod(_packetlist_timeskew_graph, None, PacketList)
### Create a new packet list
class TracerouteResult(SndRcvList):
__slots__ = ["graphdef", "graphpadding", "graphASres", "padding", "hloc",
"nloc"]
def __init__(self, res=None, name="Traceroute", stats=None):
PacketList.__init__(self, res, name, stats)
self.graphdef = None
self.graphASres = 0
self.padding = 0
self.hloc = None
self.nloc = None
def show(self):
return self.make_table(lambda (s,r): (s.sprintf("%IP.dst%:{TCP:tcp%ir,TCP.dport%}{UDP:udp%ir,UDP.dport%}{ICMP:ICMP}"),
s.ttl,
r.sprintf("%-15s,IP.src% {TCP:%TCP.flags%}{ICMP:%ir,ICMP.type%}")))
def get_trace(self):
trace = {}
for s,r in self.res:
if IP not in s:
continue
d = s[IP].dst
if d not in trace:
trace[d] = {}
trace[d][s[IP].ttl] = r[IP].src, ICMP not in r
for k in trace.itervalues():
try:
m = min(x for x, y in k.itervalues() if y)
except ValueError:
continue
for l in k.keys(): # use .keys(): k is modified in the loop
if l > m:
del k[l]
return trace
def trace3D(self):
"""Give a 3D representation of the traceroute.
right button: rotate the scene
middle button: zoom
left button: move the scene
left button on a ball: toggle IP displaying
ctrl-left button on a ball: scan ports 21,22,23,25,80 and 443 and display the result"""
trace = self.get_trace()
import visual
class IPsphere(visual.sphere):
def __init__(self, ip, **kargs):
visual.sphere.__init__(self, **kargs)
self.ip=ip
self.label=None
self.setlabel(self.ip)
def setlabel(self, txt,visible=None):
if self.label is not None:
if visible is None:
visible = self.label.visible
self.label.visible = 0
elif visible is None:
visible=0
self.label=visual.label(text=txt, pos=self.pos, space=self.radius, xoffset=10, yoffset=20, visible=visible)
def action(self):
self.label.visible ^= 1
visual.scene = visual.display()
visual.scene.exit = True
start = visual.box()
rings={}
tr3d = {}
for i in trace:
tr = trace[i]
tr3d[i] = []
for t in xrange(1, max(tr) + 1):
if t not in rings:
rings[t] = []
if t in tr:
if tr[t] not in rings[t]:
rings[t].append(tr[t])
tr3d[i].append(rings[t].index(tr[t]))
else:
rings[t].append(("unk",-1))
tr3d[i].append(len(rings[t])-1)
for t in rings:
r = rings[t]
l = len(r)
for i in xrange(l):
if r[i][1] == -1:
col = (0.75,0.75,0.75)
elif r[i][1]:
col = visual.color.green
else:
col = visual.color.blue
s = IPsphere(pos=((l-1)*visual.cos(2*i*visual.pi/l),(l-1)*visual.sin(2*i*visual.pi/l),2*t),
ip = r[i][0],
color = col)
for trlst in tr3d.itervalues():
if t <= len(trlst):
if trlst[t-1] == i:
trlst[t-1] = s
forecol = colgen(0.625, 0.4375, 0.25, 0.125)
for trlst in tr3d.itervalues():
col = forecol.next()
start = (0,0,0)
for ip in trlst:
visual.cylinder(pos=start,axis=ip.pos-start,color=col,radius=0.2)
start = ip.pos
movcenter=None
while 1:
visual.rate(50)
if visual.scene.kb.keys:
k = visual.scene.kb.getkey()
if k == "esc" or k == "q":
break
if visual.scene.mouse.events:
ev = visual.scene.mouse.getevent()
if ev.press == "left":
o = ev.pick
if o:
if ev.ctrl:
if o.ip == "unk":
continue
savcolor = o.color
o.color = (1,0,0)
a,b=sr(IP(dst=o.ip)/TCP(dport=[21,22,23,25,80,443]),timeout=2)
o.color = savcolor
if len(a) == 0:
txt = "%s:\nno results" % o.ip
else:
txt = "%s:\n" % o.ip
for s,r in a:
txt += r.sprintf("{TCP:%IP.src%:%TCP.sport% %TCP.flags%}{TCPerror:%IPerror.dst%:%TCPerror.dport% %IP.src% %ir,ICMP.type%}\n")
o.setlabel(txt, visible=1)
else:
if hasattr(o, "action"):
o.action()
elif ev.drag == "left":
movcenter = ev.pos
elif ev.drop == "left":
movcenter = None
if movcenter:
visual.scene.center -= visual.scene.mouse.pos-movcenter
movcenter = visual.scene.mouse.pos
def world_trace(self, **kargs):
"""Display traceroute results on a world map."""
# Check that the GeoIP module can be imported
try:
import GeoIP
except ImportError:
message = "Can't import GeoIP. Won't be able to plot the world."
scapy.utils.log_loading.info(message)
return list()
# Check if this is an IPv6 traceroute and load the correct file
if isinstance(self, scapy.layers.inet6.TracerouteResult6):
geoip_city_filename = conf.geoip_city_ipv6
else:
geoip_city_filename = conf.geoip_city
# Check that the GeoIP database can be opened
try:
db = GeoIP.open(geoip_city_filename, 0)
except:
message = "Can't open GeoIP database at %s" % geoip_city_filename
scapy.utils.log_loading.info(message)
return list()
# Regroup results per trace
ips = {}
rt = {}
ports_done = {}
for s,r in self.res:
ips[r.src] = None
if s.haslayer(TCP) or s.haslayer(UDP):
trace_id = (s.src,s.dst,s.proto,s.dport)
elif s.haslayer(ICMP):
trace_id = (s.src,s.dst,s.proto,s.type)
else:
trace_id = (s.src,s.dst,s.proto,0)
trace = rt.get(trace_id,{})
if not r.haslayer(ICMP) or r.type != 11:
if ports_done.has_key(trace_id):
continue
ports_done[trace_id] = None
trace[s.ttl] = r.src
rt[trace_id] = trace
# Get the addresses locations
trt = {}
for trace_id in rt:
trace = rt[trace_id]
loctrace = []
for i in xrange(max(trace)):
ip = trace.get(i,None)
if ip is None:
continue
loc = db.record_by_addr(ip)
if loc is None:
continue
loc = loc.get('longitude'), loc.get('latitude')
if loc == (None, None):
continue
loctrace.append(loc)
if loctrace:
trt[trace_id] = loctrace
# Load the map renderer
from mpl_toolkits.basemap import Basemap
bmap = Basemap()
# Split latitudes and longitudes per traceroute measurement
locations = [zip(*tr) for tr in trt.itervalues()]
# Plot the traceroute measurement as lines in the map
lines = [bmap.plot(*bmap(lons, lats)) for lons, lats in locations]
# Draw countries
bmap.drawcoastlines()
# Call show() if matplotlib is not inlined
if not MATPLOTLIB_INLINED:
plt.show()
# Return the drawn lines
return lines
def make_graph(self,ASres=None,padding=0):
if ASres is None:
ASres = conf.AS_resolver
self.graphASres = ASres
self.graphpadding = padding
ips = {}
rt = {}
ports = {}
ports_done = {}
for s,r in self.res:
r = r.getlayer(IP) or (conf.ipv6_enabled and r[scapy.layers.inet6.IPv6]) or r
s = s.getlayer(IP) or (conf.ipv6_enabled and s[scapy.layers.inet6.IPv6]) or s
ips[r.src] = None
if TCP in s:
trace_id = (s.src,s.dst,6,s.dport)
elif UDP in s:
trace_id = (s.src,s.dst,17,s.dport)
elif ICMP in s:
trace_id = (s.src,s.dst,1,s.type)
else:
trace_id = (s.src,s.dst,s.proto,0)
trace = rt.get(trace_id,{})
ttl = conf.ipv6_enabled and scapy.layers.inet6.IPv6 in s and s.hlim or s.ttl
if not (ICMP in r and r[ICMP].type == 11) and not (conf.ipv6_enabled and scapy.layers.inet6.IPv6 in r and scapy.layers.inet6.ICMPv6TimeExceeded in r):
if trace_id in ports_done:
continue
ports_done[trace_id] = None
p = ports.get(r.src,[])
if TCP in r:
p.append(r.sprintf("<T%ir,TCP.sport%> %TCP.sport% %TCP.flags%"))
trace[ttl] = r.sprintf('"%r,src%":T%ir,TCP.sport%')
elif UDP in r:
p.append(r.sprintf("<U%ir,UDP.sport%> %UDP.sport%"))
trace[ttl] = r.sprintf('"%r,src%":U%ir,UDP.sport%')
elif ICMP in r:
p.append(r.sprintf("<I%ir,ICMP.type%> ICMP %ICMP.type%"))
trace[ttl] = r.sprintf('"%r,src%":I%ir,ICMP.type%')
else:
p.append(r.sprintf("{IP:<P%ir,proto%> IP %proto%}{IPv6:<P%ir,nh%> IPv6 %nh%}"))
trace[ttl] = r.sprintf('"%r,src%":{IP:P%ir,proto%}{IPv6:P%ir,nh%}')
ports[r.src] = p
else:
trace[ttl] = r.sprintf('"%r,src%"')
rt[trace_id] = trace
# Fill holes with unk%i nodes
unknown_label = incremental_label("unk%i")
blackholes = []
bhip = {}
for rtk in rt:
trace = rt[rtk]
max_trace = max(trace)
for n in xrange(min(trace), max_trace):
if not trace.has_key(n):
trace[n] = unknown_label.next()
if not ports_done.has_key(rtk):
if rtk[2] == 1: #ICMP
bh = "%s %i/icmp" % (rtk[1],rtk[3])
elif rtk[2] == 6: #TCP
bh = "%s %i/tcp" % (rtk[1],rtk[3])
elif rtk[2] == 17: #UDP
bh = '%s %i/udp' % (rtk[1],rtk[3])
else:
bh = '%s %i/proto' % (rtk[1],rtk[2])
ips[bh] = None
bhip[rtk[1]] = bh
bh = '"%s"' % bh
trace[max_trace + 1] = bh
blackholes.append(bh)
# Find AS numbers
ASN_query_list = set(x.rsplit(" ",1)[0] for x in ips)
if ASres is None:
ASNlist = []
else:
ASNlist = ASres.resolve(*ASN_query_list)
ASNs = {}
ASDs = {}
for ip,asn,desc, in ASNlist:
if asn is None:
continue
iplist = ASNs.get(asn,[])
if ip in bhip:
if ip in ports:
iplist.append(ip)
iplist.append(bhip[ip])
else:
iplist.append(ip)
ASNs[asn] = iplist
ASDs[asn] = desc
backcolorlist=colgen("60","86","ba","ff")
forecolorlist=colgen("a0","70","40","20")
s = "digraph trace {\n"
s += "\n\tnode [shape=ellipse,color=black,style=solid];\n\n"
s += "\n#ASN clustering\n"
for asn in ASNs:
s += '\tsubgraph cluster_%s {\n' % asn
col = backcolorlist.next()
s += '\t\tcolor="#%s%s%s";' % col
s += '\t\tnode [fillcolor="#%s%s%s",style=filled];' % col
s += '\t\tfontsize = 10;'
s += '\t\tlabel = "%s\\n[%s]"\n' % (asn,ASDs[asn])
for ip in ASNs[asn]:
s += '\t\t"%s";\n'%ip
s += "\t}\n"
s += "#endpoints\n"
for p in ports:
s += '\t"%s" [shape=record,color=black,fillcolor=green,style=filled,label="%s|%s"];\n' % (p,p,"|".join(ports[p]))
s += "\n#Blackholes\n"
for bh in blackholes:
s += '\t%s [shape=octagon,color=black,fillcolor=red,style=filled];\n' % bh
if padding:
s += "\n#Padding\n"
pad={}
for snd,rcv in self.res:
if rcv.src not in ports and rcv.haslayer(conf.padding_layer):
p = rcv.getlayer(conf.padding_layer).load
if p != "\x00"*len(p):
pad[rcv.src]=None
for rcv in pad:
s += '\t"%s" [shape=triangle,color=black,fillcolor=red,style=filled];\n' % rcv
s += "\n\tnode [shape=ellipse,color=black,style=solid];\n\n"
for rtk in rt:
s += "#---[%s\n" % `rtk`
s += '\t\tedge [color="#%s%s%s"];\n' % forecolorlist.next()
trace = rt[rtk]
maxtrace = max(trace)
for n in xrange(min(trace), maxtrace):
s += '\t%s ->\n' % trace[n]
s += '\t%s;\n' % trace[maxtrace]
s += "}\n";
self.graphdef = s
def graph(self, ASres=None, padding=0, **kargs):
"""x.graph(ASres=conf.AS_resolver, other args):
ASres=None : no AS resolver => no clustering
ASres=AS_resolver() : default whois AS resolver (riswhois.ripe.net)
ASres=AS_resolver_cymru(): use whois.cymru.com whois database
ASres=AS_resolver(server="whois.ra.net")
type: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
target: filename or redirect. Defaults pipe to Imagemagick's display program
prog: which graphviz program to use"""
if ASres is None:
ASres = conf.AS_resolver
if (self.graphdef is None or
self.graphASres != ASres or
self.graphpadding != padding):
self.make_graph(ASres,padding)
return do_graph(self.graphdef, **kargs)
@conf.commands.register
def traceroute(target, dport=80, minttl=1, maxttl=30, sport=RandShort(), l4 = None, filter=None, timeout=2, verbose=None, **kargs):
"""Instant TCP traceroute
traceroute(target, [maxttl=30,] [dport=80,] [sport=RandShort(),] [verbose=conf.verb]) -> (TracerouteResult, unanswered PacketList)
"""
if verbose is None:
verbose = conf.verb
if filter is None:
# we only consider ICMP error packets and TCP packets with at
# least the ACK flag set *and* either the SYN or the RST flag
# set
filter="(icmp and (icmp[0]=3 or icmp[0]=4 or icmp[0]=5 or icmp[0]=11 or icmp[0]=12)) or (tcp and (tcp[13] & 0x16 > 0x10))"
if l4 is None:
a,b = sr(IP(dst=target, id=RandShort(), ttl=(minttl,maxttl))/TCP(seq=RandInt(),sport=sport, dport=dport),
timeout=timeout, filter=filter, verbose=verbose, **kargs)
else:
# this should always work
filter="ip"
a,b = sr(IP(dst=target, id=RandShort(), ttl=(minttl,maxttl))/l4,
timeout=timeout, filter=filter, verbose=verbose, **kargs)
a = TracerouteResult(a.res)
if verbose:
a.show()
return a,b
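# Illustrative call (target hypothetical):
# res, unans = traceroute("www.example.com", dport=443, maxttl=20)
# res.graph() # dot/graphviz rendering of the hops, clustered by AS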
#############################
## Simple TCP client stack ##
#############################
class TCP_client(Automaton):
def parse_args(self, ip, port, *args, **kargs):
self.dst = iter(Net(ip)).next()
self.dport = port
self.sport = random.randrange(0,2**16)
self.l4 = IP(dst=ip)/TCP(sport=self.sport, dport=self.dport, flags=0,
seq=random.randrange(0,2**32))
self.src = self.l4.src
self.swin=self.l4[TCP].window
self.dwin=1
self.rcvbuf=""
bpf = "host %s and host %s and port %i and port %i" % (self.src,
self.dst,
self.sport,
self.dport)
# bpf=None
Automaton.parse_args(self, filter=bpf, **kargs)
def master_filter(self, pkt):
return (IP in pkt and
pkt[IP].src == self.dst and
pkt[IP].dst == self.src and
TCP in pkt and
pkt[TCP].sport == self.dport and
pkt[TCP].dport == self.sport and
self.l4[TCP].seq >= pkt[TCP].ack and # XXX: seq/ack 2^32 wrap up
((self.l4[TCP].ack == 0) or (self.l4[TCP].ack <= pkt[TCP].seq <= self.l4[TCP].ack+self.swin)) )
@ATMT.state(initial=1)
def START(self):
pass
@ATMT.state()
def SYN_SENT(self):
pass
@ATMT.state()
def ESTABLISHED(self):
pass
@ATMT.state()
def LAST_ACK(self):
pass
@ATMT.state(final=1)
def CLOSED(self):
pass
@ATMT.condition(START)
def connect(self):
raise self.SYN_SENT()
@ATMT.action(connect)
def send_syn(self):
self.l4[TCP].flags = "S"
self.send(self.l4)
self.l4[TCP].seq += 1
@ATMT.receive_condition(SYN_SENT)
def synack_received(self, pkt):
if pkt[TCP].flags & 0x3f == 0x12:
raise self.ESTABLISHED().action_parameters(pkt)
@ATMT.action(synack_received)
def send_ack_of_synack(self, pkt):
self.l4[TCP].ack = pkt[TCP].seq+1
self.l4[TCP].flags = "A"
self.send(self.l4)
@ATMT.receive_condition(ESTABLISHED)
def incoming_data_received(self, pkt):
if not isinstance(pkt[TCP].payload, NoPayload) and not isinstance(pkt[TCP].payload, conf.padding_layer):
raise self.ESTABLISHED().action_parameters(pkt)
@ATMT.action(incoming_data_received)
def receive_data(self,pkt):
data = str(pkt[TCP].payload)
if data and self.l4[TCP].ack == pkt[TCP].seq:
self.l4[TCP].ack += len(data)
self.l4[TCP].flags = "A"
self.send(self.l4)
self.rcvbuf += data
if pkt[TCP].flags & 8 != 0: #PUSH
self.oi.tcp.send(self.rcvbuf)
self.rcvbuf = ""
@ATMT.ioevent(ESTABLISHED,name="tcp", as_supersocket="tcplink")
def outgoing_data_received(self, fd):
raise self.ESTABLISHED().action_parameters(fd.recv())
@ATMT.action(outgoing_data_received)
def send_data(self, d):
self.l4[TCP].flags = "PA"
self.send(self.l4/d)
self.l4[TCP].seq += len(d)
@ATMT.receive_condition(ESTABLISHED)
def reset_received(self, pkt):
if pkt[TCP].flags & 4 != 0:
raise self.CLOSED()
@ATMT.receive_condition(ESTABLISHED)
def fin_received(self, pkt):
if pkt[TCP].flags & 0x1 == 1:
raise self.LAST_ACK().action_parameters(pkt)
@ATMT.action(fin_received)
def send_finack(self, pkt):
self.l4[TCP].flags = "FA"
self.l4[TCP].ack = pkt[TCP].seq+1
self.send(self.l4)
self.l4[TCP].seq += 1
@ATMT.receive_condition(LAST_ACK)
def ack_of_fin_received(self, pkt):
if pkt[TCP].flags & 0x3f == 0x10:
raise self.CLOSED()
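# Minimal usage sketch (host and port hypothetical): the as_supersocket hook on
# outgoing_data_received exposes this automaton as a stream-like socket named
# "tcplink".
# s = TCP_client.tcplink(conf.raw_layer, "www.example.com", 80)
# s.send("GET / HTTP/1.0\r\n\r\n")
# print s.recv()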
#####################
## Reporting stuff ##
#####################
def report_ports(target, ports):
"""portscan a target and output a LaTeX table
report_ports(target, ports) -> string"""
ans,unans = sr(IP(dst=target)/TCP(dport=ports),timeout=5)
rep = "\\begin{tabular}{|r|l|l|}\n\\hline\n"
for s,r in ans:
if not r.haslayer(ICMP):
if r.payload.flags == 0x12:
rep += r.sprintf("%TCP.sport% & open & SA \\\\\n")
rep += "\\hline\n"
for s,r in ans:
if r.haslayer(ICMP):
rep += r.sprintf("%TCPerror.dport% & closed & ICMP type %ICMP.type%/%ICMP.code% from %IP.src% \\\\\n")
elif r.payload.flags != 0x12:
rep += r.sprintf("%TCP.sport% & closed & TCP %TCP.flags% \\\\\n")
rep += "\\hline\n"
for i in unans:
rep += i.sprintf("%TCP.dport% & ? & unanswered \\\\\n")
rep += "\\hline\n\\end{tabular}\n"
return rep
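# Illustrative call (target hypothetical), printing the LaTeX table source:
# print report_ports("192.0.2.10", [22, 80, 443])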
def IPID_count(lst, funcID=lambda x:x[1].id, funcpres=lambda x:x[1].summary()):
idlst = map(funcID, lst)
idlst.sort()
classes = [idlst[0]]+map(lambda x:x[1],filter(lambda (x,y): abs(x-y)>50, map(lambda x,y: (x,y),idlst[:-1], idlst[1:])))
lst = map(lambda x:(funcID(x), funcpres(x)), lst)
lst.sort()
print "Probably %i classes:" % len(classes), classes
for id,pr in lst:
print "%5i" % id, pr
def fragleak(target,sport=123, dport=123, timeout=0.2, onlyasc=0):
load = "XXXXYYYYYYYYYY"
# getmacbyip(target)
# pkt = IP(dst=target, id=RandShort(), options="\x22"*40)/UDP()/load
pkt = IP(dst=target, id=RandShort(), options="\x00"*40, flags=1)/UDP(sport=sport, dport=sport)/load
s=conf.L3socket()
intr=0
found={}
try:
while 1:
try:
if not intr:
s.send(pkt)
sin,sout,serr = select([s],[],[],timeout)
if not sin:
continue
ans=s.recv(1600)
if not isinstance(ans, IP): #TODO: IPv6
continue
if not isinstance(ans.payload, ICMP):
continue
if not isinstance(ans.payload.payload, IPerror):
continue
if ans.payload.payload.dst != target:
continue
if ans.src != target:
print "leak from", ans.src,
# print repr(ans)
if not ans.haslayer(conf.padding_layer):
continue
# print repr(ans.payload.payload.payload.payload)
# if not isinstance(ans.payload.payload.payload.payload, conf.raw_layer):
# continue
# leak = ans.payload.payload.payload.payload.load[len(load):]
leak = ans.getlayer(conf.padding_layer).load
if leak not in found:
found[leak]=None
linehexdump(leak, onlyasc=onlyasc)
except KeyboardInterrupt:
if intr:
raise
intr=1
except KeyboardInterrupt:
pass
def fragleak2(target, timeout=0.4, onlyasc=0):
found={}
try:
while 1:
p = sr1(IP(dst=target, options="\x00"*40, proto=200)/"XXXXYYYYYYYYYYYY",timeout=timeout,verbose=0)
if not p:
continue
if conf.padding_layer in p:
leak = p[conf.padding_layer].load
if leak not in found:
found[leak]=None
linehexdump(leak,onlyasc=onlyasc)
except:
pass
conf.stats_classic_protocols += [TCP,UDP,ICMP]
conf.stats_dot11_protocols += [TCP,UDP,ICMP]
if conf.ipv6_enabled:
import scapy.layers.inet6
|
gpl-2.0
|
apurvbhartia/gnuradio-routing
|
gnuradio-examples/python/pfb/chirp_channelize.py
|
7
|
6936
|
#!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 200000 # number of samples to use
self._fs = 9000 # initial sampling rate
self._M = 9 # Number of channels to channelize
# Create a set of taps for the PFB channelizer
self._taps = gr.firdes.low_pass_2(1, self._fs, 500, 20,
attenuation_dB=10, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
repeated = True
if(repeated):
self.vco_input = gr.sig_source_f(self._fs, gr.GR_SIN_WAVE, 0.25, 110)
else:
amp = 100
data = scipy.arange(0, amp, amp/float(self._N))
self.vco_input = gr.vector_source_f(data, False)
# Build a VCO controlled by either the sinusoid or single chirp tone
# Then convert this to a complex signal
self.vco = gr.vco_f(self._fs, 225, 1)
self.f2c = gr.float_to_complex()
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = blks2.pfb_channelizer_ccf(self._M, self._taps)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = gr.vector_sink_c()
# Connect the blocks
self.connect(self.vco_input, self.vco, self.f2c)
self.connect(self.f2c, self.head, self.pfb)
self.connect(self.f2c, self.snk_i)
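        # Flowgraph summary (descriptive comment): vco_input -> vco -> f2c; f2c then
        # feeds both the channelizer branch (head -> pfb) and the input sink snk_i.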
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(gr.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
fig3 = pylab.figure(4, figsize=(16,9), facecolor="w")
Ns = 650
Ne = 20000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
        # Plot each of the channels' outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs / tb._M
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = freq
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
sp3 = fig3.add_subplot(1,1,1)
p3 = sp3.plot(t_o, x_o.real)
sp3.set_xlim([min(t_o), max(t_o)+1])
sp3.set_ylim([-2, 2])
sp3.set_title("All Channels")
sp3.set_xlabel("Time (s)")
sp3.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
EtienneCmb/brainpipe
|
brainpipe/feature/coupling/cfc.py
|
1
|
37136
|
from joblib import Parallel, delayed
from psutil import cpu_count
import numpy as np
from itertools import product
from scipy.special import erfinv
from scipy.stats import norm
from brainpipe.feature.utils._feat import (_manageWindow, _manageFrequencies,
_checkref)
from brainpipe.feature.filtering import fextract, docfilter
from brainpipe.feature.coupling.pac._pac import *
from brainpipe.feature.coupling.pac.pacmeth import *
from brainpipe.feature.utils._feat import normalize
from brainpipe.feature import power, phase, sigfilt
from brainpipe.tools import binarize, binArray
from brainpipe.statistics import perm_2pvalue, circ_corrcc, circ_rtest
from brainpipe.visual.cmon_plt import tilerplot
from brainpipe.visual import addLines
__all__ = ['pac',
'PhaseLockedPower',
'erpac',
'pfdphase',
'PLV'
]
windoc = """
window: tuple/list/None, optional [def: None]
List/tuple: [100,1500]
List of list/tuple: [(100,500),(200,4000)]
Width and step parameters will be ignored.
width: int, optional [def: None]
width of a single window.
step: int, optional [def: None]
Each window will be spaced by the "step" value.
time: list/array, optional [def: None]
Define a specific time vector
"""
Footnotes = """
.. rubric:: Footnotes
.. [#f1] `Canolty et al, 2006 <http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2628289/>`_
.. [#f2] `Tort et al, 2010 <http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2941206/>`_
.. [#f3] `Ozkurt et al, 2012 <http://www.ncbi.nlm.nih.gov/pubmed/22531738/>`_
.. [#f4] `Bahramisharif et al, 2013 <http://www.jneurosci.org/content/33/48/18849.short/>`_
.. [#f5] `Penny et al, 2008 <http://www.sciencedirect.com/science/article/pii/S0165027008003816>`_
"""
def cfcparafilt(xpha, xamp, n_jobs, self):
"""Parallel filtering through electrode dimension
"""
nelec = xpha.shape[0]
# Run para filtering :
data = Parallel(n_jobs=n_jobs)(delayed(_cfcparafilt)(
xpha[e, ...], xamp[e, ...], self,
) for e in range(nelec))
pha, amp = zip(*data)
return np.array(pha), np.array(amp)
def _cfcparafilt(xpha, xamp, self):
"""Sub parallel filtering function
"""
# Get the filter for phase/amplitude properties :
phaMeth = self._pha.get(self._sf, self._pha.f, self._npts)
ampMeth = self._amp.get(self._sf, self._amp.f, self._npts)
# Filt phase and amplitude :
pha = self._pha.apply(xpha, phaMeth)
amp = self._amp.apply(xamp, ampMeth)
return pha, amp
class _coupling(tilerplot):
"""
"""
def __init__(self, pha_f, pha_kind, pha_meth, pha_cycle,
amp_f, amp_kind, amp_meth, amp_cycle,
sf, npts, window, width, step, time, **kwargs):
# Define windows and frequency :
self._pha = fextract(kind=pha_kind, method=pha_meth,
cycle=pha_cycle, **kwargs)
self._amp = fextract(kind=amp_kind, method=amp_meth,
cycle=amp_cycle, **kwargs)
self._window, xvec = _manageWindow(npts, window=window,
width=width, step=step,
time=time)
self._pha.f, _, _ = _manageFrequencies(pha_f, split=None)
self._amp.f, _, _ = _manageFrequencies(amp_f, split=None)
if time is None:
time = np.arange(npts)
if self._window is None:
self._window = [(0, npts)]
self.time = np.array(self._window).mean()
# self.xvec = [0, npts]
else:
self.time = binArray(time, self._window)[0]
# Get variables :
self._width = width
self._step = step
self._nPha = len(self._pha.f)
self._nAmp = len(self._amp.f)
self._sf = sf
self._npts = npts
self._nwin = len(self._window)
self.pha = [np.mean(k) for k in self._pha.f]
self.amp = [np.mean(k) for k in self._amp.f]
class pac(_coupling):
"""Compute the phase-amplitude coupling (pac) either in local or
distant coupling. PAC require three things:
- Main method to compute it
- Surrogates to correct the true pac estimation
- A normalization method to correct pas by surrogates
Contributor: Juan LP Soto.
Args:
sf: int
Sampling frequency
npts: int
            Number of points of the time series
Kargs:
Id: string, optional, [def: '113']
            The Id corresponds to the way of computing pac. Id is composed of
three digits [ex : Id='210']
* First digit: refer to the pac method:
- '1': Mean Vector Length [#f1]_
- '2': Kullback-Leibler Divergence [#f2]_
- '3': Heights Ratio
- '4': Phase synchrony (or adapted PLV) [#f5]_
- '5': ndPAC [#f3]_
* Second digit: refer to the method for computing surrogates:
- '0': No surrogates
- '1': Swap trials phase/amplitude [#f2]_
- '2': Swap trials amplitude [#f4]_
- '3': Shuffle phase time-series
- '4': Shuffle amplitude time-series
- '5': Time lag [#f1]_ [NOT IMPLEMENTED YET]
- '6': Circular shifting [NOT IMPLEMENTED YET]
* Third digit: refer to the normalization method for correction:
- '0': No normalization
                - '1': Subtract the mean of surrogates
                - '2': Divide by the mean of surrogates
                - '3': Subtract then divide by the mean of surrogates
                - '4': Z-score
        So, if Id='143', this means that pac will be evaluated using the
        Modulation Index ('1'), then surrogates are computed by randomly
        shuffling amplitude values ('4') and finally, the true pac value
        will be normalized by subtracting then dividing by the mean of surrogates.
pha_f: tuple/list, optional, [def: [2,4]]
List containing the couple of frequency bands for the phase.
Example: f=[ [2,4], [5,7], [60,250] ]
pha_meth: string, optional, [def: 'hilbert']
Method for the phase extraction.
pha_cycle: integer, optional, [def: 3]
Number of cycles for filtering the phase.
amp_f: tuple/list, optional, [def: [60,200]]
List containing the couple of frequency bands for the amplitude.
Each couple can be either a list or a tuple.
amp_meth: string, optional, [def: 'hilbert']
Method for the amplitude extraction.
amp_cycle: integer, optional, [def: 6]
Number of cycles for filtering the amplitude.
nbins: integer, optional, [def: 18]
            Some pac methods (like Kullback-Leibler Distance or Heights Ratio) need
            a binarization of the phase. nbins controls the number of bins.
"""
__doc__ += windoc + docfilter + Footnotes
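    # A minimal usage sketch (assumed, not from the original docs): 'x' is a
    # hypothetical array of shape (n_electrodes, npts, n_trials) sampled at 1024 Hz,
    # and a local pac is computed between a 2-4 Hz phase and a 60-200 Hz amplitude:
    #   p = pac(1024, x.shape[1], Id='113', pha_f=[2, 4], amp_f=[60, 200])
    #   ncfc, pval = p.get(x, x, n_perm=200)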
def __init__(self, sf, npts, Id='113', pha_f=[2, 4], pha_meth='hilbert',
pha_cycle=3, amp_f=[60, 200], amp_meth='hilbert', amp_cycle=6,
nbins=18, window=None, width=None, step=None, time=None,
**kwargs):
# Check pha and amp methods:
_checkref('pha_meth', pha_meth, ['hilbert', 'hilbert1', 'hilbert2'])
_checkref('amp_meth', amp_meth, ['hilbert', 'hilbert1', 'hilbert2'])
# Check the type of f:
if (len(pha_f) == 4) and isinstance(pha_f[0], (int, float)):
pha_f = binarize(
pha_f[0], pha_f[1], pha_f[2], pha_f[3], kind='list')
if (len(amp_f) == 4) and isinstance(amp_f[0], (int, float)):
amp_f = binarize(
amp_f[0], amp_f[1], amp_f[2], amp_f[3], kind='list')
self.xvec = []
# Initalize pac object :
self.Id = Id
me = Id[0]
# Manage settings :
# 1 - Choose if we extract phase or amplitude :
# - Methods using phase // amplitude :
if me in ['1', '2', '3', '5', '6']:
pha_kind, amp_kind = 'phase', 'amplitude'
# - Methods using phase // phase :
elif me in ['4']:
pha_kind, amp_kind = 'phase', 'amplitude'
# 2 - Specific case of Ozkurt :
if me == '5':
Id = '500'
# Initialize cfc :
_coupling.__init__(self, pha_f, pha_kind, pha_meth, pha_cycle,
amp_f, amp_kind, amp_meth, amp_cycle,
sf, npts, window, width, step, time, **kwargs)
# Get pac model :
_, _, _, ModelStr, SurStr, NormStr = CfcSettings(Id, nbins)
self.model = ['Method : '+ModelStr, 'Surrogates : '+SurStr,
'Normalization : '+NormStr]
self._nbins = nbins
def __str__(self):
phafilt = 'Phase : '+str(self._pha)
ampfilt = 'Amplitude : '+str(self._amp)
met = self.model[0]+',\n'+self.model[1]+',\n'+self.model[2]+',\n'
cfcStr = 'Crossfrequency Coupling(step='+str(self._step)+', width='+str(
self._width)+', Id='+self.Id+', nbins='+str(self._nbins)+',\n'+met
return cfcStr+phafilt+',\n'+ampfilt+')'
def get(self, xpha, xamp, n_perm=200, p=0.05, matricial=False, n_jobs=-1):
"""Get the normalized cfc mesure between an xpha and xamp signals.
Args:
xpha: array
Signal for phase. The shape of xpha should be :
(n_electrodes x n_pts x n_trials)
xamp: array
Signal for amplitude. The shape of xamp should be :
(n_electrodes x n_pts x n_trials)
Kargs:
n_perm: integer, optional, [def: 200]
                Number of permutations for normalizing the cfc measure.
p: float, optional, [def: 0.05]
p-value for the statistical method of Ozkurt 2012.
matricial: bool, optional, [def: False]
                Some methods can work with matrix computations. This can be
                10x to 30x faster but, please, monitor your RAM usage
                because this option can use a lot of memory. Only turn it on
                for small computations.
n_jobs: integer, optional, [def: -1]
                Control the number of jobs for parallel computing. Use 1, 2, ..
                depending on your number of cores. -1 uses all the cores.
            If the same signal is used (example: xpha=x and xamp=x), this means
            the program computes a local cfc.
Returns:
ncfc: array
                The cfc measure of size :
(n_amplitude x n_phase x n_electrodes x n_windows x n_trials)
pvalue: array
The associated p-values of size :
(n_amplitude x n_phase x n_electrodes x n_windows)
"""
# Check the inputs variables :
xpha, xamp = _cfcCheck(xpha, xamp, self._npts)
self.n_perm = n_perm
self._matricial = matricial
if n_perm != 0:
self.p = 1/n_perm
else:
self.p = None
N = xpha.shape[0]
# Manage jobs repartition :
if (N < cpu_count()) and (n_jobs != 1):
surJob = n_jobs
elecJob = 1
elif (N >= cpu_count()) and (n_jobs != 1):
surJob = 1
elecJob = n_jobs
else:
surJob, elecJob = 1, 1
# Get the unormalized cfc and surogates:
cfcsu = Parallel(n_jobs=elecJob)(delayed(_cfcFiltSuro)(
xpha[k, ...], xamp[k, ...], surJob, self) for k in range(N))
uCfc, Suro, mSuro, stdSuro = zip(*cfcsu)
uCfc = np.array(uCfc)
        # Permutations and stats:
        if self.Id[0] != '5':
            # Compute permutations :
            if (self.n_perm != 0) and (self.Id[1] != '0'):
Suro, mSuro, stdSuro = np.array(
Suro), np.array(mSuro), np.array(stdSuro)
# Normalize each cfc:
_, _, Norm, _, _, _ = CfcSettings(self.Id)
nCfc = Norm(uCfc, mSuro, stdSuro)
# Confidence interval :
pvalue = perm_2pvalue(uCfc.mean(2), np.rollaxis(Suro.mean(2), 4),
self.n_perm, tail=1)
return nCfc.transpose(3, 4, 0, 1, 2), pvalue.transpose(2, 3, 0, 1)
else:
return uCfc.transpose(3, 4, 0, 1, 2), None
        elif self.Id[0] == '5':
# Ozkurt threshold :
xlim = (erfinv(1-p)**2)
# Set to zero non-significant values:
idxUn = np.where(uCfc <= 2*xlim)
uCfc[idxUn] = 0
return uCfc.transpose(3, 4, 0, 1, 2), None
class PhaseLockedPower(object):
"""Extract phase-locked power and visualize shifted time-frequency map
according to phase peak.
Args:
sf: int
Sampling frequency
npts: int
            Number of points of the time series
Kargs:
f: tuple/list, optional, [def: (2, 200, 10, 5)]
The frequency vector (fstart, fend, fwidth, fstep)
pha: tuple/list, optional, [def: [8, 13]]
Frequency for phase.
time: array/list, optional, [def: None]
The time vector to use
baseline: tuple/list, optional, [def: None]
Location of baseline (in sample)
norm: integer, optional, [def: None]
Normalize method
- 0: No normalisation
            - 1: Subtraction
            - 2: Division
            - 3: Subtract then divide
- 4: Z-score
        powArgs: any supplementary arguments are directly passed to the power
function.
"""
def __init__(self, sf, npts, f=(2, 200, 10, 5), pha=[8, 13], time=None,
baseline=None, norm=None, **powArgs):
# Define objects:
self._normBck = norm
self._baseline = baseline
self._powObj = power(
sf, npts, f=f, baseline=baseline, norm=0, time=time, **powArgs)
self._phaObj = phase(sf, npts, f=pha)
self._sigObj = sigfilt(sf, npts, f=pha)
def get(self, x, cue):
"""Get power phase locked
Args:
x: array
Data of shape (npt, ntrials)
cue: integer
Cue to align time-frequency maps.
Returns:
            xpow, xpha, xsig: respectively the realigned power, phase and filtered
signal
"""
        # Find cue according to the defined time vector
self._cue = cue
xvec = self._powObj.xvec
cue = np.abs(np.array(xvec)-cue).argmin()
self._cueIdx = cue
        # Extract power, phase and filtered signal:
xpow = np.squeeze(self._powObj.get(x)[0])
xpha = np.squeeze(self._phaObj.get(x)[0])
xsig = np.squeeze(self._sigObj.get(x)[0])
# Re-align:
xpha_s, xpow_s, xsig_s = np.empty_like(
xpha), np.empty_like(xpow), np.empty_like(xsig)
nTrials = xsig.shape[1]
for k in range(nTrials):
# Get shifting:
move = self._PeakDetection(xsig[:, k], cue)
# Apply shifting:
xpha_s[:, k] = self._ShiftSignal(np.matrix(xpha[:, k]), move)
xsig_s[:, k] = self._ShiftSignal(np.matrix(xsig[:, k]), move)
xpow_s[:, :, k] = self._ShiftSignal(xpow[:, :, k], move)
xpow_s = np.mean(xpow_s, 2)
# Normalize mean power:
        if self._normBck != 0:
bsl = self._baseline
xFm = np.mean(xpow_s[:, bsl[0]:bsl[1]], 1)
baseline = np.tile(xFm[:, np.newaxis], [1, xpow_s.shape[1]])
xpow_s = normalize(xpow_s, baseline, norm=self._normBck)
return xpow_s, xpha_s, xsig_s
def tflockedplot(self, xpow, sig, cmap='viridis', vmin=None, vmax=None,
ylim=None, alpha=0.3, kind='std', vColor='r',
sigcolor='slateblue', fignum=0):
"""Plot realigned time-frequency maps.
Args:
xpow, sig: output of the get() method. sig can either be the phase
or the filtered signal.
Kargs:
cmap: string, optional, [def: 'viridis']
The colormap to use
            vmin, vmax: int/float, optional, [def: None, None]
Limits of the colorbar
ylim: tuple/list, optional, [def: None]
Limit for the plot of the signal
alpha: float, optional, [def: 0.3]
Transparency of deviation/sem
kind: string, optional, [def: 'std']
Choose between 'std' or 'sem' to either display standard
                deviation or standard error of the mean for the signal plot
vColor: string, optional, [def: 'r']
                Color of the vertical line which marks the chosen cue
sigcolor: string, optional, [def: 'slateblue']
Color of the signal
fignum: integer, optional, [def: 0]
Number of the figure
Returns:
figure, axes1 (TF plot), axes2 (signal plot), axes3 (colorbar)
"""
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
xvec = self._powObj.xvec
yvec = self._powObj.yvec
fig = plt.figure(fignum, figsize=(8, 9))
gs = gridspec.GridSpec(11, 11)
ax1 = plt.subplot(gs[0:-2, 0:-1])
ax2 = plt.subplot(gs[-2::, 0:-1])
ax3 = plt.subplot(gs[1:-3, -1])
# TF:
im = ax1.imshow(xpow, aspect='auto', cmap=cmap, vmin=vmin, vmax=vmax,
extent=[xvec[0], xvec[-1], yvec[-1], yvec[0]])
addLines(ax1, vLines=[self._cue], vShape=['-'], vWidth=[3],
vColor=[vColor])
ax1.set_xticks([])
ax1.set_xticklabels('')
ax1.set_ylabel('Frequency (hz)')
ax1.invert_yaxis()
ax1.tick_params(axis='both', which='both', top='off', right='off')
ax1.axis('tight')
cb = plt.colorbar(im, cax=ax3)
cb.set_ticks(cb.get_clim())
cb.set_label('Power modulations', labelpad=-10)
# Signal:
xm = sig.mean(1)
if kind == 'std':
x2add = sig.std(1)
elif kind == 'sem':
x2add = sig.std(1)/np.sqrt(len(xm)-1)
xlow, xhigh = xm-x2add, xm+x2add
ax = ax2.plot(xvec, xm, lw=2, color=sigcolor)
ax2.fill_between(xvec, xlow, xhigh, alpha=alpha,
color=ax[0].get_color())
ax2.set_yticks(ax2.get_ylim())
ax2.tick_params(axis='both', which='both', top='off', right='off')
ax2.set_xlabel('Time')
if ylim is not None:
ax2.set_ylim(ylim)
else:
ax2.axis('tight')
addLines(ax2, vLines=[self._cue], vShape=['-'], vWidth=[3],
vColor=[vColor])
return plt.gcf(), ax1, ax2, ax3
@staticmethod
def _PeakDetection(sig, cue):
"""Detect peaks in a signal and return the shifting length
corresponding to the defined cue
sig: vector
cue: integer (in sample)
"""
peaks = []
for k in range(len(sig)-1):
if (sig[k-1] < sig[k]) and (sig[k] > sig[k+1]):
peaks.append(k)
minPeak = peaks[np.abs(np.array(peaks)-cue).argmin()]
return minPeak-cue
@staticmethod
def _ShiftSignal(sig, move):
"""
"""
npts = sig.shape[1]
sigShift = np.zeros(sig.shape)
if move >= 0:
sigShift[:, 0:npts-move] = sig[:, move::]
elif move < 0:
sigShift[:, np.abs(move)::] = sig[:, 0:npts-np.abs(move)]
return sigShift
class erpac(_coupling):
"""Compute Event Related Phase-Amplitude coupling. See [#f6]_
.. rubric:: Footnotes
.. [#f6] `Voytek et al, 2013 <http://www.ncbi.nlm.nih.gov/pubmed/22986076>`_
Args:
sf: int
Sampling frequency
npts: int
            Number of points of the time series
Kargs:
pha_f: tuple/list, optional, [def: [2,4]]
List containing the couple of frequency bands for the phase.
Example: f=[ [2,4], [5,7], [60,250] ]
pha_meth: string, optional, [def: 'hilbert']
Method for the phase extraction.
pha_cycle: integer, optional, [def: 3]
Number of cycles for filtering the phase.
amp_f: tuple/list, optional, [def: [60,200]]
List containing the couple of frequency bands for the amplitude.
Each couple can be either a list or a tuple.
amp_meth: string, optional, [def: 'hilbert']
Method for the amplitude extraction.
amp_cycle: integer, optional, [def: 6]
Number of cycles for filtering the amplitude.
"""
__doc__ += windoc
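    # A minimal usage sketch (assumed, not from the original docs): 'x' is a
    # hypothetical (n_electrodes, npts, n_trials) array; a local erpac is computed:
    #   e = erpac(512, x.shape[1], pha_f=[2, 4], amp_f=[60, 200])
    #   xerpac, pval = e.get(x, x, n_perm=200)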
def __init__(self, sf, npts, pha_f=[2, 4], pha_meth='hilbert',
pha_cycle=3, amp_f=[60, 200], amp_meth='hilbert', amp_cycle=6,
window=None, step=None, width=None, time=None, **kwargs):
# Check pha and amp methods:
_checkref('pha_meth', pha_meth, ['hilbert', 'hilbert1', 'hilbert2'])
_checkref('amp_meth', amp_meth, ['hilbert', 'hilbert1', 'hilbert2'])
# Check the type of f:
if (len(pha_f) == 4) and isinstance(pha_f[0], (int, float)):
pha_f = binarize(
pha_f[0], pha_f[1], pha_f[2], pha_f[3], kind='list')
if (len(amp_f) == 4) and isinstance(amp_f[0], (int, float)):
amp_f = binarize(
amp_f[0], amp_f[1], amp_f[2], amp_f[3], kind='list')
# Initialize cfc :
_coupling.__init__(self, pha_f, 'phase', pha_meth, pha_cycle,
amp_f, 'amplitude', amp_meth, amp_cycle,
sf, npts, window, width, step, time, **kwargs)
def get(self, xpha, xamp, n_perm=200, n_jobs=-1):
"""Get the erpac mesure between an xpha and xamp signals.
Args:
xpha: array
Signal for phase. The shape of xpha should be :
(n_electrodes x n_pts x n_trials)
xamp: array
Signal for amplitude. The shape of xamp should be :
(n_electrodes x n_pts x n_trials)
Kargs:
n_perm: integer, optional, [def: 200]
                Number of permutations for normalizing the cfc measure.
n_jobs: integer, optional, [def: -1]
                Control the number of jobs for parallel computing. Use 1, 2, ..
                depending on your number of cores. -1 uses all the cores.
            If the same signal is used (example: xpha=x and xamp=x), this means
            the program computes a local erpac.
Returns:
xerpac: array
                The erpac measure of size :
(n_amplitude x n_phase x n_electrodes x n_windows)
pvalue: array
The associated p-values of size :
(n_amplitude x n_phase x n_electrodes x n_windows)
"""
# Check and get methods:
xpha, xamp = _cfcCheck(xpha, xamp, self._npts)
npha, namp = self._nPha, self._nAmp
phaMeth = self._pha.get(self._sf, self._pha.f, self._npts)
ampMeth = self._amp.get(self._sf, self._amp.f, self._npts)
# Extract phase and amplitude:
nelec, npts, ntrials = xpha.shape
xp, xa = cfcparafilt(xpha, xamp, n_jobs, self)
# Window:
if not (self._window == [(0, npts)]):
xp = binArray(xp, self._window, axis=2)[0]
xa = binArray(xa, self._window, axis=2)[0]
npts = xp.shape[2]
# Extract ERPAC and surrogates:
iteract = product(range(nelec), range(npha), range(namp))
xerpac = np.zeros((nelec, npha, namp, npts))
pval = np.empty_like(xerpac)
for e, p, a in iteract:
xerpac[e, p, a, :], pval[e, p, a, :] = _erpac(xp[e, p, ...],
xa[e, a, ...], n_perm, n_jobs)
return xerpac, pval
def _erpac(xp, xa, n_perm, n_jobs):
"""Sub erpac function
[xp] = [xa] = (npts, ntrials)
"""
npts, ntrials = xp.shape
# Compute ERPAC
xerpac = np.zeros((npts,))
for t in range(npts):
xerpac[t] = circ_corrcc(xp[t, :], xa[t, :])[0]
# Compute surrogates:
data = Parallel(n_jobs=n_jobs)(delayed(_erpacSuro)(
xp, xa, npts, ntrials) for pe in range(n_perm))
suro = np.array(data)
# Normalize erpac:
xerpac = (xerpac - suro.mean(0))/suro.std(0)
# Get p-value:
pvalue = norm.cdf(-np.abs(xerpac))*2
return xerpac, pvalue
def _erpacSuro(xp, xa, npts, ntrials):
"""Parallel surrogates
"""
# Permute ntrials (only for amplitude):
perm = np.random.permutation(ntrials)
for t in range(npts):
suro = circ_corrcc(xp[t, :], xa[t, perm])[0]
return suro
class pfdphase(_coupling):
"""Get the preferred phase of a phase-amplitude coupling
Args:
sf: int
Sampling frequency
npts: int
            Number of points of the time series
Kargs:
nbins: integer, optional, [def: 18]
Number of bins to binarize the amplitude.
pha_f: tuple/list, optional, [def: [2,4]]
List containing the couple of frequency bands for the phase.
Example: f=[ [2,4], [5,7], [60,250] ]
pha_meth: string, optional, [def: 'hilbert']
Method for the phase extraction.
pha_cycle: integer, optional, [def: 3]
Number of cycles for filtering the phase.
amp_f: tuple/list, optional, [def: [60,200]]
List containing the couple of frequency bands for the amplitude.
Each couple can be either a list or a tuple.
amp_meth: string, optional, [def: 'hilbert']
Method for the amplitude extraction.
amp_cycle: integer, optional, [def: 6]
Number of cycles for filtering the amplitude.
"""
__doc__ += windoc
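    # A minimal usage sketch (assumed, not from the original docs): 'x' is a
    # hypothetical (n_electrodes, npts, n_trials) array:
    #   pf = pfdphase(512, x.shape[1], nbins=18, pha_f=[2, 4], amp_f=[60, 200])
    #   pfp, prf, ampbin, pval = pf.get(x, x)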
def __init__(self, sf, npts, nbins=18, pha_f=[2, 4], pha_meth='hilbert',
pha_cycle=3, amp_f=[60, 200], amp_meth='hilbert', amp_cycle=6,
window=None, width=None, step=None, time=None,
**kwargs):
# Check pha and amp methods:
_checkref('pha_meth', pha_meth, ['hilbert', 'hilbert1', 'hilbert2'])
_checkref('amp_meth', amp_meth, ['hilbert', 'hilbert1', 'hilbert2'])
# Check the type of f:
if (len(pha_f) == 4) and isinstance(pha_f[0], (int, float)):
pha_f = binarize(
pha_f[0], pha_f[1], pha_f[2], pha_f[3], kind='list')
if (len(amp_f) == 4) and isinstance(amp_f[0], (int, float)):
amp_f = binarize(
amp_f[0], amp_f[1], amp_f[2], amp_f[3], kind='list')
self.xvec = []
# Binarize phase vector :
self._binsize = 360 / nbins
self._phabin = np.arange(0, 360, self._binsize)
self.phabin = np.concatenate((self._phabin[:, np.newaxis],
self._phabin[:, np.newaxis]+self._binsize), axis=1)
# Initialize coupling:
_coupling.__init__(self, pha_f, 'phase', pha_meth, pha_cycle,
amp_f, 'amplitude', amp_meth, amp_cycle,
sf, npts, window, width, step, time, **kwargs)
self._nbins = nbins
def get(self, xpha, xamp, n_jobs=-1):
"""Get the preferred phase
Args:
xpha: array
Signal for phase. The shape of xpha should be :
(n_electrodes x n_pts x n_trials)
xamp: array
Signal for amplitude. The shape of xamp should be :
(n_electrodes x n_pts x n_trials)
Kargs:
n_jobs: integer, optional, [def: -1]
                Control the number of jobs for parallel computing. Use 1, 2, ..
                depending on your number of cores. -1 uses all the cores.
            If the same signal is used (example: xpha=x and xamp=x), this means
            the program computes a local cfc.
Returns:
pfp: array
The preferred phase extracted from the mean of trials of size :
(n_amplitude x n_phase x n_electrodes x n_windows)
prf: array
The preferred phase extracted from each trial of size :
(n_amplitude x n_phase x n_electrodes x n_windows x n_trials)
ambin: array
The binarized amplitude of size :
(n_amplitude x n_phase x n_electrodes x n_windows x n_bins x n_trials)
pvalue: array
The associated p-values of size :
(n_amplitude x n_phase x n_electrodes x n_windows)
"""
# Check the inputs variables :
xpha, xamp = _cfcCheck(xpha, xamp, self._npts)
nelec, npts, ntrials = xamp.shape
namp, npha, nwin, nbins = self._nAmp, self._nPha, self._nwin, self._nbins
phabin, binsize = self._phabin, self._binsize
# Get filtered phase and amplitude ;
pha, amp = cfcparafilt(xpha, xamp, n_jobs, self)
# Bring phase from [-pi,pi] to [0, 360]
pha = np.rad2deg((pha+2*np.pi)%(2*np.pi))
        # Windowing phase and amplitude :
pha = [pha[:, :, k[0]:k[1], :] for k in self._window]
amp = [amp[:, :, k[0]:k[1], :] for k in self._window]
# Define iter product :
iteract = product(range(namp), range(npha), range(nelec), range(nwin))
data = Parallel(n_jobs=n_jobs)(delayed(_pfp)(
pha[w][e, p, ...], amp[w][e, a, ...],
phabin, binsize) for a, p, e, w in iteract)
# Manage dim and output :
pfp, prf, pval, ampbin = zip(*data)
del pha, amp, data
ls = [namp, npha, nelec, nwin, nbins, ntrials]
ampbin = np.array(ampbin).reshape(*tuple(ls))
prf = np.array(prf).reshape(*tuple(ls[0:-2]))
pval = np.array(pval).reshape(*tuple(ls[0:-2]))
ls.pop(4)
pfp = np.array(pfp).reshape(*tuple(ls))
return pfp, prf, ampbin, pval
def _pfp(pha, amp, phabin, binsize):
"""Sub prefered phase function
"""
nbin, nt = len(phabin), pha.shape[1]
ampbin = np.zeros((len(phabin), nt), dtype=float)
    # Binarize amplitude across all trials :
for t in range(nt):
curpha, curamp = pha[:, t], amp[:, t]
for i, p in enumerate(phabin):
idx = np.logical_and(curpha >= p, curpha < p+binsize)
if idx.astype(int).sum() != 0:
ampbin[i, t] = curamp[idx].mean()
else:
ampbin[i, t] = 0
ampbin[:, t] /= ampbin[:, t].sum()
    # Find preferred phase and p-values :
pfp = np.array([phabin[k]+binsize/2 for k in ampbin.argmax(axis=0)])
pvalue = circ_rtest(pfp)[0]
prf = phabin[ampbin.mean(axis=1).argmax()]+binsize/2
return pfp, prf, pvalue, ampbin
class PLV(_coupling):
"""Compute the Phase-Locking Value [#f7]_
Args:
sf: int
Sampling frequency
npts: int
            Number of points of the time series
Kargs:
f: tuple/list, optional, [def: [2,4]]
List containing the couple of frequency bands for the phase.
Example: f=[ [2,4], [5,7], [60,250] ]
method: string, optional, [def: 'hilbert']
Method for the phase extraction.
cycle: integer, optional, [def: 3]
Number of cycles for filtering the phase.
sample: list, optional, [def: None]
Select samples in the time series to compute the plv
time: list/array, optional [def: None]
Define a specific time vector
amp_cycle: integer, optional, [def: 6]
Number of cycles for filtering the amplitude.
.. rubric:: Footnotes
.. [#f7] `Lachaux et al, 1999 <http://www.ma.utexas.edu/users/davis/reu/ch3/cwt/lachaux.pdf>`_
"""
def __init__(self, sf, npts, f=[2, 4], method='hilbert', cycle=3,
sample=None, time=None, **kwargs):
# Check pha and amp methods:
_checkref('pha_meth', method, ['hilbert', 'hilbert1', 'hilbert2'])
# Check the type of f:
if (len(f) == 4) and isinstance(f[0], (int, float)):
f = binarize(f[0], f[1], f[2], f[3], kind='list')
# Initialize PLV :
_coupling.__init__(self, f, 'phase', method, cycle,
f, 'phase', method, cycle,
sf, npts, None, None, None, time, **kwargs)
if time is None:
time = np.arange(npts)
else:
time = time
if sample is None:
sample = slice(npts)
self._sample = sample
self.time = time[sample]
del self.amp
def get(self, xelec1, xelec2, n_perm=200, n_jobs=-1):
"""Get Phase-Locking Values for a set of distant sites
Args:
xelec1, xelec2: array
                PLV will be computed between xelec1 and xelec2. Both matrices
                contain the time-series of each trial per electrode. They do not
                need to have the same number of electrodes, but they must have
                the same number of time points (npts) and trials (ntrials).
[xelec1] = (n_elec1, npts, ntrials), [xelec2] = (n_elec2, npts, ntrials)
Kargs:
n_perm: int, optional, [def: 200]
                Number of permutations to estimate the statistical significance
                of the plv measure
n_jobs: integer, optional, [def: -1]
Control the number of jobs for parallel computing. Use 1, 2, ..
                depending on your number of cores. -1 uses all the cores.
Returns:
plv: array
                The plv measure for each phase and across electrodes of size:
[plv] = (n_pha, n_elec1, n_elec2, n_sample)
pvalues: array
The p-values with the same shape of plv
"""
# Check the inputs variables :
if xelec1.ndim == 2:
xelec1 = xelec1[np.newaxis, ...]
if xelec2.ndim == 2:
xelec2 = xelec2[np.newaxis, ...]
if (xelec1.shape[1] != self._npts) or (xelec2.shape[1] != self._npts):
raise ValueError("The second dimension of xelec1 and xelec2 must be "+str(self._npts))
if not np.array_equal(np.array(xelec1.shape[1::]), np.array(xelec2.shape[1::])):
raise ValueError("xelec1 and xelec2 could have a diffrent number of electrodes"
" but the number of time points and trials must be the same.")
nelec1, npts, ntrials, nelec2, npha = *xelec1.shape, xelec2.shape[0], self._nPha
# Get filtered phase for xelec1 and xelec2 :
xcat = np.concatenate((xelec1, xelec2), axis=0)
del xelec1, xelec2
data = np.array(Parallel(n_jobs=n_jobs)(delayed(_plvfilt)(
xcat[e, ...], self) for e in range(xcat.shape[0])))
xp1, xp2 = data[0:nelec1, ...], data[nelec1::, ...]
del data
# Select samples :
xp1, xp2 = xp1[:, :, self._sample, :], xp2[:, :, self._sample, :]
npts = xp1.shape[2]
# Compute true PLV:
iteract = product(range(nelec1), range(nelec2))
plv = np.array(Parallel(n_jobs=n_jobs)(delayed(_plv)(
xp1[e1, ...], xp2[e2, ...]) for e1, e2 in iteract))
plv = np.transpose(plv.reshape(nelec1, nelec2, npha, npts), (2, 0, 1, 3))
# Compute surrogates:
pvalues = np.zeros_like(plv)
perm = [np.random.permutation(ntrials) for k in range(n_perm)]
iteract = product(range(nelec1), range(nelec2))
for e1, e2 in iteract:
pvalues[:, e1, e2, ...] = _plvstat(xp1[e1, ...], xp2[e2, ...],
plv[:, e1, e2, ...], n_perm, n_jobs, perm)
return plv, pvalues
def _plvfilt(x, self):
"""Sub PLV filt
"""
# Get the filter for phase/amplitude properties :
fMeth = self._pha.get(self._sf, self._pha.f, self._npts)
return self._pha.apply(x, fMeth)
def _plvstat(xp1, xp2, plv, n_perm, n_jobs, perm):
"""Sub plv-stat function
"""
# Compute plv for each permutation of xp2 trials :
plvs = np.array(Parallel(n_jobs=n_jobs)(delayed(_plv)(
xp1, xp2[..., p]) for p in perm))
# Get p-values from permutations :
return perm_2pvalue(plv, plvs, n_perm, tail=1)
def _plv(phi1, phi2):
"""PLV, (lachaux et al, 1999)
"""
return np.abs(np.exp(1j*(phi1-phi2)).mean(axis=-1))
|
gpl-3.0
|
bmazin/ARCONS-pipeline
|
examples/cosmic-tuning/cosmic-second-boundary-2.py
|
1
|
2930
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from util.ObsFile import ObsFile
from util.FileName import FileName
from util import utils
from util import hgPlot
from cosmic.Cosmic import Cosmic
import tables
from hotpix import hotPixels
import pickle
from interval import interval, inf, imath
import logging, os
import pickle
run = 'PAL2012'
sundownDate = '20121211'
obsDate = '20121212'
seq = '121229'
populationMax=1000
# test the theory that extra photons show up 23 clock ticks
# after the beginning of each second. Plot properties of the photons
offsets = [23, 100, 1234, 54321]
#offsets = [23]
beginTime = 0
expTime = 300
pickleFile = open("csb2.pkl","wb")
fn = FileName(run, sundownDate, obsDate+"-"+seq)
pickle.dump(fn,pickleFile)
obsFile = ObsFile(fn.obs())
obsFile.loadTimeAdjustmentFile(FileName(run=run).timeAdjustments()) # Matt's time fix
timeMaskFile = fn.timeMask()
if os.path.exists(timeMaskFile):
    obsFile.loadHotPixCalFile(timeMaskFile, switchOnMask=True)
if False:
iRow0 = 25
iRow1 = iRow0+1
iCol0 = 40
iCol1 = iCol0+1
else:
iRow0 = 0
iRow1 = obsFile.nRow
iCol0 = 0
iCol1 = obsFile.nCol
spt = obsFile.tickDuration
masks = {}
pickle.dump(offsets,pickleFile)
for offset in offsets:
print "begin offset=",offset
masks[offset] = interval()
for sec in range(beginTime,beginTime+expTime):
tl = sec+obsFile.tickDuration*(offset-2)
tr = sec+obsFile.tickDuration*(offset+3)
masks[offset] = masks[offset] | interval([sec,tl]) | interval([tr,sec+1])
obsFile.cosmicMask = masks[offset]
obsFile.switchOnCosmicTimeMask()
nPhotonSum = 0
rows = np.zeros(0,dtype=np.int)
cols = np.zeros(0,dtype=np.int)
dts = np.zeros(0,dtype=np.double)
secs = np.zeros(0,dtype=np.int)
phs = np.zeros(0,dtype=np.double)
for iRow in range(iRow0, iRow1):
for iCol in range(iCol0, iCol1):
#tpl = obsFile.getTimedPacketList(iRow, iCol, firstSec=beginTime, integrationTime=expTime)
tpl = obsFile.getPackets(iRow, iCol, firstSec=beginTime, integrationTime=expTime, fields=['peakHeights'])
nPhoton = len(tpl['timestamps'])
if nPhoton>0:
print "offset=",offset,"row=",iRow, "iCol=",iCol, "nPhotons=",nPhoton
rows = np.append(rows, iRow*np.ones(nPhoton,dtype=np.int))
cols = np.append(cols, iCol*np.ones(nPhoton,dtype=np.int))
dts = np.append(dts, tpl['timestamps']-tl)
secs = np.append(secs, tpl['timestamps'].astype(np.int))
phs = np.append(phs, tpl['peakHeights'])
nPhotonSum += nPhoton
pickle.dump({"offset":offset,"rows":rows,"cols":cols,"dts":dts,"secs":secs,"phs":phs}, pickleFile)
print "keys=",tpl.keys()
print "photonSum=",nPhotonSum
#plt.savefig("csb2.png")
pickleFile.close()
del obsFile
|
gpl-2.0
|
mohseniaref/adore-doris
|
scr/gmtsarReadfiles.py
|
1
|
8223
|
#!/usr/bin/env python
"""
gmtsarReadfiles.py SlcFile.SLC ParameterFile.PRM StateVectorFile.LED
"""
import os, sys
import getopt
import StringIO
import ConfigParser
import matplotlib
import matplotlib.pylab
import numpy as np
import adore
def usage():
print __doc__
def usageLong():
print """
DESCRIPTION [default]:
"""
def main(argv):
if not argv:
usage()
sys.exit(2)
try:
slcfile=argv[0];
except:
print "SLC (*.DAT) file not specified."
sys.exit(2)
try:
prmfile=argv[1];
except:
print "Parameter (*.PRM) file not specified."
sys.exit(2)
try:
ledfile=argv[2];
except:
print "Leader (*.LED) file not specified."
sys.exit(2)
if not os.path.exists(prmfile):
print "File not found:", prmfile
sys.exit(2)
if not os.path.exists(ledfile):
print "File not found:", ledfile
sys.exit(2)
if not os.path.exists(slcfile):
print "File not found:", slcfile
sys.exit(2)
#print('prmfile: %s' % prmfile);
#print('slcfile: %s' % slcfile);
c=2.997e8;
#print('Reading the parameter file: %s' % prmfile);
d=prm2dict(prmfile)
d['slcfile']=slcfile+'.ci2'
d['prmfile']=prmfile
d['product']=d['slc_file'][:-4]
d['dummy'] ='dummy'
d['checknumlines'] = int(d['num_patches'])*int(d['num_valid_az'])/2
#print d
if int(d['sc_identity']) <= 2 :
d['producttype'] = "ERS"
elif int(d['sc_identity']) == 3 :
d['producttype'] = "ALOS"
elif int(d['sc_identity']) == 4 :
d['producttype'] = "ASAR"
d['sarprocessor']='GAMMA'
d['pass']='dummy'#'DESCENDING'
d['frequency']=c/np.double(d['radar_wavelength'])
d['rng_samp_rate']=np.double(d['rng_samp_rate'])/1e6; #MHz
d['rbw']=d['rng_samp_rate']
dt=matplotlib.pylab.num2date(matplotlib.pylab.datestr2num(d['sc_clock_start'][:4]+'-01-01')+np.double(d['sc_clock_start'][4:]))
#d['scenedate']=dt.isoformat()
d['firstlinetime']=dt.strftime('%d-%b-%Y %H:%M:%S.')+str(dt.microsecond)
dt=matplotlib.pylab.num2date(matplotlib.pylab.datestr2num(d['sc_clock_stop'][:4]+'-01-01')+np.double(d['sc_clock_stop'][4:]))
d['lastlinetime']=dt.strftime('%d-%b-%Y %H:%M:%S.')+str(dt.microsecond)
d['prf']=np.double(d['prf'])
d['abw']=d['prf']
d['twt']=2.*np.double(d['near_range'])/c*1e3 #ms
d['sv']=np.loadtxt(ledfile, skiprows=1, usecols=[2,3,4,5])
d['numstatevectors']=len(d['sv'])
#print('Writing the Doris result file...');
printout(d)
#print('Converting the slc file to Doris format...');
#convert slc file
bsq2bip(slcfile, int(d['num_rng_bins']));
#print('All done.');
def prm2dict(prm):
'''prm2dict(prm)
'''
#http://stackoverflow.com/questions/2885190/using-pythons-configparser-to-read-a-file-without-section-name
prm_str = '[gmtsar]\n' + open(prm, 'r').read()
prm_fp = StringIO.StringIO(prm_str)
config = ConfigParser.RawConfigParser()
config.readfp(prm_fp)
return config._sections['gmtsar']
def printout(d):
''' printout(dict)
'''
str0='''
=====================================================
MASTER RESULTFILE: master.res
Created by:
InSAR Processor: Doris (Delft o-o Radar Interferometric Software)
Version: Version (optimal)
FFTW library: used
VECLIB library: not used
LAPACK library: not used
Compiled at: Dec 19 2008 17:26:52
By GNU gcc: 4.1.4
File creation at: Fri Dec 19 19:08:21 2008
--------------------------------------------------------
| Delft Institute of Earth Observation and Space Systems |
| Delft University of Technology |
| http://enterprise.lr.tudelft.nl/doris/ |
--------------------------------------------------------
Start_process_control
readfiles: 1
precise_orbits: 0
modify_orbits: 0
crop: 0
sim_amplitude: 0
master_timing: 0
oversample: 0
resample: 0
filt_azi: 0
filt_range: 0
NOT_USED: 0
End_process_control
*******************************************************************
*_Start_readfiles:
*******************************************************************
Volume file: %s
Volume_ID: %s
Volume_identifier: %s
Volume_set_identifier: %s
(Check)Number of records in ref. file: %d
Product type specifier: %s
SAR_PROCESSOR: %s
SWATH: %s
PASS: %s
RADAR_FREQUENCY (HZ): %f
Logical volume generating facility: %s
Logical volume creation date: %s
Location and date/time of product creation: %s
Scene identification: ORBIT %s
Scene location: FRAME %s
Leader file: %s
Sensor platform mission identifer: %s
Scene_centre_latitude: %s
Scene_centre_longitude: %s
Scene_centre_heading: %s
Radar_wavelength (m): %f
First_pixel_azimuth_time (UTC): %s
TIME TO LAST LINE: compute prf: %s
Pulse_Repetition_Frequency (computed, Hz): %f
Total_azimuth_band_width (Hz): %f
Weighting_azimuth: %s
Xtrack_f_DC_constant (Hz, early edge): %f
Xtrack_f_DC_linear (Hz/s, early edge): %f
Xtrack_f_DC_quadratic (Hz/s/s, early edge): %f
Range_time_to_first_pixel (2way) (ms): %.10f
Range_sampling_rate (computed, MHz): %f
Total_range_band_width (MHz): %f
Weighting_range: %s
*******************************************************************
*_Start_leader_datapoints
*******************************************************************
t(s) X(m) Y(m) Z(m)
NUMBER_OF_DATAPOINTS: %d
'''
sys.stdout.write(str0 % (d['product'],
d['dummy'],
d['dummy'],
d['dummy'],
d['checknumlines'],
d['producttype'],
d['sarprocessor'],
d['dummy'],
d['pass'],
d['frequency'],
d['dummy'],
d['dummy'],
d['dummy'],
d['dummy'],
d['dummy'],
d['product'],
d['producttype'],
d['dummy'],
d['dummy'],
d['dummy'],
np.double(d['radar_wavelength']),
d['firstlinetime'],
d['lastlinetime'],
np.double(d['prf']),
np.double(d['abw']),
d['dummy'],
np.double(d['fd1']),
np.double(d['fdd1']),
np.double(d['fddd1']),
d['twt'],
d['rng_samp_rate'],
d['rbw'],
d['dummy'],
d['numstatevectors']) );
# Dump the state vectors in the required format...
# $AWK '/^time_of_first_state_vector/{t=$2};/^state_vector_interval/{dt=$2;c=0};/^state_vector_position/{ printf "%.6f\t%.3f\t%.3f\t%.3f\n", t+(c++)*dt, $2, $3, $4 }' $PARFILE
for k in xrange(len(d['sv'])):
sys.stdout.write(str("%f\t%f\t%f\t%f\n" %(d['sv'][k,0], d['sv'][k,1], d['sv'][k,2], d['sv'][k,3])))
# Dump the closing section...
sys.stdout.write('''
*******************************************************************
* End_leader_datapoints:_NORMAL
*******************************************************************
Datafile: %s
Number_of_lines_original: %d
Number_of_pixels_original: %d
*******************************************************************
* End_readfiles:_NORMAL
*******************************************************************
''' % (d['slcfile'],
d['checknumlines'],
int(d['num_rng_bins'])))
def bsq2bip(slcfile, width):
#print('...Reading the data file...');
if not os.access(slcfile+'.ci2', os.R_OK):
data=adore.getdata(slcfile, width, 'ci4', interleave='bsq');
#print('...Writing doris compatible slc file');
adore.writedata(slcfile+'.ci2', data, 'ci2');
if __name__=='__main__':
main(sys.argv[1:]);
|
gpl-2.0
|
elkingtonmcb/h2o-2
|
py/testdir_single_jvm/test_GLM2_basic_cmp2.py
|
9
|
7280
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_exec, h2o_glm, h2o_jobs
import h2o_print as h2p
SCIPY_INSTALLED = True
try:
import scipy as sp
import numpy as np
import sklearn as sk
import statsmodels as sm
import statsmodels.api as sm_api
print "numpy, scipy and sklearn are installed. Will do extra checks"
except ImportError:
print "numpy, sklearn, or statsmodels is not installed. Will just do h2o stuff"
SCIPY_INSTALLED = False
# http://statsmodels.sourceforge.net/devel/glm.html#module-reference
# This seems better than the sklearn LogisticRegression I was using
# Using Logit is as simple as this: http://statsmodels.sourceforge.net/devel/examples/generated/example_discrete.html
#*********************************************************************************
def do_statsmodels_glm(self, bucket, csvPathname, L, family='gaussian'):
h2p.red_print("Now doing statsmodels")
h2p.red_print("http://statsmodels.sourceforge.net/devel/glm.html#module-reference")
h2p.red_print("http://statsmodels.sourceforge.net/devel/generated/statsmodels.genmod.generalized_linear_model.GLM.html")
import numpy as np
import scipy as sp
from numpy import loadtxt
import statsmodels as sm
csvPathnameFull = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
if 1==1:
dataset = np.loadtxt(
open(csvPathnameFull,'r'),
skiprows=1, # skip the header
delimiter=',',
dtype='float');
    # skipping cols from the beginning... (ID is col 1)
# In newer versions of Numpy, np.genfromtxt can take an iterable argument,
# so you can wrap the file you're reading in a generator that generates lines,
# skipping the first N columns. If your numbers are comma-separated, that's something like
if 1==0:
f = open(csvPathnameFull,'r'),
np.genfromtxt(
(",".join(ln.split()[1:]) for ln in f),
skiprows=1, # skip the header
delimiter=',',
dtype='float');
print "\ncsv read for training, done"
# data is last column
# drop the output
n_features = len(dataset[0]) - 1;
print "n_features:", n_features
# don't want ID (col 0) or CAPSULE (col 1)
# get CAPSULE
target = [x[1] for x in dataset]
# slice off the first 2
train = np.array ( [x[2:] for x in dataset] )
n_samples, n_features = train.shape
print "n_samples:", n_samples, "n_features:", n_features
print "histogram of target"
print sp.histogram(target,3)
print "len(train):", len(train)
print "len(target):", len(target)
print "dataset shape:", dataset.shape
if family!='gaussian':
raise Exception("Only have gaussian logistic for scipy")
# train the classifier
gauss_log = sm_api.GLM(target, train, family=sm_api.families.Gaussian(sm_api.families.links.log))
start = time.time()
gauss_log_results = gauss_log.fit()
print "sm_api.GLM took", time.time() - start, "seconds"
print gauss_log_results.summary()
#*********************************************************************************
def do_h2o_glm(self, bucket, csvPathname, L, family='gaussian'):
h2p.red_print("\nNow doing h2o")
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local', timeoutSecs=180)
# save the resolved pathname for use in the sklearn csv read below
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print inspect
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
# Need to chop out the ID col?
# x = 'ID'
# y = 'CAPSULE'
family = family
alpha = '0'
lambda_ = L
nfolds = '0'
modelKey = 'GLM_Model'
y = 'GLEASON'
kwargs = {
'response' : y,
'ignored_cols' : 'ID, CAPSULE',
'family' : family,
'lambda' : lambda_,
'alpha' : alpha,
'n_folds' : nfolds, # passes if 0, fails otherwise
'destination_key' : modelKey,
}
timeoutSecs = 60
start = time.time()
glmResult = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
# this stuff was left over from when we got the result after polling the jobs list
# okay to do it again
# GLM2: when it redirects to the model view, we no longer have the job_key! (unlike the first response and polling)
(warnings, clist, intercept) = h2o_glm.simpleCheckGLM(self, glmResult, None, **kwargs)
cstring = "".join([("%.5e " % c) for c in clist])
h2p.green_print("h2o alpha ", alpha)
h2p.green_print("h2o lambda ", lambda_)
h2p.green_print("h2o coefficient list:", cstring)
h2p.green_print("h2o intercept", "%.5e " % intercept)
# other stuff in the json response
glm_model = glmResult['glm_model']
_names = glm_model['_names']
coefficients_names = glm_model['coefficients_names']
    # the first submodel is the right one, if only one lambda is provided as a parameter above
submodels = glm_model['submodels'][0]
beta = submodels['beta']
h2p.red_print("beta:", beta)
norm_beta = submodels['norm_beta']
iteration = submodels['iteration']
validation = submodels['validation']
auc = validation['auc']
aic = validation['aic']
null_deviance = validation['null_deviance']
residual_deviance = validation['residual_deviance']
print '_names', _names
print 'coefficients_names', coefficients_names
# did beta get shortened? the simple check confirms names/beta/norm_beta are same length
print 'beta', beta
print 'iteration', iteration
print 'auc', auc
#*********************************************************************************
# the actual test that will run both
#*********************************************************************************
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1, java_heap_GB=10)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_basic_cmp2(self):
if 1==1:
bucket = 'smalldata'
importFolderPath = "logreg"
csvFilename = 'prostate.csv'
if 1==0:
bucket = 'home-0xdiag-datasets'
importFolderPath = "standard"
csvFilename = 'covtype.data'
csvPathname = importFolderPath + "/" + csvFilename
# use L for lambda in h2o, C=1/L in sklearn
family = 'gaussian'
L = 1e-4
do_h2o_glm(self, bucket, csvPathname, L, family)
if SCIPY_INSTALLED:
do_statsmodels_glm(self, bucket, csvPathname, L, family)
# since we invert for C, can't use 0 (infinity)
L = 0
do_h2o_glm(self, bucket, csvPathname, L, family)
if SCIPY_INSTALLED:
do_statsmodels_glm(self, bucket, csvPathname, L, family)
if __name__ == '__main__':
h2o.unit_main()
|
apache-2.0
|
andreas-h/geodas
|
geodas/plotting.py
|
1
|
3141
|
# -*- coding: utf-8 -*-
#
# geodas - Geospatial Data Analysis in Python
#
#:Author: Andreas Hilboll <[email protected]>
#:Date: Tue Jan 22 10:56:08 2013
#:Website: http://andreas-h.github.com/geodas/
#:License: GPLv3
#:Version: 0.1
#:Copyright: (c) 2012-2013 Andreas Hilboll <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Library imports
# ============================================================================
from matplotlib import mpl
import matplotlib.pyplot as plt
import numpy as np
# Plotting a pcolormesh on a basemap
# ============================================================================
def pcolormesh(gdata, cbar=True, vmin=None, vmax=None, cmap=None,
ncolors=255, proj='cyl',
lon_0=None, lat_0=None, lat_1=None, ax=None):
    # TODO: support kwargs for the basemap instance
if len(gdata.coordinates) > 2:
raise ValueError("You asked me to pcolormesh a dataset with "
"dimension {ndim}, and I don't know how to do "
"that.".format(ndim=len(gdata.coordinates)))
from mpl_toolkits.basemap import Basemap
if ax is None:
plt.figure()
ax = plt.gca()
if not vmin:
vmin = np.nanmin(gdata.data)
if not vmax:
vmax = np.nanmax(gdata.data)
if not cmap:
cmap = mpl.cm.get_cmap('jet', ncolors)
elif isinstance(cmap, str):
cmap = mpl.cm.get_cmap(cmap, ncolors)
lons = gdata.coordinates['longitude']
lats = gdata.coordinates['latitude']
m = Basemap(llcrnrlon=lons.min(), llcrnrlat=lats.min(),
urcrnrlon=lons.max(), urcrnrlat=lats.max(),
projection=proj, resolution='l',
lon_0=lon_0, lat_0=lat_0, lat_1=lat_1, ax=ax)
m.drawcoastlines()
m.drawmapboundary()
if lons.ndim == 1 and lats.ndim == 1:
LON, LAT = m(np.meshgrid(lons, lats)[0],
np.meshgrid(lons, lats)[1])
elif lons.ndim == 2 and lats.ndim == 2:
LON, LAT = m(lons, lats)
plotdata = gdata.masked().data
if plotdata.shape == LON.T.shape:
plotdata = plotdata.T
colorNorm = mpl.colors.Normalize(vmin=vmin, vmax=vmax, clip=False)
plot = m.pcolormesh(LON, LAT, plotdata,
norm=colorNorm, cmap=cmap,
vmin=vmin, vmax=vmax)
if cbar:
plt.colorbar(plot, orientation='horizontal', norm=colorNorm,
extend='both', spacing='uniform')
return m
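# A minimal usage sketch (assumed, not part of the original module): 'gdata' is a
# hypothetical 2-D geodas dataset with 'longitude' and 'latitude' coordinates:
#   m = pcolormesh(gdata, cbar=True, cmap='viridis', proj='cyl')
#   plt.savefig('map.png')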
|
gpl-3.0
|
victorbergelin/scikit-learn
|
doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py
|
254
|
2795
|
"""Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
|
bsd-3-clause
|
ktaneishi/deepchem
|
examples/factors/FACTORS_correlations.py
|
8
|
1407
|
"""
Script that computes correlations of FACTORS tasks.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import tempfile
import shutil
import deepchem as dc
import pandas as pd
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from FACTORS_datasets import load_factors
### Load data ###
shard_size = 2000
print("About to load FACTORS data.")
FACTORS_tasks, datasets, transformers = load_factors(shard_size=shard_size)
train_dataset, valid_dataset, test_dataset = datasets
y_train = train_dataset.y
n_tasks = y_train.shape[1]
all_results = []
for task in range(n_tasks):
y_task = y_train[:, task]
for other_task in range(n_tasks):
if task == other_task:
continue
y_other = y_train[:, other_task]
r2 = dc.metrics.pearson_r2_score(y_task, y_other)
print("r2 for %s-%s is %f" % (task, other_task, r2))
all_results.append(r2)
# the histogram of the data
n, bins, patches = plt.hist(np.array(all_results), 50, normed=True, stacked=True,
facecolor='green', alpha=0.75)
plt.xlabel('Cross-task Correlations')
plt.ylabel('Probability Density')
plt.title('Histogram of Factors Intertask Correlations')
plt.grid(True)
plt.savefig("Factors_correlations.png")
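# Added sanity check (not in the original script; task indices 0 and 1 are
# chosen arbitrarily): deepchem's pearson_r2_score should agree with the
# squared Pearson correlation coefficient computed directly with numpy.
r2_check = np.corrcoef(y_train[:, 0], y_train[:, 1])[0, 1] ** 2
print("numpy cross-check r2(task 0, task 1) = %f" % r2_check)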
|
mit
|
akrherz/iem
|
htdocs/plotting/auto/scripts/p50.py
|
1
|
6693
|
"""IBW Tag Freq."""
import datetime
from pandas.io.sql import read_sql
from pyiem.plot.use_agg import plt
from pyiem.reference import state_names
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT = {"state": "Aggregate by State", "wfo": "Aggregate by WFO"}
PDICT2 = {"percent": "Frequency [%]", "count": "Count"}
FONTSIZE = 12
def get_description():
"""Return a dict describing how to call this plotter"""
desc = dict()
desc["data"] = True
desc["cache"] = 300
desc[
"description"
] = """This app produces a table of frequencies of
wind and hail tags used in NWS Warnings at issuance."""
today = datetime.datetime.today() + datetime.timedelta(days=1)
desc["arguments"] = [
dict(
type="networkselect",
name="station",
network="WFO",
default="_ALL",
label="Select WFO:",
all=True,
),
dict(type="state", name="state", default="IA", label="Select State:"),
dict(
type="select",
name="opt",
default="wfo",
label="Plot for WFO(all option) or State:",
options=PDICT,
),
dict(
type="select",
name="p",
default="percent",
label="What to plot:",
options=PDICT2,
),
dict(
type="date",
name="date1",
optional=True,
default="2010/04/01",
label="Start Date Bounds (optional):",
min="2010/04/01",
),
dict(
type="date",
name="date2",
optional=True,
default=today.strftime("%Y/%m/%d"),
label="Start Date Bounds (optional):",
min="2010/04/01",
),
]
return desc
def plotter(fdict):
"""Go"""
ctx = get_autoplot_context(fdict, get_description())
ctx["_nt"].sts["_ALL"] = {"name": "All Offices"}
opt = ctx["opt"]
station = ctx["station"]
state = ctx["state"]
date1 = ctx.get("date1", datetime.date(2010, 4, 1))
date2 = ctx.get(
"date2", datetime.date.today() + datetime.timedelta(days=1)
)
pgconn = get_dbconn("postgis")
wfo_limiter = ("and wfo = '%s' ") % (
station if len(station) == 3 else station[1:],
)
if station == "_ALL":
wfo_limiter = ""
sql = f"""
select windtag, hailtag,
min(issue at time zone 'UTC') as min_issue,
max(issue at time zone 'UTC') as max_issue, count(*)
from sbw WHERE issue >= %s and issue <= %s {wfo_limiter}
and (windtag > 0 or hailtag > 0)
and status = 'NEW' and phenomena = 'SV'
GROUP by windtag, hailtag
"""
args = (date1, date2)
supextra = ""
if opt == "wfo" and station != "_ALL":
supextra = "For warnings issued by %s %s.\n" % (
station,
ctx["_nt"].sts[station]["name"],
)
if opt == "state":
supextra = ("For warnings that covered some portion of %s.\n") % (
state_names[state],
)
sql = """
SELECT windtag, hailtag,
min(issue at time zone 'UTC') as min_issue,
max(issue at time zone 'UTC') as max_issue, count(*)
from sbw w, states s
WHERE issue >= %s and issue <= %s and
s.state_abbr = %s and ST_Intersects(s.the_geom, w.geom) and
(windtag > 0 or hailtag > 0)
and status = 'NEW' and phenomena = 'SV'
GROUP by windtag, hailtag
"""
args = (date1, date2, state)
df = read_sql(sql, pgconn, params=args, index_col=None)
if df.empty:
raise NoDataFound("No data was found.")
minvalid = df["min_issue"].min()
maxvalid = df["max_issue"].max()
df.fillna(0, inplace=True)
total = df["count"].sum()
uniquehail = df["hailtag"].unique().tolist()
uniquehail.sort()
uniquehail = uniquehail[::-1]
uniquewind = df["windtag"].astype(int).unique().tolist()
uniquewind.sort()
gdf = df.set_index(["hailtag", "windtag"])
(fig, ax) = plt.subplots(figsize=(8, 6))
for (hailtag, windtag), row in gdf.iterrows():
y = uniquehail.index(hailtag)
x = uniquewind.index(windtag)
val = row["count"] / total * 100.0
ax.text(
x,
y,
"%.2f" % (val,) if ctx["p"] == "percent" else row["count"],
ha="center",
fontsize=FONTSIZE,
color="r" if val >= 10 else "k",
va="center",
bbox=dict(color="white", boxstyle="square,pad=0"),
)
for hailtag, row in df.groupby("hailtag").sum().iterrows():
y = uniquehail.index(hailtag)
x = len(uniquewind)
val = row["count"] / total * 100.0
ax.text(
x,
y,
"%.2f" % (val,) if ctx["p"] == "percent" else int(row["count"]),
ha="center",
fontsize=FONTSIZE,
color="r" if val >= 10 else "k",
va="center",
bbox=dict(color="white", boxstyle="square,pad=0"),
)
for windtag, row in df.groupby("windtag").sum().iterrows():
y = -1
x = uniquewind.index(windtag)
val = row["count"] / total * 100.0
ax.text(
x,
y,
"%.2f" % (val,) if ctx["p"] == "percent" else int(row["count"]),
ha="center",
fontsize=FONTSIZE,
color="r" if val >= 10 else "k",
va="center",
bbox=dict(color="white", boxstyle="square,pad=0"),
)
ax.set_xticks(range(len(uniquewind) + 1))
ax.set_yticks(range(-1, len(uniquehail)))
ax.set_xlim(-0.5, len(uniquewind) + 0.5)
ax.set_ylim(-1.5, len(uniquehail) - 0.5)
ax.set_xticklabels(uniquewind + ["Total"], fontsize=14)
ax.set_yticklabels(["Total"] + uniquehail, fontsize=14)
ax.xaxis.tick_top()
ax.set_xlabel("Wind Speed [mph]", fontsize=14)
ax.set_ylabel("Hail Size [inch]", fontsize=14)
ax.xaxis.set_label_position("top")
plt.tick_params(top=False, bottom=False, left=False, right=False)
fig.suptitle(
(
"%s of NWS Wind/Hail Tags for "
"Severe Thunderstorm Warning Issuance\n"
"%s through %s, %.0f warnings\n%s"
"Values larger than 10%% in red"
)
% (
PDICT2[ctx["p"]],
minvalid.strftime("%-d %b %Y"),
maxvalid.strftime("%-d %b %Y"),
df["count"].sum(),
supextra,
)
)
ax.set_position([0.12, 0.05, 0.86, 0.72])
return fig, df
if __name__ == "__main__":
plotter(dict())
|
mit
|
vibhorag/scikit-learn
|
sklearn/feature_extraction/tests/test_image.py
|
205
|
10378
|
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
    # Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
|
bsd-3-clause
|
spallavolu/scikit-learn
|
doc/datasets/mldata_fixture.py
|
367
|
1183
|
"""Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
|
bsd-3-clause
|
plissonf/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
181
|
15664
|
from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
|
bsd-3-clause
|
IshankGulati/scikit-learn
|
sklearn/mixture/tests/test_dpgmm.py
|
84
|
7866
|
# Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_gmm.py'
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `DPGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be removed in 0.20.",
DPGMM)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_VBGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `VBGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. VBGMM is deprecated "
"in 0.18 and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_vbgmm_no_modify_alpha():
alpha = 2.
n_components = 3
X, y = make_blobs(random_state=1)
vbgmm = VBGMM(n_components=n_components, alpha=alpha, n_iter=1)
assert_equal(vbgmm.alpha, alpha)
assert_equal(vbgmm.fit(X).alpha_, float(alpha) / n_components)
|
bsd-3-clause
|
Bone-Imaging-ToolKit/BItk
|
setup.py
|
1
|
1881
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import sys
import os
# import the lib (be careful with side effects)
cwd = os.path.dirname(__file__) or './'
sys.path.append(cwd)
import bitk as btk
# the proper setup
setup(
# module name
name=btk.__name__,
# module version
version=btk.__version__,
# author
author=btk.__maintainer__,
# author's mail (with public visibility)
author_email=btk.__contact__,
# short summary
description="A python3 module for bone image processing",
# long description
# - dump the README.md file -
long_description=open('README.md').read(),
packages=find_packages(),
# list of the dependencies
# e.g. ["module", "module >= 0.3", "module==0.5a7"]
install_requires=["numpy",
"numba",
"mahotas",
"pydicom",
"imread",
"pandas"],
# extra data to include in the module
# - dump the MANIFEST.in file -
include_package_data=True,
# url of the module
url=btk.__url__,
# list of classifiers for the module
# https://pypi.python.org/pypi?%3Aaction=list_classifiers.
classifiers=[
"Programming Language :: Python",
"Development Status :: 1 - Planning",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Natural Language :: French",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: >=3.6",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps."
],
# Other
# ...
)
|
gpl-3.0
|
RPGOne/scikit-learn
|
examples/linear_model/plot_ols.py
|
104
|
1936
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
mehdidc/scikit-learn
|
sklearn/mixture/tests/test_dpgmm.py
|
12
|
2594
|
import unittest
import nose
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less
from sklearn.mixture.tests.test_gmm import GMMTester
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
if __name__ == '__main__':
nose.runmodule()
|
bsd-3-clause
|
WhatDo/FlowFairy
|
examples/sine_fix/stages.py
|
1
|
4736
|
import tensorflow as tf
import numpy as np
import os
import io
from datetime import datetime
import matplotlib
matplotlib.use('Agg')
from feature import classify
import matplotlib.pyplot as plt
from flowfairy.core.stage import register, Stage, stage
from flowfairy.conf import settings
from flowfairy import app
def get_log_dir():
return os.path.join(settings.LOG_DIR, settings.LOGNAME)
def norm(tensor):
tmin = tf.reduce_min(tensor)
return tf.div((tensor - tmin), (tf.reduce_max(tensor) - tmin) + 1e-12)
@register(100)
class SummaryStage(Stage):
def fig2rgb_array(self, expand=True):
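        # Render the current matplotlib figure into an RGB numpy array,
        # optionally with a leading batch dimension, so it can be fed to the
        # image summary placeholders defined in before().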
self.figure.canvas.draw()
buf = self.figure.canvas.tostring_rgb()
ncols, nrows = self.figure.canvas.get_width_height()
shape = (nrows, ncols, 3) if not expand else (1, nrows, ncols, 3)
return np.fromstring(buf, dtype=np.uint8).reshape(shape)
def reset_fig(self):
self.figure = plt.figure(num=0, figsize=(6,4), dpi=300)
self.figure.clf()
def before(self, sess, net):
tf.summary.scalar('train_acc', net.train_acc)
tf.summary.scalar('train_cost', net.train_cost)
tf.summary.scalar('train_uncertainty', net.train_ouncertainty)
tf.summary.scalar('val_acc', net.val_acc)
tf.summary.scalar('val_cost', net.val_cost)
tf.summary.scalar('val_uncertainty', net.val_ouncertainty)
tf.contrib.layers.summarize_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.net = net
arg = tf.argmax(self.net.train_pred, 2)
tf.summary.audio('input', norm(self.net.train_x), settings.SAMPLERATE)
tf.summary.audio('target', norm(tf.cast(self.net.train_y, tf.float32)), settings.SAMPLERATE)
tf.summary.audio('pred', norm(tf.cast(arg, tf.float32)), settings.SAMPLERATE)
self.reset_fig()
img = self.fig2rgb_array()
self.train_image_in = tf.placeholder(np.uint8, shape=img.shape)
self.train_image = tf.Variable(np.zeros(img.shape, dtype=np.uint8), trainable=False, name='train_graph_image')
self.train_image_assign = self.train_image.assign(self.train_image_in)
tf.summary.image('train_graph', self.train_image)
self.val_image_in = tf.placeholder(np.uint8, shape=img.shape)
self.val_image = tf.Variable(np.zeros(img.shape, dtype=np.uint8), trainable=False, name='val_graph_image')
self.val_image_assign = self.val_image.assign(self.val_image_in)
tf.summary.image('val_graph', self.val_image)
self.merged = tf.summary.merge_all()
self.writer = tf.summary.FileWriter(get_log_dir(), sess.graph)
def plot(self, sess, pred, x, y, unc):
self.reset_fig()
x = norm(x)
res, x, y, unc = sess.run([ pred, x, y, unc ])
x = classify(x)
res = np.argmax(res, 2)
#start = np.random.randint(500)
start = 500
end = start + 128
discrete_class = settings.DISCRETE_CLASS
bar_x = np.arange(128)
plt.subplot('111').bar(bar_x, unc[0,start:end]*discrete_class, color='violet', alpha=0.3)
plt.subplot('111').plot(res[0,start:end],'r')
plt.subplot('111').plot(y[0,start:end],'b', alpha=0.5)
plt.subplot('111').plot(x[0,start:end],'g', alpha=0.5)
def draw_img(self, sess):
self.plot(sess, self.net.train_pred, self.net.train_x, self.net.train_y, self.net.train_uncertainty)
sess.run(self.train_image_assign, feed_dict={self.train_image_in: self.fig2rgb_array()})
self.plot(sess, self.net.val_pred, self.net.val_x, self.net.val_y, self.net.val_uncertainty)
sess.run(self.val_image_assign, feed_dict={self.val_image_in: self.fig2rgb_array()})
def run(self, sess, i):
self.draw_img(sess)
summary = sess.run(self.merged)
self.writer.add_summary(summary, i)
@register()
class TrainingStage(Stage):
def before(self, sess, net):
self.optimizer = net.optimizer
def run(self, sess, i):
sess.run(self.optimizer)
@register(1000)
class SavingStage(Stage):
def before(self, sess, net):
self.saver = tf.train.Saver()
latest = tf.train.latest_checkpoint(settings.LOG_DIR, latest_filename=self.latest_filename())
if latest:
self.saver.restore(sess, latest)
global_step = int(latest[latest.rfind('-')+1:])
app.set_global_step(global_step)
print(f'Restored {self.latest_filename()}')
def run(self, sess, i):
self.saver.save(sess, self.ckpt(), global_step=i, latest_filename=self.latest_filename())
def ckpt(self):
return get_log_dir()+'.ckpt'
def latest_filename(self):
return settings.LOGNAME+'.checkpoint'
|
mit
|
JoshGlue/RU-CCN
|
Test.py
|
1
|
2773
|
from DQN import Estimator
import tensorflow as tf
import os
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import spline
from stock_env import Stock
env = Stock()
VALID_ACTIONS = env.VALID_ACTIONS
experiment_dir = os.path.abspath("./experiments/{}".format(env.spec.id))
estimator = Estimator(scope="q", summaries_dir=experiment_dir)
experiment_dir = os.path.abspath("./experiments/{}".format(env.spec.id))
stocks_to_iterate = 1000
smoothing = 500
x = np.array(range(stocks_to_iterate))
def deep_q_investing():
profit = 0
stocks_invested = 0
stocks_iterated = 0
y = []
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
state = env.reset()
while stocks_iterated < stocks_to_iterate:
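            # Reshape the flat observation into an 80x80 frame, stack four
            # copies along the channel axis and add a batch dimension to match
            # the estimator's expected (batch, 80, 80, 4) input.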
state = np.squeeze(np.reshape(state, [80, 80]))
state = np.stack([state] * 4, axis=2)
state = np.array([state])
q_values = estimator.predict(sess, state)[0]
best_action = np.argmax(q_values)
action = VALID_ACTIONS[best_action]
next_state, reward, done, _ = env.step(action)
if done:
profit += reward
stocks_invested += reward != 0
y.append(profit/(stocks_invested or 1))
state = env.reset()
stocks_iterated += 1
print ("Stock {}/{} , Profit: {}".format(stocks_iterated, stocks_to_iterate, profit/(stocks_invested or 1)))
else:
state = next_state
x_new = np.linspace(x.min(),x.max(),smoothing)
y = np.array(y)
y_smooth = spline(x, y, x_new)
return [plt.plot(x_new, y_smooth, linewidth=2, label='Deep Q'),profit / (stocks_invested or 1)]
def random_investing():
profit = 0
stocks_invested = 0
stocks_iterated = 0
y = []
state = env.reset()
while stocks_iterated < stocks_to_iterate:
action = np.random.choice(np.array(VALID_ACTIONS))
next_state, reward, done, _ = env.step(action)
if done:
profit += reward
stocks_invested += reward != 0
y.append(profit / (stocks_invested or 1))
state = env.reset()
stocks_iterated += 1
print(
"Stock {}/{} , Profit: {}".format(stocks_iterated, stocks_to_iterate, profit / (stocks_invested or 1)))
else:
state = next_state
x_new = np.linspace(x.min(),x.max(),smoothing)
y = np.array(y)
y_smooth = spline(x, y, x_new)
return [plt.plot(x_new, y_smooth, linewidth=2, label='Random'),profit / (stocks_invested or 1)]
plt.clf()
deep_q_investing()
random_investing()
plt.legend(loc='upper left')
plt.ylabel("Profit")
plt.xlabel("Runs")
plt.show()
|
apache-2.0
|
emersongreen/asciiclass
|
lectures/lec6/match-loop.py
|
3
|
2094
|
import csv
from sklearn import tree
import editdist
import re
def string_match_score(p1,p2,field):
s1 = p1[field]
s2 = p2[field]
return editdist.distance(s1.lower(),s2.lower())/float(len(s1))
def jaccard_score(p1,p2,field):
name1 = p1[field]
name2 = p2[field]
set1 = set(name1.lower().split())
set2 = set(name2.lower().split())
c = set1.intersection(set2)
return float(len(c)) / (len(set1) + len(set2) - len(c))
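# Worked example (added for clarity): for the names "sony dvd player" and
# "dvd player sony black" the token sets are {sony, dvd, player} and
# {dvd, player, sony, black}; the intersection holds 3 tokens, so the score is
# 3 / (3 + 4 - 3) = 0.75.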
def price_score(p1,p2,field):
price1 = p1[field]
if (len(price1) == 0): return 10000
price2 = p2[field]
if (len(price2) == 0): return 10000
price1 = re.sub('[\$,]', '', price1)
price2 = re.sub('[\$,]', '', price2)
price1 = float(price1)
price2 = float(price2)
return abs(price1 - price2)
print "Loading Data"
abtReader = csv.DictReader(open("Abt.csv","rU"))
buyReader = csv.DictReader(open("Buy.csv","rU"))
gtLines = csv.DictReader(open("abt_buy_perfectMapping.csv","rU"))
gtBuyMap = {}
gtAbtMap = {}
abtAr = []
buyAr = []
for r in abtReader:
abtAr.append(r)
for r in buyReader:
buyAr.append(r)
for r in gtLines:
gtAbtMap[r["idAbt"]] = r["idBuy"]
gtBuyMap[r["idBuy"]] = r["idAbt"]
for loop in range(0,10,1):
falsePos = 0
truePos = 0
falseNeg = 0
trueNeg = 0
thresh = float(loop)/10.0
for r1 in buyAr:
bestMatch = 0
bestVal = []
j = 0
for r2 in abtAr:
s = jaccard_score(r1,r2,"name")
if (s > bestMatch):
bestMatch = s
bestVal = r2
if (bestMatch > thresh):
# print "Best match: ",r1["name"],bestVal["name"],"score=",bestMatch
if (gtBuyMap[r1["id"]] == bestVal["id"]):
truePos = truePos + 1
else:
falsePos = falsePos + 1
precision = truePos / float(truePos + falsePos)
recall = truePos / float(len(buyAr))
fmeas = (2.0 * precision * recall) / (precision + recall)
print "THRESH = ",thresh,"TP = ",truePos,"FP = ",falsePos,"PREC = ",precision,"RECALL = ",recall,"F = ",fmeas
|
mit
|
dario-chiappetta/Due
|
due/episode.py
|
1
|
12174
|
"""
An Episode is a sequence of Events issued by agents. Here we define an interface
for Episodes, as well as some helper methods to manipulate their content:
* :class:`Episode` models recorded Episodes that can be used to train agents
* :class:`LiveEpisode` models Episodes that are still in progress.
* :func:`extract_utterance_pairs` will extract utterances as strings from Episodes.
API
===
"""
import io
import json
import uuid
import asyncio
import logging
from functools import lru_cache
from datetime import datetime
import numpy as np
import pandas as pd
import due.agent
from due.event import Event
from due.util.time import convert_datetime, parse_timedelta
UTTERANCE_LABEL = 'utterance'
MAX_EVENT_RESPONSES = 200
class Episode(object):
"""
An Episode is a sequence of Events issued by Agents
"""
def __init__(self, starter_agent_id, invited_agent_id):
self._logger = logging.getLogger(__name__ + ".Episode")
self.starter_id = starter_agent_id
self.invited_id = invited_agent_id
self.id = str(uuid.uuid1())
self.timestamp = datetime.now()
self.events = []
def __eq__(self, other):
if isinstance(other, Episode):
if self.starter_id != other.starter_id: return False
if self.invited_id != other.invited_id: return False
if self.id != other.id: return False
if self.timestamp != other.timestamp: return False
if self.events != other.events: return False
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def last_event(self, event_type=None):
"""
Returns the last event in the Episode. Optionally, events can be filtered
by type.
:param event_type: an event type, or a collection of types
:type event_type: :class:`Event.Type` or list of :class:`Event.Type`
"""
event_type = event_type if not isinstance(event_type, Event.Type) else (event_type,)
for e in reversed(self.events):
if event_type is None or e.type in event_type:
return e
return None
def save(self, output_format='standard'):
"""
		Save the Episode to a serializable object that can be loaded with
		:meth:`due.episode.Episode.load`.
		By default, episodes are saved in the **standard** format, which is a
		dict of metadata with a list of saved Events (whose format is handled
		by the :class:`due.event.Event` class).
It is also possible to save the Episode in the **compact** format. In
compact representation, event objects are squashed into CSV lines. This
makes them slower to load and save, but more readable and easily
editable without the use of external tools; because of this, they are
especially suited for toy examples and small hand-crafted corpora.
		:return: a serializable representation of `self`
		:rtype: `dict`
"""
result = {
'id': self.id,
'timestamp': self.timestamp,
'starter_agent': str(self.starter_id),
'invited_agents': [str(self.invited_id)],
'events': [e.save() for e in self.events],
'format': 'standard'
}
if output_format == 'compact':
return _compact_saved_episode(result)
return result
@staticmethod
def load(saved_episode):
"""
Loads an Episode as it was saved by :meth:`due.episode.Episode.save`.
:param saved_episode: the episode to be loaded
:type saved_episode: `dict`
:return: an Episode object representing `saved_episode`
:rtype: :class:`due.episode.Episode`
"""
if saved_episode['format'] == 'compact':
saved_episode = _uncompact_saved_episode(saved_episode)
result = Episode(saved_episode['starter_agent'], saved_episode['invited_agents'][0])
result.id = saved_episode['id']
result.timestamp = convert_datetime(saved_episode['timestamp'])
result.events = [Event.load(e) for e in saved_episode['events']]
return result
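# Added usage sketch (not part of the original module; the agent IDs below are
# placeholders): an Episode can be serialized with save() and restored with
# load(), in either the standard or the compact format, e.g.
#
#   episode = Episode('agent-a', 'agent-b')
#   saved = episode.save()                           # standard format
#   compact = episode.save(output_format='compact')  # events squashed into CSV
#   restored = Episode.load(saved)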
class LiveEpisode(Episode):
"""
A LiveEpisode is an Episode that is currently under way. That is, new Events
can be acted in it.
:param starter_agent: the Agent which started the Episode
:type starter_agent: :class:`due.agent.Agent`
:param invited_agent: the agent invited to the Episode
:type invited_agent: :class:`due.agent.Agent`
"""
def __init__(self, starter_agent, invited_agent):
super().__init__(starter_agent.id, invited_agent.id)
self._logger = logging.getLogger(__name__ + ".LiveEpisode")
self.starter = starter_agent
self.invited = invited_agent
self._agent_by_id = {
starter_agent.id: starter_agent,
invited_agent.id: invited_agent
}
def add_event(self, event):
"""
Adds an Event to the LiveEpisode, triggering the
:meth:`due.agent.Agent.event_callback` method on the other participants.
		Response Events that are returned from the callback will be processed
		iteratively.
:param event: the event that was acted by the Agent
:type event: :class:`due.event.Event`
"""
new_events = [event]
count = 0
while new_events:
e = new_events.pop(0)
self._logger.info("New %s event by %s: '%s'", e.type.name, e.agent, e.payload)
agent = self.agent_by_id(e.agent)
self.events.append(e)
e.mark_acted()
for a in self._other_agents(agent):
self._logger.info("Notifying %s", a)
response_events = a.event_callback(e, self)
new_events.extend(response_events)
count += 1
if count > MAX_EVENT_RESPONSES:
self._logger.warning("Agents reached maximum number of responses allowed for a single Event (%s). Further Events won't be notified to Agents", MAX_EVENT_RESPONSES)
break
self.events.extend(new_events)
[e.mark_acted() for e in new_events]
def agent_by_id(self, agent_id):
"""
Retrieve the :class:`due.agent.Agent` object of one of the agents that
are participating in the :class:`LiveEpisode`. Raise `ValueError` if the
given ID does not correspond to any of the agents in the Episode.
:param agent_id: ID of one of the agents in the LiveEpisode
		:type agent_id: `str`
"""
if agent_id not in self._agent_by_id:
raise ValueError(f"Agent '{agent_id}' not found in LiveEpisode {self}")
result = self._agent_by_id[agent_id]
assert isinstance(result, due.agent.Agent)
return result
def _other_agents(self, agent):
return [self.starter] if agent == self.invited else [self.invited]
class AsyncLiveEpisode(LiveEpisode):
"""
This is a subclass of :class:`LiveEpisode` that implement asynchronous
notification of new Events.
"""
def add_event(self, event):
self.events.append(event)
event.mark_acted()
agent = self.agent_by_id(event.agent)
for a in self._other_agents(agent):
loop = asyncio.get_event_loop()
loop.create_task(self.async_event_callback(a, event))
async def async_event_callback(self, agent, event):
self._logger.info("Notifying event %s to agent %s", event, agent)
response_events = agent.event_callback(event, self)
if response_events:
for e in response_events:
self.add_event(e)
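# Hedged usage note: AsyncLiveEpisode schedules the callbacks on the running
# asyncio event loop, so add_event() is meant to be called from async code,
# e.g. (sketch, hypothetical `alice`/`bob`/`event` objects):
#
#     async def main():
#         live = AsyncLiveEpisode(alice, bob)
#         live.add_event(event)      # returns immediately
#         await asyncio.sleep(0)     # give scheduled callbacks a chance to run
#
#     asyncio.run(main())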
#
# Save/Load Helpers
#
def _compact_saved_episode(saved_episode):
"""
Convert a saved episode into a compact representation.
"""
events = saved_episode['events']
events = [_compact_saved_event(e) for e in events]
df = pd.DataFrame(events)
s = io.StringIO()
df.to_csv(s, sep='|', header=False, index=False)
compact_events = [l for l in s.getvalue().split('\n') if l]
return {**saved_episode, 'events': compact_events, 'format': 'compact'}
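# Illustrative shape of the compact representation produced above: each event
# becomes one pipe-separated CSV line with columns type|timestamp|agent|payload
# (the values below are made up):
#
#     utterance|2021-01-01T10:00:00|alice|hello there
#     utterance|2s|bob|hi!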
def _compact_saved_event(saved_event):
"""
Prepare an Event for compact serialization, meaning that its fields must
be writable as a line of CSV. This is always the case, except for Actions,
whose payloads are objects; in this case, the payload is serialized as JSON.
"""
e = saved_event
if e['type'] == Event.Type.Action.value:
return {**e, 'payload': json.dumps(e['payload'])}
return e
def _uncompact_saved_episode(compact_episode):
"""
Convert a compacted saved episode back to the standard format.
"""
buf = io.StringIO('\n'.join(compact_episode['events']))
df = pd.read_csv(buf, sep='|', names=['type', 'timestamp', 'agent', 'payload'])
compact_events = df.replace({np.nan:None}).to_dict(orient='records')
events = []
last_timestamp = convert_datetime(compact_episode['timestamp'])
for e in compact_events:
e_new = _uncompact_saved_event(e, last_timestamp)
events.append(e_new)
last_timestamp = e_new['timestamp']
return {**compact_episode, 'events': events, 'format': 'standard'}
def _uncompact_saved_event(compact_event, last_timestamp):
"""
Note that `compact_event` is not the raw CSV line: it is already its dict
representation. However, Action payloads need to be deserialized from JSON,
and the Pandas interpretation of the CSV needs to be fixed by converting
timestamps to `datetime` and NaN values to `None`.
"""
e = compact_event
timestamp = _uncompact_timestamp(compact_event, last_timestamp)
e = {**e, 'timestamp': timestamp}
if compact_event['type'] == Event.Type.Action.value:
e['payload'] = json.loads(e['payload'])
return e
def _uncompact_timestamp(compact_event, last_timestamp):
"""
In compacted episodes the timestamp can be an ISO string, or a time
difference from the previous event; in the latter case, the delta must be
expressed as an int (number of seconds) or in the `1d2h3m4s` format (see
:func:`due.util.time.parse_timedelta`).
"""
try:
return convert_datetime(compact_event['timestamp'])
except ValueError:
return last_timestamp + parse_timedelta(compact_event['timestamp'])
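# For instance (illustrative values), all of these are accepted as the
# 'timestamp' field of a compact event:
#
#     '2021-01-01T10:00:03'   # absolute ISO timestamp
#     '3'                     # 3 seconds after the previous event
#     '1m30s'                 # 90 seconds after the previous event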
#
# Utilities
#
def _is_utterance(event):
return event.type == Event.Type.Utterance
def extract_utterances(episode, preprocess_f=None, keep_holes=False):
"""
Return all the utterances in an Episode as strings. If the `keep_holes`
parameter is set to `True`, non-utterance events will be returned as well, as
`None` elements in the resulting list.
:param episode: the Episode to extract utterances from
:type episode: :class:`Episode`
:param preprocess_f: when given, sentences will be run through this function before being returned
:type preprocess_f: `func`
:param keep_holes: if `True`, `None` elements will be returned in place of non-utterance events.
:return: the list of utterances in the Episode
:rtype: `list` of `str`
"""
if not preprocess_f:
preprocess_f = lambda x: x
result = []
for e in episode.events:
if e.type == Event.Type.Utterance:
result.append(preprocess_f(e.payload))
elif keep_holes:
result.append(None)
return result
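# Hedged usage sketch (assuming a hypothetical `episode` containing two
# utterances separated by one non-utterance event):
#
#     extract_utterances(episode)                          # ['hello', 'hi!']
#     extract_utterances(episode, keep_holes=True)         # ['hello', None, 'hi!']
#     extract_utterances(episode, preprocess_f=str.upper)  # ['HELLO', 'HI!']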
def extract_utterance_pairs(episode, preprocess_f=None):
"""
Process Events in an Episode, extracting all the Utterance Event pairs that
can be interpreted as one dialogue turn (i.e. an Agent's utterance and a
different Agent's response).
In particular, Event pairs are extracted from the Episode so that:
* Both Events are Utterances (currently, non-utterances will raise an exception)
* The second Event immediately follows the first
* The two Events are acted by two different Agents
This means that if an utterance has more than one answer, only the first
one will be included in the result.
If a `preprocess_f` function is specified, resulting utterances will be run
through this function before being returned. An LRU cache is applied to
`preprocess_f`, as most sentences will be returned both as utterances and as
answers.
Return two lists of the same length, so that each utterance `X_i` in the
first list has its response `y_i` in the second.
:param episode: an Episode
:type episode: :class:`due.episode.Episode`
:param preprocess_f: when given, sentences will be run through this function before being returned
:type preprocess_f: `func`
:return: a list of utterances and the list of their answers (one per utterance)
:rtype: (`list`, `list`)
"""
preprocess_f = lru_cache(4)(preprocess_f) if preprocess_f else lambda x: x
result_X = []
result_y = []
for e1, e2 in zip(episode.events, episode.events[1:]):
if not _is_utterance(e1) or not _is_utterance(e2):
raise NotImplementedError("Non-utterance Events are not supported yet")
if e1.agent != e2.agent and e1.payload and e2.payload:
result_X.append(preprocess_f(e1.payload))
result_y.append(preprocess_f(e2.payload))
return result_X, result_y
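# Hedged usage sketch: X[i] is an utterance and y[i] the reply that another
# agent gave immediately afterwards.
#
#     X, y = extract_utterance_pairs(episode, preprocess_f=str.strip)
#     training_pairs = list(zip(X, y))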
# Quick fix for circular dependencies
from due.event import Event
|
gpl-3.0
|
Bulochkin/tensorflow_pack
|
tensorflow/examples/learn/iris_val_based_early_stopping.py
|
62
|
2827
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with early stopping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
learn = tf.contrib.learn
def clean_folder(folder):
"""Cleans the given folder if it exists."""
try:
shutil.rmtree(folder)
except OSError:
pass
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=0.2, random_state=42)
val_monitor = learn.monitors.ValidationMonitor(
x_val, y_val, early_stopping_rounds=200)
model_dir = '/tmp/iris_model'
clean_folder(model_dir)
# classifier with early stopping on training data
classifier1 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir)
classifier1.fit(x=x_train, y=y_train, steps=2000)
predictions1 = list(classifier1.predict(x_test, as_iterable=True))
score1 = metrics.accuracy_score(y_test, predictions1)
model_dir = '/tmp/iris_model_val'
clean_folder(model_dir)
# classifier with early stopping on validation data, save frequently for
# monitor to pick up new checkpoints.
classifier2 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir,
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
classifier2.fit(x=x_train, y=y_train, steps=2000, monitors=[val_monitor])
predictions2 = list(classifier2.predict(x_test, as_iterable=True))
score2 = metrics.accuracy_score(y_test, predictions2)
# In many applications, the score is improved by using early stopping
print('score1: ', score1)
print('score2: ', score2)
print('score2 > score1: ', score2 > score1)
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/pandas/tests/sparse/test_frame.py
|
3
|
49520
|
# pylint: disable-msg=E1101,W0612
import operator
import pytest
from warnings import catch_warnings
from numpy import nan
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, bdate_range, Panel
from pandas.core.dtypes.common import (
is_bool_dtype,
is_float_dtype,
is_object_dtype,
is_float)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.tseries.offsets import BDay
from pandas.util import testing as tm
from pandas.compat import lrange
from pandas import compat
from pandas.core.sparse import frame as spf
from pandas._libs.sparse import BlockIndex, IntIndex
from pandas.core.sparse.api import SparseSeries, SparseDataFrame, SparseArray
from pandas.tests.frame.test_api import SharedWithSparse
class TestSparseDataFrame(SharedWithSparse):
klass = SparseDataFrame
def setup_method(self, method):
self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10, dtype=np.float64),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
self.dates = bdate_range('1/1/2011', periods=10)
self.orig = pd.DataFrame(self.data, index=self.dates)
self.iorig = pd.DataFrame(self.data, index=self.dates)
self.frame = SparseDataFrame(self.data, index=self.dates)
self.iframe = SparseDataFrame(self.data, index=self.dates,
default_kind='integer')
values = self.frame.values.copy()
values[np.isnan(values)] = 0
self.zorig = pd.DataFrame(values, columns=['A', 'B', 'C', 'D'],
index=self.dates)
self.zframe = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=0, index=self.dates)
values = self.frame.values.copy()
values[np.isnan(values)] = 2
self.fill_orig = pd.DataFrame(values, columns=['A', 'B', 'C', 'D'],
index=self.dates)
self.fill_frame = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=2,
index=self.dates)
self.empty = SparseDataFrame()
def test_fill_value_when_combine_const(self):
# GH12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
df = SparseDataFrame({'foo': dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_sp_frame_equal(res, exp)
def test_as_matrix(self):
empty = self.empty.as_matrix()
assert empty.shape == (0, 0)
no_cols = SparseDataFrame(index=np.arange(10))
mat = no_cols.as_matrix()
assert mat.shape == (10, 0)
no_index = SparseDataFrame(columns=np.arange(10))
mat = no_index.as_matrix()
assert mat.shape == (0, 10)
def test_copy(self):
cp = self.frame.copy()
assert isinstance(cp, SparseDataFrame)
tm.assert_sp_frame_equal(cp, self.frame)
# as of v0.15.0
# this is now identical (but not is_a )
assert cp.index.identical(self.frame.index)
def test_constructor(self):
for col, series in compat.iteritems(self.frame):
assert isinstance(series, SparseSeries)
assert isinstance(self.iframe['A'].sp_index, IntIndex)
# constructed zframe from matrix above
assert self.zframe['A'].fill_value == 0
tm.assert_numpy_array_equal(pd.SparseArray([1., 2., 3., 4., 5., 6.]),
self.zframe['A'].values)
tm.assert_numpy_array_equal(np.array([0., 0., 0., 0., 1., 2.,
3., 4., 5., 6.]),
self.zframe['A'].to_dense().values)
# construct no data
sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
for col, series in compat.iteritems(sdf):
assert isinstance(series, SparseSeries)
# construct from nested dict
data = {}
for c, s in compat.iteritems(self.frame):
data[c] = s.to_dict()
sdf = SparseDataFrame(data)
tm.assert_sp_frame_equal(sdf, self.frame)
# TODO: test data is copied from inputs
# init dict with different index
idx = self.frame.index[:5]
cons = SparseDataFrame(
self.frame, index=idx, columns=self.frame.columns,
default_fill_value=self.frame.default_fill_value,
default_kind=self.frame.default_kind, copy=True)
reindexed = self.frame.reindex(idx)
tm.assert_sp_frame_equal(cons, reindexed, exact_indices=False)
# assert level parameter breaks reindex
with pytest.raises(TypeError):
self.frame.reindex(idx, level=0)
repr(self.frame)
def test_constructor_ndarray(self):
# no index or columns
sp = SparseDataFrame(self.frame.values)
# 1d
sp = SparseDataFrame(self.data['A'], index=self.dates, columns=['A'])
tm.assert_sp_frame_equal(sp, self.frame.reindex(columns=['A']))
# raise on level argument
pytest.raises(TypeError, self.frame.reindex, columns=['A'],
level=1)
# wrong length index / columns
with tm.assert_raises_regex(ValueError, "^Index length"):
SparseDataFrame(self.frame.values, index=self.frame.index[:-1])
with tm.assert_raises_regex(ValueError, "^Column length"):
SparseDataFrame(self.frame.values, columns=self.frame.columns[:-1])
# GH 9272
def test_constructor_empty(self):
sp = SparseDataFrame()
assert len(sp.index) == 0
assert len(sp.columns) == 0
def test_constructor_dataframe(self):
dense = self.frame.to_dense()
sp = SparseDataFrame(dense)
tm.assert_sp_frame_equal(sp, self.frame)
def test_constructor_convert_index_once(self):
arr = np.array([1.5, 2.5, 3.5])
sdf = SparseDataFrame(columns=lrange(4), index=arr)
assert sdf[0].index is sdf[1].index
def test_constructor_from_series(self):
# GH 2873
x = Series(np.random.randn(10000), name='a')
x = x.to_sparse(fill_value=0)
assert isinstance(x, SparseSeries)
df = SparseDataFrame(x)
assert isinstance(df, SparseDataFrame)
x = Series(np.random.randn(10000), name='a')
y = Series(np.random.randn(10000), name='b')
x2 = x.astype(float)
x2.loc[:9998] = np.NaN
# TODO: x_sparse is unused...fix
x_sparse = x2.to_sparse(fill_value=np.NaN) # noqa
# Currently fails too with weird ufunc error
# df1 = SparseDataFrame([x_sparse, y])
y.loc[:9998] = 0
# TODO: y_sparse is unused...fix
y_sparse = y.to_sparse(fill_value=0) # noqa
# without sparse value raises error
# df2 = SparseDataFrame([x2_sparse, y])
def test_constructor_preserve_attr(self):
# GH 13866
arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
assert arr.dtype == np.int64
assert arr.fill_value == 0
df = pd.SparseDataFrame({'x': arr})
assert df['x'].dtype == np.int64
assert df['x'].fill_value == 0
s = pd.SparseSeries(arr, name='x')
assert s.dtype == np.int64
assert s.fill_value == 0
df = pd.SparseDataFrame(s)
assert df['x'].dtype == np.int64
assert df['x'].fill_value == 0
df = pd.SparseDataFrame({'x': s})
assert df['x'].dtype == np.int64
assert df['x'].fill_value == 0
def test_constructor_nan_dataframe(self):
# GH 10079
trains = np.arange(100)
tresholds = [10, 20, 30, 40, 50, 60]
tuples = [(i, j) for i in trains for j in tresholds]
index = pd.MultiIndex.from_tuples(tuples,
names=['trains', 'tresholds'])
matrix = np.empty((len(index), len(trains)))
matrix.fill(np.nan)
df = pd.DataFrame(matrix, index=index, columns=trains, dtype=float)
result = df.to_sparse()
expected = pd.SparseDataFrame(matrix, index=index, columns=trains,
dtype=float)
tm.assert_sp_frame_equal(result, expected)
def test_type_coercion_at_construction(self):
# GH 15682
result = pd.SparseDataFrame(
{'a': [1, 0, 0], 'b': [0, 1, 0], 'c': [0, 0, 1]}, dtype='uint8',
default_fill_value=0)
expected = pd.SparseDataFrame(
{'a': pd.SparseSeries([1, 0, 0], dtype='uint8'),
'b': pd.SparseSeries([0, 1, 0], dtype='uint8'),
'c': pd.SparseSeries([0, 0, 1], dtype='uint8')},
default_fill_value=0)
tm.assert_sp_frame_equal(result, expected)
def test_dtypes(self):
df = DataFrame(np.random.randn(10000, 4))
df.loc[:9998] = np.nan
sdf = df.to_sparse()
result = sdf.get_dtype_counts()
expected = Series({'float64': 4})
tm.assert_series_equal(result, expected)
def test_shape(self):
# see gh-10452
assert self.frame.shape == (10, 4)
assert self.iframe.shape == (10, 4)
assert self.zframe.shape == (10, 4)
assert self.fill_frame.shape == (10, 4)
def test_str(self):
df = DataFrame(np.random.randn(10000, 4))
df.loc[:9998] = np.nan
sdf = df.to_sparse()
str(sdf)
def test_array_interface(self):
res = np.sqrt(self.frame)
dres = np.sqrt(self.frame.to_dense())
tm.assert_frame_equal(res.to_dense(), dres)
def test_pickle(self):
def _test_roundtrip(frame, orig):
result = tm.round_trip_pickle(frame)
tm.assert_sp_frame_equal(frame, result)
tm.assert_frame_equal(result.to_dense(), orig, check_dtype=False)
_test_roundtrip(SparseDataFrame(), DataFrame())
self._check_all(_test_roundtrip)
def test_dense_to_sparse(self):
df = DataFrame({'A': [nan, nan, nan, 1, 2],
'B': [1, 2, nan, nan, nan]})
sdf = df.to_sparse()
assert isinstance(sdf, SparseDataFrame)
assert np.isnan(sdf.default_fill_value)
assert isinstance(sdf['A'].sp_index, BlockIndex)
tm.assert_frame_equal(sdf.to_dense(), df)
sdf = df.to_sparse(kind='integer')
assert isinstance(sdf['A'].sp_index, IntIndex)
df = DataFrame({'A': [0, 0, 0, 1, 2],
'B': [1, 2, 0, 0, 0]}, dtype=float)
sdf = df.to_sparse(fill_value=0)
assert sdf.default_fill_value == 0
tm.assert_frame_equal(sdf.to_dense(), df)
def test_density(self):
df = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6])
assert df.density == 0.7
df = SparseDataFrame({'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]})
assert df.density == 0.75
def test_sparse_to_dense(self):
pass
def test_sparse_series_ops(self):
self._check_frame_ops(self.frame)
def test_sparse_series_ops_i(self):
self._check_frame_ops(self.iframe)
def test_sparse_series_ops_z(self):
self._check_frame_ops(self.zframe)
def test_sparse_series_ops_fill(self):
self._check_frame_ops(self.fill_frame)
def _check_frame_ops(self, frame):
def _compare_to_dense(a, b, da, db, op):
sparse_result = op(a, b)
dense_result = op(da, db)
fill = sparse_result.default_fill_value
dense_result = dense_result.to_sparse(fill_value=fill)
tm.assert_sp_frame_equal(sparse_result, dense_result,
exact_indices=False)
if isinstance(a, DataFrame) and isinstance(db, DataFrame):
mixed_result = op(a, db)
assert isinstance(mixed_result, SparseDataFrame)
tm.assert_sp_frame_equal(mixed_result, sparse_result,
exact_indices=False)
opnames = ['add', 'sub', 'mul', 'truediv', 'floordiv']
ops = [getattr(operator, name) for name in opnames]
fidx = frame.index
# time series operations
series = [frame['A'], frame['B'], frame['C'], frame['D'],
frame['A'].reindex(fidx[:7]), frame['A'].reindex(fidx[::2]),
SparseSeries(
[], index=[])]
for op in opnames:
_compare_to_dense(frame, frame[::2], frame.to_dense(),
frame[::2].to_dense(), getattr(operator, op))
# 2304, no auto-broadcasting
for i, s in enumerate(series):
f = lambda a, b: getattr(a, op)(b, axis='index')
_compare_to_dense(frame, s, frame.to_dense(), s.to_dense(), f)
# rops are not implemented
# _compare_to_dense(s, frame, s.to_dense(),
# frame.to_dense(), f)
# cross-sectional operations
series = [frame.xs(fidx[0]), frame.xs(fidx[3]), frame.xs(fidx[5]),
frame.xs(fidx[7]), frame.xs(fidx[5])[:2]]
for op in ops:
for s in series:
_compare_to_dense(frame, s, frame.to_dense(), s, op)
_compare_to_dense(s, frame, s, frame.to_dense(), op)
# it works!
result = self.frame + self.frame.loc[:, ['A', 'B']] # noqa
def test_op_corners(self):
empty = self.empty + self.empty
assert empty.empty
foo = self.frame + self.empty
assert isinstance(foo.index, DatetimeIndex)
tm.assert_frame_equal(foo, self.frame * np.nan)
foo = self.empty + self.frame
tm.assert_frame_equal(foo, self.frame * np.nan)
def test_scalar_ops(self):
pass
def test_getitem(self):
# 1585 select multiple columns
sdf = SparseDataFrame(index=[0, 1, 2], columns=['a', 'b', 'c'])
result = sdf[['a', 'b']]
exp = sdf.reindex(columns=['a', 'b'])
tm.assert_sp_frame_equal(result, exp)
pytest.raises(Exception, sdf.__getitem__, ['a', 'd'])
def test_iloc(self):
# 2227
result = self.frame.iloc[:, 0]
assert isinstance(result, SparseSeries)
tm.assert_sp_series_equal(result, self.frame['A'])
# preserve sparse index type. #2251
data = {'A': [0, 1]}
iframe = SparseDataFrame(data, default_kind='integer')
tm.assert_class_equal(iframe['A'].sp_index,
iframe.iloc[:, 0].sp_index)
def test_set_value(self):
# ok, as the index gets converted to object
frame = self.frame.copy()
res = frame.set_value('foobar', 'B', 1.5)
assert res.index.dtype == 'object'
res = self.frame
res.index = res.index.astype(object)
res = self.frame.set_value('foobar', 'B', 1.5)
assert res is not self.frame
assert res.index[-1] == 'foobar'
assert res.get_value('foobar', 'B') == 1.5
res2 = res.set_value('foobar', 'qux', 1.5)
assert res2 is not res
tm.assert_index_equal(res2.columns,
pd.Index(list(self.frame.columns) + ['qux']))
assert res2.get_value('foobar', 'qux') == 1.5
def test_fancy_index_misc(self):
# axis = 0
sliced = self.frame.iloc[-2:, :]
expected = self.frame.reindex(index=self.frame.index[-2:])
tm.assert_sp_frame_equal(sliced, expected)
# axis = 1
sliced = self.frame.iloc[:, -2:]
expected = self.frame.reindex(columns=self.frame.columns[-2:])
tm.assert_sp_frame_equal(sliced, expected)
def test_getitem_overload(self):
# slicing
sl = self.frame[:20]
tm.assert_sp_frame_equal(sl, self.frame.reindex(self.frame.index[:20]))
# boolean indexing
d = self.frame.index[5]
indexer = self.frame.index > d
subindex = self.frame.index[indexer]
subframe = self.frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
pytest.raises(Exception, self.frame.__getitem__, indexer[:-1])
def test_setitem(self):
def _check_frame(frame, orig):
N = len(frame)
# insert SparseSeries
frame['E'] = frame['A']
assert isinstance(frame['E'], SparseSeries)
tm.assert_sp_series_equal(frame['E'], frame['A'],
check_names=False)
# insert SparseSeries differently-indexed
to_insert = frame['A'][::2]
frame['E'] = to_insert
expected = to_insert.to_dense().reindex(frame.index)
result = frame['E'].to_dense()
tm.assert_series_equal(result, expected, check_names=False)
assert result.name == 'E'
# insert Series
frame['F'] = frame['A'].to_dense()
assert isinstance(frame['F'], SparseSeries)
tm.assert_sp_series_equal(frame['F'], frame['A'],
check_names=False)
# insert Series differently-indexed
to_insert = frame['A'].to_dense()[::2]
frame['G'] = to_insert
expected = to_insert.reindex(frame.index)
expected.name = 'G'
tm.assert_series_equal(frame['G'].to_dense(), expected)
# insert ndarray
frame['H'] = np.random.randn(N)
assert isinstance(frame['H'], SparseSeries)
to_sparsify = np.random.randn(N)
to_sparsify[N // 2:] = frame.default_fill_value
frame['I'] = to_sparsify
assert len(frame['I'].sp_values) == N // 2
# insert ndarray wrong size
pytest.raises(Exception, frame.__setitem__, 'foo',
np.random.randn(N - 1))
# scalar value
frame['J'] = 5
assert len(frame['J'].sp_values) == N
assert (frame['J'].sp_values == 5).all()
frame['K'] = frame.default_fill_value
assert len(frame['K'].sp_values) == 0
self._check_all(_check_frame)
def test_setitem_corner(self):
self.frame['a'] = self.frame['B']
tm.assert_sp_series_equal(self.frame['a'], self.frame['B'],
check_names=False)
def test_setitem_array(self):
arr = self.frame['B']
self.frame['E'] = arr
tm.assert_sp_series_equal(self.frame['E'], self.frame['B'],
check_names=False)
self.frame['F'] = arr[:-1]
index = self.frame.index[:-1]
tm.assert_sp_series_equal(self.frame['E'].reindex(index),
self.frame['F'].reindex(index),
check_names=False)
def test_delitem(self):
A = self.frame['A']
C = self.frame['C']
del self.frame['B']
assert 'B' not in self.frame
tm.assert_sp_series_equal(self.frame['A'], A)
tm.assert_sp_series_equal(self.frame['C'], C)
del self.frame['D']
assert 'D' not in self.frame
del self.frame['A']
assert 'A' not in self.frame
def test_set_columns(self):
self.frame.columns = self.frame.columns
pytest.raises(Exception, setattr, self.frame, 'columns',
self.frame.columns[:-1])
def test_set_index(self):
self.frame.index = self.frame.index
pytest.raises(Exception, setattr, self.frame, 'index',
self.frame.index[:-1])
def test_append(self):
a = self.frame[:5]
b = self.frame[5:]
appended = a.append(b)
tm.assert_sp_frame_equal(appended, self.frame, exact_indices=False)
a = self.frame.iloc[:5, :3]
b = self.frame.iloc[5:]
appended = a.append(b)
tm.assert_sp_frame_equal(appended.iloc[:, :3], self.frame.iloc[:, :3],
exact_indices=False)
def test_apply(self):
applied = self.frame.apply(np.sqrt)
assert isinstance(applied, SparseDataFrame)
tm.assert_almost_equal(applied.values, np.sqrt(self.frame.values))
applied = self.fill_frame.apply(np.sqrt)
assert applied['A'].fill_value == np.sqrt(2)
# agg / broadcast
broadcasted = self.frame.apply(np.sum, broadcast=True)
assert isinstance(broadcasted, SparseDataFrame)
exp = self.frame.to_dense().apply(np.sum, broadcast=True)
tm.assert_frame_equal(broadcasted.to_dense(), exp)
assert self.empty.apply(np.sqrt) is self.empty
from pandas.core import nanops
applied = self.frame.apply(np.sum)
tm.assert_series_equal(applied,
self.frame.to_dense().apply(nanops.nansum))
def test_apply_nonuq(self):
orig = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=['a', 'a', 'c'])
sparse = orig.to_sparse()
res = sparse.apply(lambda s: s[0], axis=1)
exp = orig.apply(lambda s: s[0], axis=1)
# dtype must be kept
assert res.dtype == np.int64
# ToDo: apply must return subclassed dtype
assert isinstance(res, pd.Series)
tm.assert_series_equal(res.to_dense(), exp)
# df.T breaks
sparse = orig.T.to_sparse()
res = sparse.apply(lambda s: s[0], axis=0) # noqa
exp = orig.T.apply(lambda s: s[0], axis=0)
# TODO: no non-unique columns supported in sparse yet
# tm.assert_series_equal(res.to_dense(), exp)
def test_applymap(self):
# just test that it works
result = self.frame.applymap(lambda x: x * 2)
assert isinstance(result, SparseDataFrame)
def test_astype(self):
sparse = pd.SparseDataFrame({'A': SparseArray([1, 2, 3, 4],
dtype=np.int64),
'B': SparseArray([4, 5, 6, 7],
dtype=np.int64)})
assert sparse['A'].dtype == np.int64
assert sparse['B'].dtype == np.int64
res = sparse.astype(np.float64)
exp = pd.SparseDataFrame({'A': SparseArray([1., 2., 3., 4.],
fill_value=0.),
'B': SparseArray([4., 5., 6., 7.],
fill_value=0.)},
default_fill_value=np.nan)
tm.assert_sp_frame_equal(res, exp)
assert res['A'].dtype == np.float64
assert res['B'].dtype == np.float64
sparse = pd.SparseDataFrame({'A': SparseArray([0, 2, 0, 4],
dtype=np.int64),
'B': SparseArray([0, 5, 0, 7],
dtype=np.int64)},
default_fill_value=0)
assert sparse['A'].dtype == np.int64
assert sparse['B'].dtype == np.int64
res = sparse.astype(np.float64)
exp = pd.SparseDataFrame({'A': SparseArray([0., 2., 0., 4.],
fill_value=0.),
'B': SparseArray([0., 5., 0., 7.],
fill_value=0.)},
default_fill_value=0.)
tm.assert_sp_frame_equal(res, exp)
assert res['A'].dtype == np.float64
assert res['B'].dtype == np.float64
def test_astype_bool(self):
sparse = pd.SparseDataFrame({'A': SparseArray([0, 2, 0, 4],
fill_value=0,
dtype=np.int64),
'B': SparseArray([0, 5, 0, 7],
fill_value=0,
dtype=np.int64)},
default_fill_value=0)
assert sparse['A'].dtype == np.int64
assert sparse['B'].dtype == np.int64
res = sparse.astype(bool)
exp = pd.SparseDataFrame({'A': SparseArray([False, True, False, True],
dtype=np.bool,
fill_value=False),
'B': SparseArray([False, True, False, True],
dtype=np.bool,
fill_value=False)},
default_fill_value=False)
tm.assert_sp_frame_equal(res, exp)
assert res['A'].dtype == np.bool
assert res['B'].dtype == np.bool
def test_fillna(self):
df = self.zframe.reindex(lrange(5))
dense = self.zorig.reindex(lrange(5))
result = df.fillna(0)
expected = dense.fillna(0)
tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),
exact_indices=False)
tm.assert_frame_equal(result.to_dense(), expected)
result = df.copy()
result.fillna(0, inplace=True)
expected = dense.fillna(0)
tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),
exact_indices=False)
tm.assert_frame_equal(result.to_dense(), expected)
result = df.copy()
result = df['A']
result.fillna(0, inplace=True)
expected = dense['A'].fillna(0)
# this changes internal SparseArray repr
# tm.assert_sp_series_equal(result, expected.to_sparse(fill_value=0))
tm.assert_series_equal(result.to_dense(), expected)
def test_fillna_fill_value(self):
df = pd.DataFrame({'A': [1, 0, 0], 'B': [np.nan, np.nan, 4]})
sparse = pd.SparseDataFrame(df)
tm.assert_frame_equal(sparse.fillna(-1).to_dense(),
df.fillna(-1), check_dtype=False)
sparse = pd.SparseDataFrame(df, default_fill_value=0)
tm.assert_frame_equal(sparse.fillna(-1).to_dense(),
df.fillna(-1), check_dtype=False)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_rename(self):
result = self.frame.rename(index=str)
expected = SparseDataFrame(self.data, index=self.dates.strftime(
"%Y-%m-%d %H:%M:%S"))
tm.assert_sp_frame_equal(result, expected)
result = self.frame.rename(columns=lambda x: '%s%d' % (x, len(x)))
data = {'A1': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B1': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C1': np.arange(10, dtype=np.float64),
'D1': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
expected = SparseDataFrame(data, index=self.dates)
tm.assert_sp_frame_equal(result, expected)
def test_corr(self):
res = self.frame.corr()
tm.assert_frame_equal(res, self.frame.to_dense().corr())
def test_describe(self):
self.frame['foo'] = np.nan
self.frame.get_dtype_counts()
str(self.frame)
desc = self.frame.describe() # noqa
def test_join(self):
left = self.frame.loc[:, ['A', 'B']]
right = self.frame.loc[:, ['C', 'D']]
joined = left.join(right)
tm.assert_sp_frame_equal(joined, self.frame, exact_indices=False)
right = self.frame.loc[:, ['B', 'D']]
pytest.raises(Exception, left.join, right)
with tm.assert_raises_regex(ValueError,
'Other Series must have a name'):
self.frame.join(Series(
np.random.randn(len(self.frame)), index=self.frame.index))
def test_reindex(self):
def _check_frame(frame):
index = frame.index
sidx = index[::2]
sidx2 = index[:5] # noqa
sparse_result = frame.reindex(sidx)
dense_result = frame.to_dense().reindex(sidx)
tm.assert_frame_equal(sparse_result.to_dense(), dense_result)
tm.assert_frame_equal(frame.reindex(list(sidx)).to_dense(),
dense_result)
sparse_result2 = sparse_result.reindex(index)
dense_result2 = dense_result.reindex(index)
tm.assert_frame_equal(sparse_result2.to_dense(), dense_result2)
# propagate CORRECT fill value
tm.assert_almost_equal(sparse_result.default_fill_value,
frame.default_fill_value)
tm.assert_almost_equal(sparse_result['A'].fill_value,
frame['A'].fill_value)
# length zero
length_zero = frame.reindex([])
assert len(length_zero) == 0
assert len(length_zero.columns) == len(frame.columns)
assert len(length_zero['A']) == 0
# frame being reindexed has length zero
length_n = length_zero.reindex(index)
assert len(length_n) == len(frame)
assert len(length_n.columns) == len(frame.columns)
assert len(length_n['A']) == len(frame)
# reindex columns
reindexed = frame.reindex(columns=['A', 'B', 'Z'])
assert len(reindexed.columns) == 3
tm.assert_almost_equal(reindexed['Z'].fill_value,
frame.default_fill_value)
assert np.isnan(reindexed['Z'].sp_values).all()
_check_frame(self.frame)
_check_frame(self.iframe)
_check_frame(self.zframe)
_check_frame(self.fill_frame)
# with copy=False
reindexed = self.frame.reindex(self.frame.index, copy=False)
reindexed['F'] = reindexed['A']
assert 'F' in self.frame
reindexed = self.frame.reindex(self.frame.index)
reindexed['G'] = reindexed['A']
assert 'G' not in self.frame
def test_reindex_fill_value(self):
rng = bdate_range('20110110', periods=20)
result = self.zframe.reindex(rng, fill_value=0)
exp = self.zorig.reindex(rng, fill_value=0)
exp = exp.to_sparse(self.zframe.default_fill_value)
tm.assert_sp_frame_equal(result, exp)
def test_reindex_method(self):
sparse = SparseDataFrame(data=[[11., 12., 14.],
[21., 22., 24.],
[41., 42., 44.]],
index=[1, 2, 4],
columns=[1, 2, 4],
dtype=float)
# Over indices
# default method
result = sparse.reindex(index=range(6))
expected = SparseDataFrame(data=[[nan, nan, nan],
[11., 12., 14.],
[21., 22., 24.],
[nan, nan, nan],
[41., 42., 44.],
[nan, nan, nan]],
index=range(6),
columns=[1, 2, 4],
dtype=float)
tm.assert_sp_frame_equal(result, expected)
# method='bfill'
result = sparse.reindex(index=range(6), method='bfill')
expected = SparseDataFrame(data=[[11., 12., 14.],
[11., 12., 14.],
[21., 22., 24.],
[41., 42., 44.],
[41., 42., 44.],
[nan, nan, nan]],
index=range(6),
columns=[1, 2, 4],
dtype=float)
tm.assert_sp_frame_equal(result, expected)
# method='ffill'
result = sparse.reindex(index=range(6), method='ffill')
expected = SparseDataFrame(data=[[nan, nan, nan],
[11., 12., 14.],
[21., 22., 24.],
[21., 22., 24.],
[41., 42., 44.],
[41., 42., 44.]],
index=range(6),
columns=[1, 2, 4],
dtype=float)
tm.assert_sp_frame_equal(result, expected)
# Over columns
# default method
result = sparse.reindex(columns=range(6))
expected = SparseDataFrame(data=[[nan, 11., 12., nan, 14., nan],
[nan, 21., 22., nan, 24., nan],
[nan, 41., 42., nan, 44., nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
tm.assert_sp_frame_equal(result, expected)
# method='bfill'
with pytest.raises(NotImplementedError):
sparse.reindex(columns=range(6), method='bfill')
# method='ffill'
with pytest.raises(NotImplementedError):
sparse.reindex(columns=range(6), method='ffill')
def test_take(self):
result = self.frame.take([1, 0, 2], axis=1)
expected = self.frame.reindex(columns=['B', 'A', 'C'])
tm.assert_sp_frame_equal(result, expected)
def test_to_dense(self):
def _check(frame, orig):
dense_dm = frame.to_dense()
tm.assert_frame_equal(frame, dense_dm)
tm.assert_frame_equal(dense_dm, orig, check_dtype=False)
self._check_all(_check)
def test_stack_sparse_frame(self):
with catch_warnings(record=True):
def _check(frame):
dense_frame = frame.to_dense() # noqa
wp = Panel.from_dict({'foo': frame})
from_dense_lp = wp.to_frame()
from_sparse_lp = spf.stack_sparse_frame(frame)
tm.assert_numpy_array_equal(from_dense_lp.values,
from_sparse_lp.values)
_check(self.frame)
_check(self.iframe)
# for now
pytest.raises(Exception, _check, self.zframe)
pytest.raises(Exception, _check, self.fill_frame)
def test_transpose(self):
def _check(frame, orig):
transposed = frame.T
untransposed = transposed.T
tm.assert_sp_frame_equal(frame, untransposed)
tm.assert_frame_equal(frame.T.to_dense(), orig.T)
tm.assert_frame_equal(frame.T.T.to_dense(), orig.T.T)
tm.assert_sp_frame_equal(frame, frame.T.T, exact_indices=False)
self._check_all(_check)
def test_shift(self):
def _check(frame, orig):
shifted = frame.shift(0)
exp = orig.shift(0)
tm.assert_frame_equal(shifted.to_dense(), exp)
shifted = frame.shift(1)
exp = orig.shift(1)
tm.assert_frame_equal(shifted, exp)
shifted = frame.shift(-2)
exp = orig.shift(-2)
tm.assert_frame_equal(shifted, exp)
shifted = frame.shift(2, freq='B')
exp = orig.shift(2, freq='B')
exp = exp.to_sparse(frame.default_fill_value)
tm.assert_frame_equal(shifted, exp)
shifted = frame.shift(2, freq=BDay())
exp = orig.shift(2, freq=BDay())
exp = exp.to_sparse(frame.default_fill_value)
tm.assert_frame_equal(shifted, exp)
self._check_all(_check)
def test_count(self):
dense_result = self.frame.to_dense().count()
result = self.frame.count()
tm.assert_series_equal(result, dense_result)
result = self.frame.count(axis=None)
tm.assert_series_equal(result, dense_result)
result = self.frame.count(axis=0)
tm.assert_series_equal(result, dense_result)
result = self.frame.count(axis=1)
dense_result = self.frame.to_dense().count(axis=1)
# win32: don't check dtype
tm.assert_series_equal(result, dense_result, check_dtype=False)
def _check_all(self, check_func):
check_func(self.frame, self.orig)
check_func(self.iframe, self.iorig)
check_func(self.zframe, self.zorig)
check_func(self.fill_frame, self.fill_orig)
def test_numpy_transpose(self):
sdf = SparseDataFrame([1, 2, 3], index=[1, 2, 3], columns=['a'])
result = np.transpose(np.transpose(sdf))
tm.assert_sp_frame_equal(result, sdf)
msg = "the 'axes' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.transpose, sdf, axes=1)
def test_combine_first(self):
df = self.frame
result = df[::2].combine_first(df)
result2 = df[::2].combine_first(df.to_dense())
expected = df[::2].to_dense().combine_first(df.to_dense())
expected = expected.to_sparse(fill_value=df.default_fill_value)
tm.assert_sp_frame_equal(result, result2)
tm.assert_sp_frame_equal(result, expected)
def test_combine_add(self):
df = self.frame.to_dense()
df2 = df.copy()
df2['C'][:3] = np.nan
df['A'][:3] = 5.7
result = df.to_sparse().add(df2.to_sparse(), fill_value=0)
expected = df.add(df2, fill_value=0).to_sparse()
tm.assert_sp_frame_equal(result, expected)
def test_isin(self):
sparse_df = DataFrame({'flag': [1., 0., 1.]}).to_sparse(fill_value=0.)
xp = sparse_df[sparse_df.flag == 1.]
rs = sparse_df[sparse_df.flag.isin([1.])]
tm.assert_frame_equal(xp, rs)
def test_sparse_pow_issue(self):
# 2220
df = SparseDataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
# note : no error without nan
df = SparseDataFrame({'A': [nan, 0, 1]})
# note that 2 ** df works fine, also df ** 1
result = 1 ** df
r1 = result.take([0], 1)['A']
r2 = result['A']
assert len(r2.sp_values) == len(r1.sp_values)
def test_as_blocks(self):
df = SparseDataFrame({'A': [1.1, 3.3], 'B': [nan, -3.9]},
dtype='float64')
df_blocks = df.blocks
assert list(df_blocks.keys()) == ['float64']
tm.assert_frame_equal(df_blocks['float64'], df)
def test_nan_columnname(self):
# GH 8822
nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan])
nan_colname_sparse = nan_colname.to_sparse()
assert np.isnan(nan_colname_sparse.columns[0])
def test_isnull(self):
# GH 8276
df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan],
'B': [0, np.nan, np.nan, 2, np.nan]})
res = df.isnull()
exp = pd.SparseDataFrame({'A': [True, True, False, False, True],
'B': [False, True, True, False, True]},
default_fill_value=True)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan],
'B': [0, np.nan, 0, 2, np.nan]},
default_fill_value=0.)
res = df.isnull()
assert isinstance(res, pd.SparseDataFrame)
exp = pd.DataFrame({'A': [False, False, False, False, True],
'B': [False, True, False, False, True]})
tm.assert_frame_equal(res.to_dense(), exp)
def test_isnotnull(self):
# GH 8276
df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan],
'B': [0, np.nan, np.nan, 2, np.nan]})
res = df.isnotnull()
exp = pd.SparseDataFrame({'A': [False, False, True, True, False],
'B': [True, False, False, True, False]},
default_fill_value=False)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan],
'B': [0, np.nan, 0, 2, np.nan]},
default_fill_value=0.)
res = df.isnotnull()
assert isinstance(res, pd.SparseDataFrame)
exp = pd.DataFrame({'A': [True, True, True, True, False],
'B': [True, False, True, True, False]})
tm.assert_frame_equal(res.to_dense(), exp)
@pytest.mark.parametrize('index', [None, list('ab')]) # noqa: F811
@pytest.mark.parametrize('columns', [None, list('cd')])
@pytest.mark.parametrize('fill_value', [None, 0, np.nan])
@pytest.mark.parametrize('dtype', [bool, int, float, np.uint16])
def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype):
# GH 4343
tm.skip_if_no_package('scipy')
# Make one ndarray and from it one sparse matrix, both to be used for
# constructing frames and comparing results
arr = np.eye(2, dtype=dtype)
try:
spm = spmatrix(arr)
assert spm.dtype == arr.dtype
except (TypeError, AssertionError):
# If conversion to sparse fails for this spmatrix type and arr.dtype,
# then the combination is not currently supported in NumPy, so we
# can just skip testing it thoroughly
return
sdf = pd.SparseDataFrame(spm, index=index, columns=columns,
default_fill_value=fill_value)
# Expected result construction is kind of tricky for all
# dtype-fill_value combinations; easiest to cast to something generic
# and except later on
rarr = arr.astype(object)
rarr[arr == 0] = np.nan
expected = pd.SparseDataFrame(rarr, index=index, columns=columns).fillna(
fill_value if fill_value is not None else np.nan)
# Assert frame is as expected
sdf_obj = sdf.astype(object)
tm.assert_sp_frame_equal(sdf_obj, expected)
tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense())
# Assert spmatrices equal
assert dict(sdf.to_coo().todok()) == dict(spm.todok())
# Ensure dtype is preserved if possible
was_upcast = ((fill_value is None or is_float(fill_value)) and
not is_object_dtype(dtype) and
not is_float_dtype(dtype))
res_dtype = (bool if is_bool_dtype(dtype) else
float if was_upcast else
dtype)
tm.assert_contains_all(sdf.dtypes, {np.dtype(res_dtype)})
assert sdf.to_coo().dtype == res_dtype
# However, adding a str column results in an upcast to object
sdf['strings'] = np.arange(len(sdf)).astype(str)
assert sdf.to_coo().dtype == np.object_
@pytest.mark.parametrize('fill_value', [None, 0, np.nan]) # noqa: F811
def test_from_to_scipy_object(spmatrix, fill_value):
# GH 4343
dtype = object
columns = list('cd')
index = list('ab')
tm.skip_if_no_package('scipy', max_version='0.19.0')
# Make one ndarray and from it one sparse matrix, both to be used for
# constructing frames and comparing results
arr = np.eye(2, dtype=dtype)
try:
spm = spmatrix(arr)
assert spm.dtype == arr.dtype
except (TypeError, AssertionError):
# If conversion to sparse fails for this spmatrix type and arr.dtype,
# then the combination is not currently supported in NumPy, so we
# can just skip testing it thoroughly
return
sdf = pd.SparseDataFrame(spm, index=index, columns=columns,
default_fill_value=fill_value)
# Expected result construction is kind of tricky for all
# dtype-fill_value combinations; easiest to cast to something generic
# and except later on
rarr = arr.astype(object)
rarr[arr == 0] = np.nan
expected = pd.SparseDataFrame(rarr, index=index, columns=columns).fillna(
fill_value if fill_value is not None else np.nan)
# Assert frame is as expected
sdf_obj = sdf.astype(object)
tm.assert_sp_frame_equal(sdf_obj, expected)
tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense())
# Assert spmatrices equal
assert dict(sdf.to_coo().todok()) == dict(spm.todok())
# Ensure dtype is preserved if possible
res_dtype = object
tm.assert_contains_all(sdf.dtypes, {np.dtype(res_dtype)})
assert sdf.to_coo().dtype == res_dtype
class TestSparseDataFrameArithmetic(object):
def test_numeric_op_scalar(self):
df = pd.DataFrame({'A': [nan, nan, 0, 1, ],
'B': [0, 1, 2, nan],
'C': [1., 2., 3., 4.],
'D': [nan, nan, nan, nan]})
sparse = df.to_sparse()
tm.assert_sp_frame_equal(sparse + 1, (df + 1).to_sparse())
def test_comparison_op_scalar(self):
# GH 13001
df = pd.DataFrame({'A': [nan, nan, 0, 1, ],
'B': [0, 1, 2, nan],
'C': [1., 2., 3., 4.],
'D': [nan, nan, nan, nan]})
sparse = df.to_sparse()
# comparison changes internal repr, compare with dense
res = sparse > 1
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), df > 1)
res = sparse != 0
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), df != 0)
class TestSparseDataFrameAnalytics(object):
def setup_method(self, method):
self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10, dtype=float),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
self.dates = bdate_range('1/1/2011', periods=10)
self.frame = SparseDataFrame(self.data, index=self.dates)
def test_cumsum(self):
expected = SparseDataFrame(self.frame.to_dense().cumsum())
result = self.frame.cumsum()
tm.assert_sp_frame_equal(result, expected)
result = self.frame.cumsum(axis=None)
tm.assert_sp_frame_equal(result, expected)
result = self.frame.cumsum(axis=0)
tm.assert_sp_frame_equal(result, expected)
def test_numpy_cumsum(self):
result = np.cumsum(self.frame)
expected = SparseDataFrame(self.frame.to_dense().cumsum())
tm.assert_sp_frame_equal(result, expected)
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
self.frame, dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
self.frame, out=result)
def test_numpy_func_call(self):
# no exception should be raised even though
# numpy passes in 'axis=None' or 'axis=-1'
funcs = ['sum', 'cumsum', 'var',
'mean', 'prod', 'cumprod',
'std', 'min', 'max']
for func in funcs:
getattr(np, func)(self.frame)
|
mit
|
rubikloud/scikit-learn
|
examples/plot_johnson_lindenstrauss_bound.py
|
127
|
7477
|
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` drastically reduces the minimal number of
dimensions ``n_components`` required for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text documents (TF-IDF word frequencies) dataset:
- for the digits dataset, 8x8 gray-level pixel data for 500
handwritten digit pictures are randomly projected to spaces of various
larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset, which only has 64 features
in the input space, does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
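# A quick sanity check of the bound quoted in the docstring (a sketch, not part
# of the original example): the closed-form expression and scikit-learn's
# helper should agree up to integer truncation.
#
#     n, eps = 1000, 0.1
#     bound = 4 * np.log(n) / (eps ** 2 / 2 - eps ** 3 / 3)
#     print(int(bound), johnson_lindenstrauss_min_dim(n, eps=eps))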
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
|
bsd-3-clause
|
yask123/scikit-learn
|
sklearn/linear_model/bayes.py
|
220
|
15248
|
"""
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
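# Hedged sketch (not part of scikit-learn's API): the evidence-maximization
# updates used in the fit loop above, written out as a standalone helper so the
# closed forms are easy to read. All names below are local to this sketch.
def _bayesian_ridge_update_sketch(eigen_vals, coef, rmse, n_samples,
                                  alpha, lambda_, alpha_1=1.e-6, alpha_2=1.e-6,
                                  lambda_1=1.e-6, lambda_2=1.e-6):
    import numpy as np
    # effective number of well-determined parameters
    gamma = np.sum((alpha * eigen_vals) / (lambda_ + alpha * eigen_vals))
    # precision of the weights
    lambda_new = (gamma + 2. * lambda_1) / (np.sum(coef ** 2) + 2. * lambda_2)
    # precision of the noise
    alpha_new = (n_samples - gamma + 2. * alpha_1) / (rmse + 2. * alpha_2)
    return alpha_new, lambda_new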
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
    -----
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
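# Hedged sketch (hypothetical helper, not used by the class): the ARD pruning
# rule applied in the loop above keeps only weights whose estimated precision
# stays below ``threshold_lambda`` and zeroes out the rest.
def _ard_prune_sketch(coef, lambda_, threshold_lambda=1.e+4):
    import numpy as np
    keep_lambda = lambda_ < threshold_lambda  # low precision -> keep the weight
    pruned_coef = np.where(keep_lambda, coef, 0.)
    return pruned_coef, keep_lambda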
|
bsd-3-clause
|
telefar/stockEye
|
coursera-compinvest1-master/coursera-compinvest1-master/homework/homework/homework1/homework1.py
|
1
|
3459
|
'''
(c) 2013 Remy Marquis
Computational Investing @ Georgia Tech
Homework 1
Write a Python function that can simulate and assess the performance of a 4 stock portfolio.
'''
# Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def simulate(dt_start, dt_end, ls_symbols, ls_allocation):
'''
Simulate function
'''
# Get closing prices (hours=16)
dt_timeofday = dt.timedelta(hours=16)
# Get a list of trading days.
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
# Open Yahoo data set and read (adjusted) closing price
ls_keys = ['close']
c_dataobj = da.DataAccess('Yahoo')
ldf_data = c_dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
# Compute portfolio value
tmp = d_data['close'].values.copy()
d_normal = tmp / tmp[0,:]
alloc = np.array(ls_allocation).reshape(4,1)
portVal = np.dot(d_normal, alloc)
# Compute daily returns
dailyVal = portVal.copy()
tsu.returnize0(dailyVal)
# Compute statistics
daily_ret = np.mean(dailyVal)
vol = np.std(dailyVal)
sharpe = np.sqrt(252) * daily_ret / vol
cum_ret = portVal[portVal.shape[0] -1][0]
# return
return vol, daily_ret, sharpe, cum_ret
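# Illustrative sketch (not called by the homework): the Sharpe ratio above
# annualizes the mean and volatility of daily returns with sqrt(252), the
# usual number of trading days per year.
def sharpe_ratio_sketch(daily_returns):
    return np.sqrt(252) * np.mean(daily_returns) / np.std(daily_returns)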
def print_simulate( dt_start, dt_end, ls_symbols, ls_allocation ):
'''
Print results
'''
# print
vol, daily_ret, sharpe, cum_ret = simulate( dt_start, dt_end, ls_symbols, ls_allocation )
print "Start Date: ", dt_start
print "End Date: ", dt_end
print "Symbols: ", ls_symbols
print "Optimal Allocations: ", ls_allocation
print "Sharpe Ratio: ", sharpe
print "Volatility (stdev): ", vol
print "Average Daily Return: ", daily_ret
print "Cumulative Return: ", cum_ret
def optimal_allocation_4( dt_start, dt_end, ls_symbols ):
max_sharpe = -1
max_alloc = [0.0, 0.0, 0.0, 0.0]
for i in range(0,11):
for j in range(0,11-i):
for k in range(0,11-i-j):
for l in range (0,11-i-j-k):
if (i + j + l + k) == 10:
alloc = [float(i)/10, float(j)/10, float(k)/10, float(l)/10]
vol, daily_ret, sharpe, cum_ret = simulate( dt_start, dt_end, ls_symbols, alloc )
if sharpe > max_sharpe:
max_sharpe = sharpe
max_alloc = alloc
return max_alloc
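# Equivalent sketch of the grid above (hypothetical helper, not used by main):
# enumerate every 4-asset allocation in 0.1 steps that sums to 1.0 with
# itertools instead of four nested loops; this yields 286 candidates.
def legal_allocations_sketch():
    import itertools
    return [[w / 10.0 for w in combo]
            for combo in itertools.product(range(11), repeat=4)
            if sum(combo) == 10]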
def main():
'''
Main function
'''
# vol, daily_ret, sharpe, cum_ret = simulate(startdate, enddate, ['GOOG','AAPL','GLD','XOM'], [0.2,0.3,0.4,0.1])
#ls_symbols = ['AAPL', 'GLD', 'GOOG', 'XOM']
#ls_symbols = ['AXP', 'HPQ', 'IBM', 'HNZ']
#ls_symbols = ['BRCM', 'TXN', 'AMD', 'ADI']
ls_symbols = ['C', 'GS', 'IBM', 'HNZ']
#ls_allocations = [0.4, 0.4, 0.0, 0.2]
ls_allocations = [0.4, 0.4, 0.0, 0.2]
dt_start = dt.datetime(2011, 1, 1)
dt_end = dt.datetime(2011, 12, 31)
# sanity check
#rint_simulate(dt_start, dt_end, ls_symbols, ls_allocations)
max_alloc = optimal_allocation_4( dt_start, dt_end, ls_symbols )
print "---"
#print max_alloc
print_simulate( dt_start, dt_end, ls_symbols, max_alloc )
if __name__ == '__main__':
main()
|
bsd-3-clause
|
thilbern/scikit-learn
|
examples/semi_supervised/plot_label_propagation_structure.py
|
247
|
2432
|
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
|
bsd-3-clause
|
spallavolu/scikit-learn
|
examples/ensemble/plot_forest_iris.py
|
335
|
6271
|
"""
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10-fold cross-validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
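# Hedged sketch reproducing the 10-fold cross-validation scores quoted in the
# module docstring (those were computed outside this example, on all four iris
# features). Depending on the scikit-learn version, cross_val_score lives in
# sklearn.model_selection or, in older releases, sklearn.cross_validation.
def _docstring_cv_scores_sketch():
    try:
        from sklearn.model_selection import cross_val_score
    except ImportError:
        from sklearn.cross_validation import cross_val_score
    for model in models:
        scores = cross_val_score(model, iris.data, iris.target, cv=10)
        print(str(type(model)).split(".")[-1][:-2],
              "mean CV accuracy: %.2f" % scores.mean())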
|
bsd-3-clause
|
btabibian/scikit-learn
|
sklearn/ensemble/tests/test_voting_classifier.py
|
15
|
14956
|
"""Testing for the VotingClassifier"""
import numpy as np
from sklearn.utils.testing import assert_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_estimator_init():
eclf = VotingClassifier(estimators=[])
msg = ('Invalid `estimators` attribute, `estimators` should be'
' a list of (string, estimator) tuples')
assert_raise_message(AttributeError, msg, eclf.fit, X, y)
clf = LogisticRegression(random_state=1)
eclf = VotingClassifier(estimators=[('lr', clf)], voting='error')
msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
msg = ('Number of classifiers and weights must be equal'
'; got 2 weights, 1 estimators')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr', clf), ('lr', clf)],
weights=[1, 2])
msg = "Names provided are not unique: ['lr', 'lr']"
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr__', clf)])
msg = "Estimator names must not contain __: got ['lr__']"
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('estimators', clf)])
msg = "Estimator names conflict with constructor arguments: ['estimators']"
assert_raise_message(ValueError, msg, eclf.fit, X, y)
def test_predictproba_hardvoting():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='hard')
msg = "predict_proba is not available when voting='hard'"
assert_raise_message(AttributeError, msg, eclf.predict_proba, X)
def test_notfitted():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='soft')
msg = ("This VotingClassifier instance is not fitted yet. Call \'fit\'"
" with appropriate arguments before using this method.")
assert_raise_message(NotFittedError, msg, eclf.predict_proba, X)
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
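# Hedged sketch of what the weighted soft vote above computes: with weights
# [2, 1, 1] the ensemble probabilities are simply the weighted mean of the
# per-classifier probabilities along the estimator axis.
def _weighted_soft_vote_sketch(probas, weights):
    """probas has shape (n_classifiers, n_samples, n_classes)."""
    return np.average(np.asarray(probas), axis=0, weights=weights)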
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
def test_parallel_predict():
"""Check parallel backend of VotingClassifier on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=1).fit(X, y)
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=2).fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
def test_sample_weight():
"""Tests sample_weight parameter of VotingClassifier"""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = SVC(probability=True, random_state=123)
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y, sample_weight=np.ones((len(y),)))
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
sample_weight = np.random.RandomState(123).uniform(size=(len(y),))
eclf3 = VotingClassifier(estimators=[('lr', clf1)], voting='soft')
eclf3.fit(X, y, sample_weight)
clf1.fit(X, y, sample_weight)
assert_array_equal(eclf3.predict(X), clf1.predict(X))
assert_array_equal(eclf3.predict_proba(X), clf1.predict_proba(X))
clf4 = KNeighborsClassifier()
eclf3 = VotingClassifier(estimators=[
('lr', clf1), ('svc', clf3), ('knn', clf4)],
voting='soft')
msg = ('Underlying estimator \'knn\' does not support sample weights.')
assert_raise_message(ValueError, msg, eclf3.fit, X, y, sample_weight)
def test_set_params():
"""set_params should be able to set estimators"""
clf1 = LogisticRegression(random_state=123, C=1.0)
clf2 = RandomForestClassifier(random_state=123, max_depth=None)
clf3 = GaussianNB()
eclf1 = VotingClassifier([('lr', clf1), ('rf', clf2)], voting='soft',
weights=[1, 2])
eclf1.fit(X, y)
eclf2 = VotingClassifier([('lr', clf1), ('nb', clf3)], voting='soft',
weights=[1, 2])
eclf2.set_params(nb=clf2).fit(X, y)
assert_false(hasattr(eclf2, 'nb'))
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
assert_equal(eclf2.estimators[0][1].get_params(), clf1.get_params())
assert_equal(eclf2.estimators[1][1].get_params(), clf2.get_params())
eclf1.set_params(lr__C=10.0)
eclf2.set_params(nb__max_depth=5)
assert_true(eclf1.estimators[0][1].get_params()['C'] == 10.0)
assert_true(eclf2.estimators[1][1].get_params()['max_depth'] == 5)
assert_equal(eclf1.get_params()["lr__C"],
eclf1.get_params()["lr"].get_params()['C'])
def test_set_estimator_none():
"""VotingClassifier set_params should be able to set estimators as None"""
# Test predict
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf1 = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2),
('nb', clf3)],
voting='hard', weights=[1, 0, 0.5]).fit(X, y)
eclf2 = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2),
('nb', clf3)],
voting='hard', weights=[1, 1, 0.5])
eclf2.set_params(rf=None).fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_true(dict(eclf2.estimators)["rf"] is None)
assert_true(len(eclf2.estimators_) == 2)
assert_true(all([not isinstance(est, RandomForestClassifier) for est in
eclf2.estimators_]))
assert_true(eclf2.get_params()["rf"] is None)
eclf1.set_params(voting='soft').fit(X, y)
eclf2.set_params(voting='soft').fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
msg = ('All estimators are None. At least one is required'
' to be a classifier!')
assert_raise_message(
ValueError, msg, eclf2.set_params(lr=None, rf=None, nb=None).fit, X, y)
# Test soft voting transform
X1 = np.array([[1], [2]])
y1 = np.array([1, 2])
eclf1 = VotingClassifier(estimators=[('rf', clf2), ('nb', clf3)],
voting='soft', weights=[0, 0.5]).fit(X1, y1)
eclf2 = VotingClassifier(estimators=[('rf', clf2), ('nb', clf3)],
voting='soft', weights=[1, 0.5])
eclf2.set_params(rf=None).fit(X1, y1)
assert_array_equal(eclf1.transform(X1), np.array([[[0.7, 0.3], [0.3, 0.7]],
[[1., 0.], [0., 1.]]]))
assert_array_equal(eclf2.transform(X1), np.array([[[1., 0.], [0., 1.]]]))
eclf1.set_params(voting='hard')
eclf2.set_params(voting='hard')
assert_array_equal(eclf1.transform(X1), np.array([[0, 0], [1, 1]]))
assert_array_equal(eclf2.transform(X1), np.array([[0], [1]]))
def test_estimator_weights_format():
# Test estimator weights inputs as list and array
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2)],
weights=[1, 2],
voting='soft')
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2)],
weights=np.array((1, 2)),
voting='soft')
eclf1.fit(X, y)
eclf2.fit(X, y)
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
|
bsd-3-clause
|
TNT-Samuel/Coding-Projects
|
DNS Server/Source - Copy/Lib/site-packages/dask/dataframe/indexing.py
|
3
|
10176
|
from __future__ import absolute_import, division, print_function
from datetime import datetime
from collections import defaultdict
from toolz import merge
import bisect
import numpy as np
import pandas as pd
from .core import new_dd_object, Series
from . import methods
from ..base import tokenize
class _LocIndexer(object):
""" Helper class for the .loc accessor """
def __init__(self, obj):
self.obj = obj
@property
def _name(self):
return self.obj._name
def _make_meta(self, iindexer, cindexer):
"""
get metadata
"""
if cindexer is None:
return self.obj
else:
return self.obj._meta.loc[:, cindexer]
def __getitem__(self, key):
if isinstance(key, tuple):
# multi-dimensional selection
if len(key) > self.obj.ndim:
# raise from pandas
msg = 'Too many indexers'
raise pd.core.indexing.IndexingError(msg)
iindexer = key[0]
cindexer = key[1]
else:
# if self.obj is Series, cindexer is always None
iindexer = key
cindexer = None
return self._loc(iindexer, cindexer)
def _loc(self, iindexer, cindexer):
""" Helper function for the .loc accessor """
if isinstance(iindexer, Series):
return self._loc_series(iindexer, cindexer)
if self.obj.known_divisions:
iindexer = self._maybe_partial_time_string(iindexer)
if isinstance(iindexer, slice):
return self._loc_slice(iindexer, cindexer)
elif isinstance(iindexer, (list, np.ndarray)):
return self._loc_list(iindexer, cindexer)
else:
# element should raise KeyError
return self._loc_element(iindexer, cindexer)
else:
if isinstance(iindexer, (list, np.ndarray)):
                # applying map_partitions to each partition
                # results in duplicated NaN rows
msg = 'Cannot index with list against unknown division'
raise KeyError(msg)
elif not isinstance(iindexer, slice):
iindexer = slice(iindexer, iindexer)
meta = self._make_meta(iindexer, cindexer)
return self.obj.map_partitions(methods.try_loc, iindexer, cindexer,
meta=meta)
def _maybe_partial_time_string(self, iindexer):
"""
Convert index-indexer for partial time string slicing
if obj.index is DatetimeIndex / PeriodIndex
"""
iindexer = _maybe_partial_time_string(self.obj._meta_nonempty.index,
iindexer, kind='loc')
return iindexer
def _loc_series(self, iindexer, cindexer):
meta = self._make_meta(iindexer, cindexer)
return self.obj.map_partitions(methods.loc, iindexer, cindexer,
token='loc-series', meta=meta)
def _loc_list(self, iindexer, cindexer):
name = 'loc-%s' % tokenize(iindexer, self.obj)
parts = self._get_partitions(iindexer)
meta = self._make_meta(iindexer, cindexer)
if len(iindexer):
dsk = {}
divisions = []
items = sorted(parts.items())
for i, (div, indexer) in enumerate(items):
dsk[name, i] = (methods.loc, (self._name, div),
indexer, cindexer)
# append minimum value as division
divisions.append(sorted(indexer)[0])
# append maximum value of the last division
divisions.append(sorted(items[-1][1])[-1])
else:
divisions = [None, None]
dsk = {(name, 0): meta.head(0)}
return new_dd_object(merge(self.obj.dask, dsk), name,
meta=meta, divisions=divisions)
def _loc_element(self, iindexer, cindexer):
name = 'loc-%s' % tokenize(iindexer, self.obj)
part = self._get_partitions(iindexer)
if iindexer < self.obj.divisions[0] or iindexer > self.obj.divisions[-1]:
raise KeyError('the label [%s] is not in the index' % str(iindexer))
dsk = {(name, 0): (methods.loc, (self._name, part),
slice(iindexer, iindexer), cindexer)}
meta = self._make_meta(iindexer, cindexer)
return new_dd_object(merge(self.obj.dask, dsk), name,
meta=meta, divisions=[iindexer, iindexer])
def _get_partitions(self, keys):
if isinstance(keys, (list, np.ndarray)):
return _partitions_of_index_values(self.obj.divisions, keys)
else:
# element
return _partition_of_index_value(self.obj.divisions, keys)
def _coerce_loc_index(self, key):
return _coerce_loc_index(self.obj.divisions, key)
def _loc_slice(self, iindexer, cindexer):
name = 'loc-%s' % tokenize(iindexer, cindexer, self)
assert isinstance(iindexer, slice)
assert iindexer.step in (None, 1)
if iindexer.start is not None:
start = self._get_partitions(iindexer.start)
else:
start = 0
if iindexer.stop is not None:
stop = self._get_partitions(iindexer.stop)
else:
stop = self.obj.npartitions - 1
if iindexer.start is None and self.obj.known_divisions:
istart = self.obj.divisions[0]
else:
istart = self._coerce_loc_index(iindexer.start)
if iindexer.stop is None and self.obj.known_divisions:
istop = self.obj.divisions[-1]
else:
istop = self._coerce_loc_index(iindexer.stop)
if stop == start:
dsk = {(name, 0): (methods.loc, (self._name, start),
slice(iindexer.start, iindexer.stop), cindexer)}
divisions = [istart, istop]
else:
dsk = {(name, 0): (methods.loc, (self._name, start),
slice(iindexer.start, None), cindexer)}
for i in range(1, stop - start):
if cindexer is None:
dsk[name, i] = (self._name, start + i)
else:
dsk[name, i] = (methods.loc, (self._name, start + i),
slice(None, None), cindexer)
dsk[name, stop - start] = (methods.loc, (self._name, stop),
slice(None, iindexer.stop), cindexer)
if iindexer.start is None:
div_start = self.obj.divisions[0]
else:
div_start = max(istart, self.obj.divisions[start])
if iindexer.stop is None:
div_stop = self.obj.divisions[-1]
else:
div_stop = min(istop, self.obj.divisions[stop + 1])
divisions = ((div_start, ) +
self.obj.divisions[start + 1:stop + 1] +
(div_stop, ))
assert len(divisions) == len(dsk) + 1
meta = self._make_meta(iindexer, cindexer)
return new_dd_object(merge(self.obj.dask, dsk), name,
meta=meta, divisions=divisions)
def _partition_of_index_value(divisions, val):
""" In which partition does this value lie?
>>> _partition_of_index_value([0, 5, 10], 3)
0
>>> _partition_of_index_value([0, 5, 10], 8)
1
>>> _partition_of_index_value([0, 5, 10], 100)
1
>>> _partition_of_index_value([0, 5, 10], 5) # left-inclusive divisions
1
"""
if divisions[0] is None:
msg = "Can not use loc on DataFrame without known divisions"
raise ValueError(msg)
val = _coerce_loc_index(divisions, val)
i = bisect.bisect_right(divisions, val)
return min(len(divisions) - 2, max(0, i - 1))
def _partitions_of_index_values(divisions, values):
""" Return defaultdict of division and values pairs
    Each key is a division index; its value is the list of index values that
    belong to that division.
>>> sorted(_partitions_of_index_values([0, 5, 10], [3]).items())
[(0, [3])]
>>> sorted(_partitions_of_index_values([0, 5, 10], [3, 8, 5]).items())
[(0, [3]), (1, [8, 5])]
"""
if divisions[0] is None:
msg = "Can not use loc on DataFrame without known divisions"
raise ValueError(msg)
results = defaultdict(list)
values = pd.Index(values, dtype=object)
for val in values:
i = bisect.bisect_right(divisions, val)
div = min(len(divisions) - 2, max(0, i - 1))
results[div].append(val)
return results
def _coerce_loc_index(divisions, o):
""" Transform values to be comparable against divisions
This is particularly valuable to use with pandas datetimes
"""
if divisions and isinstance(divisions[0], datetime):
return pd.Timestamp(o)
if divisions and isinstance(divisions[0], np.datetime64):
return np.datetime64(o).astype(divisions[0].dtype)
return o
def _maybe_partial_time_string(index, indexer, kind):
"""
Convert indexer for partial string selection
if data has DatetimeIndex/PeriodIndex
"""
# do not pass dd.Index
assert isinstance(index, pd.Index)
if not isinstance(index, (pd.DatetimeIndex, pd.PeriodIndex)):
return indexer
if isinstance(indexer, slice):
if isinstance(indexer.start, pd.compat.string_types):
start = index._maybe_cast_slice_bound(indexer.start, 'left', kind)
else:
start = indexer.start
if isinstance(indexer.stop, pd.compat.string_types):
stop = index._maybe_cast_slice_bound(indexer.stop, 'right', kind)
else:
stop = indexer.stop
return slice(start, stop)
elif isinstance(indexer, pd.compat.string_types):
start = index._maybe_cast_slice_bound(indexer, 'left', 'loc')
stop = index._maybe_cast_slice_bound(indexer, 'right', 'loc')
return slice(min(start, stop), max(start, stop))
return indexer
|
gpl-3.0
|
larsjbro/FYS4150
|
project_4/source/ising2dim_visual_v3.py
|
1
|
19386
|
# coding=utf-8
# 2-dimensional ising model with visualization
# Written by Kyrre Ness Sjoebaek
from __future__ import division
import matplotlib.pyplot as plt
from numba import jit
import numpy
import numpy as np
import sys
import math
import pygame
from timeit import default_timer as timer
from scipy import integrate
from scipy import special
# Needed for visualize when using SDL
SCREEN = None
FONT = None
BLOCKSIZE = 10
T_CRITICAL = 2./ np.log(1+np.sqrt(2))
@jit(nopython=True)
def periodic(i, limit, add):
"""
Choose correct matrix index with periodic
boundary conditions
Input:
- i: Base index
- limit: Highest \"legal\" index
- add: Number to add or subtract from i
"""
return (i + limit + add) % limit
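# Hedged example (not executed at import time): neighbours wrap around the
# lattice edges, e.g. periodic(0, 10, -1) == 9 and periodic(9, 10, 1) == 0.
def _periodic_example():
    assert periodic(0, 10, -1) == 9
    assert periodic(9, 10, 1) == 0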
def dump_to_terminal(spin_matrix, temp, E, M):
# Simple terminal dump
print "temp:", temp, "E:", E, "M:", M
print spin_matrix
def pretty_print_to_terminal(spin_matrix, temp, E, M):
# Pretty-print to terminal
out = ""
size = len(spin_matrix)
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
out += "X"
else:
out += " "
out += "\n"
print "temp:", temp, "E:", E, "M:", M
print out + "\n"
def display_single_pixel(spin_matrix, temp, E, M):
# SDL single-pixel (useful for large arrays)
size = len(spin_matrix)
SCREEN.lock()
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
SCREEN.set_at((x, y), (255, 255, 255))
else:
SCREEN.set_at((x, y), (0, 0, 0))
SCREEN.unlock()
pygame.display.flip()
def display_block(spin_matrix, temp, E, M):
    # SDL block (useful for smaller arrays)
size = len(spin_matrix)
SCREEN.lock()
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (255, 255, 255), rect)
else:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (0, 0, 0), rect)
SCREEN.unlock()
pygame.display.flip()
def display_block_with_data(spin_matrix, temp, E, M):
# SDL block w/ data-display
size = len(spin_matrix)
SCREEN.lock()
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (255, 255, 255), rect)
else:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (0, 0, 0), rect)
    s = FONT.render("<E> = %5.3E; <M> = %5.3E" % (E, M), False, (255, 0, 0))
SCREEN.blit(s, (0, 0))
SCREEN.unlock()
pygame.display.flip()
def get_visualize_function(method):
vis_methods = {0: dump_to_terminal,
1:pretty_print_to_terminal,
2:display_single_pixel, # (useful for large arrays)
                   3:display_block, # (useful for smaller arrays)
4:display_block_with_data}
def plot_nothing(spin_matrix, temp, E, M):
pass
return vis_methods.get(method, plot_nothing)
def visualize(spin_matrix, temp, E, M, method):
"""
Visualize the spin matrix
Methods:
method = -1:No visualization (testing)
method = 0: Just print it to the terminal
method = 1: Pretty-print to terminal
method = 2: SDL/pygame single-pixel
method = 3: SDL/pygame rectangle
"""
get_visualize_function(method)(spin_matrix, temp, E, M)
@jit(nopython=True)
def metropolis(E, M, w, size, spin_matrix):
# Metropolis
# Loop over all spins, pick a random spin each time
number_of_accepted_configurations = 0
for s in xrange(size**2):
x = int(numpy.random.random() * size)
y = int(numpy.random.random() * size)
deltaE = 2 * spin_matrix[x, y] * (spin_matrix[periodic(x, size, -1), y]
+ spin_matrix[periodic(x, size, 1), y]
+ spin_matrix[x, periodic(y, size, -1)]
+ spin_matrix[x, periodic(y, size, 1)])
accept = numpy.random.random() <= w[deltaE + 8]
if accept:
spin_matrix[x, y] *= -1
M += 2 * spin_matrix[x, y]
E += deltaE
number_of_accepted_configurations += 1
return E, M, number_of_accepted_configurations
@jit(nopython=True)
def _compute_initial_energy(spin_matrix, size):
# Calculate initial energy
E = 0
for j in xrange(size):
for i in xrange(size):
E -= spin_matrix[i, j] * (spin_matrix[periodic(i, size, -1), j]
+ spin_matrix[i, periodic(j, size, 1)])
return E
def monteCarlo(temp, size, trials, visualizer=None, spin_matrix='ordered'):
"""
Calculate the energy and magnetization
(\"straight\" and squared) for a given temperature
Input:
- temp: Temperature to calculate for units Kb Kelvin / J
- size: dimension of square matrix
- trials: Monte-carlo trials (how many times do we
flip the matrix?)
    - visualizer: Function used to visualize the spin matrix (default: no visualization)
Output:
- E_av: Energy of matrix averaged over trials, normalized to spins**2
- E_variance: Variance of energy, same normalization * temp**2
- M_av: Magnetic field of matrix, averaged over trials, normalized to spins**2
- M_variance: Variance of magnetic field, same normalization * temp
- Mabs: Absolute value of magnetic field, averaged over trials
- Mabs_variance
- num_accepted_configs
"""
if visualizer is None:
visualizer = get_visualize_function(method=None) # No visualization
# Setup spin matrix, initialize to ground state
if spin_matrix == 'ordered':
spin_matrix = numpy.zeros((size, size), numpy.int8) + 1
elif spin_matrix == 'random':
        spin_matrix = numpy.where(numpy.random.random((size, size)) > 0.5, 1, -1).astype(numpy.int8)  # random +/-1 spins
else:
raise NotImplementedError('method')
# Create and initialize variables
E_av = E2_av = M_av = M2_av = Mabs_av = 0.0
# Setup array for possible energy changes
w = numpy.zeros(17, dtype=float)
for de in xrange(-8, 9, 4): # include +8
w[de + 8] = math.exp(-de / temp)
# Calculate initial magnetization:
M = spin_matrix.sum()
E = _compute_initial_energy(spin_matrix, size)
total_accepted_configs = 0
# Start metropolis MonteCarlo computation
for i in xrange(trials):
E, M, num_accepted_configs = metropolis(E, M, w, size, spin_matrix)
# Update expectation values
total_accepted_configs += num_accepted_configs
E_av += E
E2_av += E**2
M_av += M
M2_av += M**2
Mabs_av += int(math.fabs(M))
visualizer(spin_matrix, temp, E / float(size**2), M / float(size**2))
# Normalize average values
E_av /= float(trials)
E2_av /= float(trials)
M_av /= float(trials)
M2_av /= float(trials)
Mabs_av /= float(trials)
# Calculate variance and normalize to per-point and temp
E_variance = (E2_av - E_av * E_av) / float(size * size * temp * temp)
M_variance = (M2_av - M_av * M_av) / float(size * size * temp)
Mabs_variance = (M2_av - Mabs_av * Mabs_av) / float(size * size * temp)
# Normalize returned averages to per-point
E_av /= float(size * size)
M_av /= float(size * size)
Mabs_av /= float(size * size)
return E_av, E_variance, M_av, M_variance, Mabs_av, Mabs_variance, total_accepted_configs
def initialize_pygame(size, method):
global SCREEN, FONT
# Initialize pygame
if method == 2 or method == 3 or method == 4:
pygame.init()
if method == 2:
SCREEN = pygame.display.set_mode((size, size))
elif method == 3:
SCREEN = pygame.display.set_mode((size * 10, size * 10))
elif method == 4:
SCREEN = pygame.display.set_mode((size * 10, size * 10))
FONT = pygame.font.Font(None, 12)
def partition2(T):
'''
Return the partition2 function for 2x2 lattice
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
'''
z = 12+4*np.cosh(8.0/T)
return z
def partition(T, size=2):
'''
Return the partition function for size x size lattice
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
'''
kappa = 2*np.sinh(2./T)/np.cosh(2./T)**2
N = size**2
k1 = special.ellipk(kappa**2)
def energy_mean_asymptotic(T):
'''
Return mean energy for size x size lattice normalized by spins**2 * size **2
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
Output:
- E_av: mean of energy, normalized to spins**2
page 428 in lecture notes
'''
denominator = np.tanh(2.0/T)
kappa = 2*denominator/np.cosh(2./T)
k1 = special.ellipk(kappa**2)
# k = 1./np.sinh(2.0/T)**2
# def integrand(theta):
# return 1./np.sqrt(1-4.*k*(np.sin(theta)/(1+k))**2)
# k11, abserr = integrate.quad(integrand, 0, np.pi/2, epsabs=1e-3, epsrel=1e-3)
return -(1+ 2/np.pi *(2*denominator**2-1)*k1)/denominator
def energy_mean2(T):
'''
Return mean energy for 2 x 2 lattice normalized by spins**2 * 4
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
Output:
- E_av: mean of energy, normalized to spins**2
'''
size = 2
return -8*np.sinh(8.0/T)/(np.cosh(8.0/T)+3)/size**2
def energy_variance2(T):
'''
Return variance of energy for 2 x 2 lattice normalized by spins**2 * 4 * T**2
Output:
-E_variance: Variance of energy, same normalization * temp**2 per cell
'''
size = 2
return 64.0*(1.+3.*np.cosh(8.0/T))/(np.cosh(8.0/T)+3)**2 / size**2 / T**2
def energy_variance(T):
'''
Return variance of energy for size x size lattice normalized by spins**2 * size**2 * T**2
Output:
-E_variance: Variance of energy, same normalization * temp**2 per cell
'''
tanh2 = np.tanh(2./T)**2
kappa = 2*np.sinh(2./T)/np.cosh(2./T)**2
# N = size**2
k1 = special.ellipk(kappa**2)
k2 = special.ellipe(kappa**2)
return 4/np.pi * (k1-k2 -(1-tanh2)*(np.pi/2+(2*tanh2-1)*k1))/tanh2/T**2
def energy_mean_and_variance2(temperature):
return energy_mean2(temperature), energy_variance2(temperature)
def specific_heat2(T):
return energy_variance2(T)
def magnetization_spontaneous_asymptotic(T):
""" Return spontaneous magnetization for size x size lattice normalized by spins**2 * size**2
for T < Tc= 2.269
"""
tanh2 = np.tanh(1./T)**2
return (1 - (1-tanh2)**4/(16*tanh2**2))**(1./8) # pp 429
# return (1 - 1./np.sinh(2./T)**4)**(1./8)
def magnetization_mean2(T):
'''
Output:
- M_av: Magnetic field of matrix, averaged over trials, normalized to spins**2 per cell
'''
return np.where(T>T_CRITICAL, 0, magnetization_spontaneous_asymptotic(T))
def magnetization_variance2(T):
"""Return variance of magnetization for 2 x 2 lattice normalized by spins**2 * 4 * T"""
size = 2 * 2
denominator = np.cosh(8./T) + 3
mean = magnetization_mean2(T) * size
sigma = 8.0 * (np.exp(8./T) + 1) / denominator - mean**2
return sigma / size / T
def magnetization_mean_and_variance2(T):
"""Return normalized mean and variance for the moments of a 2 X 2 ising model."""
return magnetization_mean2(T), magnetization_variance2(T)
def susceptibility2(T):
'''
Output:
- M_variance: Variance of magnetic field, same normalization * temp
'''
return magnetization_variance2(T)
def magnetization_abs_mean2(T):
'''
Lattice 2x2
Output:
- Mabs: Absolute value of magnetic field, averaged over trials per cell
'''
size = 2
return (2*np.exp(8.0/T)+4)/(np.cosh(8.0/T)+3)/size**2
def magnetization_abs_mean_and_variance2(temperature):
"""Return normalized mean and variance for the moments of a 2 X 2 ising model."""
beta = 1. / temperature
size = 2
denominator = (np.cosh(8 * beta) + 3)
mean = 2 * (np.exp(8 * beta) + 2) / denominator
sigma = (8 * (np.exp(8 * beta) + 1) / denominator - mean**2) / temperature
return mean / size**2, sigma / size**2
def read_input():
"""
method = -1:No visualization (testing)
method = 0: Just print it to the terminal
method = 1: Pretty-print to terminal
method = 2: SDL/pygame single-pixel
method = 3: SDL/pygame rectangle
"""
if len(sys.argv) == 5:
size = int(sys.argv[1])
trials = int(sys.argv[2])
temperature = float(sys.argv[3])
method = int(sys.argv[4])
else:
print "Usage: python", sys.argv[0],\
"lattice_size trials temp method"
sys.exit(0)
if method > 4:
print "method < 3!"
sys.exit(0)
return size, trials, temperature, method
def plot_abs_error_size2(trial_sizes, data, names, truths, temperature):
for i, truth in enumerate(truths):
name = names[i]
print i
plt.loglog(trial_sizes, np.abs(data[:,i] - truth), label=name)
plt.title('T={} $[k_b K / J]$'.format(temperature))
plt.ylabel('Absolute error')
plt.xlabel('Number of trials')
plt.legend(framealpha=0.2)
def plot_rel_error_size2(trial_sizes, data, names, truths, temperature):
for i, truth in enumerate(truths):
name = names[i]
print i
if truth == 0:
scale = 1e-3
else:
scale = np.abs(truth) + 1e-16
plt.loglog(trial_sizes, np.abs(data[:,i] - truth)/scale, label=name)
plt.title('T={} $[k_b K / J]$'.format(temperature))
plt.ylabel('Relative error')
plt.xlabel('Number of trials')
plt.legend(framealpha=0.2)
def compute_monte_carlo(temperature, size, trial_sizes, spin_matrix='ordered'):
data = []
for trials in trial_sizes:
print trials
t0 = timer()
data.append(monteCarlo(temperature, size, trials, spin_matrix=spin_matrix))
print 'elapsed time: {} seconds'.format(timer() - t0)
data = np.array(data)
names = ['Average Energy per spin $E/N$',
'Specific Heat per spin $C_V/N$',
'Average Magnetization per spin $M/N$',
'Susceptibility per spin $var(M)/N$',
'Average |Magnetization| per spin $|M|/N$',
'Variance of |Magnetization| per spin $var(|M|)/N$',
'Number of accepted configurations']
return data, names
def plot_mean_energy_and_magnetization(trial_sizes, data, names, temperature, spin_matrix='ordered'):
ids = [0, 4]
for i in ids:
name = names[i]
print i
plt.semilogx(trial_sizes, data[:,i], label=name)
plt.title('T={} $[k_b K / J]$, {} spin matrix'.format(temperature, spin_matrix))
plt.ylabel('E or |M|')
plt.xlabel('Number of trials')
plt.legend()
def plot_total_number_of_accepted_configs(trial_sizes, data, names, temperature, spin_matrix='ordered'):
i = 6
# for i in ids:
name = names[i]
print i
plt.loglog(trial_sizes, data[:,i], label=name)
x = np.log(trial_sizes)
y = np.log(data[:, i])
mask = np.isfinite(y)
p = np.polyfit(x[mask], y[mask], deg=1)
sgn = '+' if p[1] > 0 else '-'
label_p = 'exp({:2.1f} {} {:2.1f} ln(x))'.format(p[0], sgn, abs(p[1]))
plt.loglog(trial_sizes, np.exp(np.polyval(p, x)), label=label_p)
plt.title('T={} $[k_b K / J]$, {} spin matrix'.format(temperature, spin_matrix))
plt.ylabel('')
plt.xlabel('Number of trials')
plt.legend()
def main(size, trials, temperature, method):
initialize_pygame(size, method)
visualizer = get_visualize_function(method)
    (E_av, E_variance, M_av, M_variance, Mabs_av,
     Mabs_variance, _n_accepted) = monteCarlo(temperature, size, trials, visualizer)
print "T=%15.8E E[E]=%15.8E Var[E]=%15.8E E[M]=%15.8E Var[M]=%15.8E E[|M|]= %15.8E\n" % (temperature, E_av, E_variance, M_av, M_variance, Mabs_av)
pygame.quit()
def task_b(temperatures=(1, 2.4)):
size = 2
trial_sizes = 10**np.arange(1, 6)
for temperature in temperatures:
data, names = compute_monte_carlo(temperature, size, trial_sizes)
truths = (energy_mean_and_variance2(temperature)
+ magnetization_mean_and_variance2(temperature)
+ magnetization_abs_mean_and_variance2(temperature))
plt.figure()
plot_abs_error_size2(trial_sizes, data, names, truths, temperature)
plt.savefig('task_b_abserr_T{}_size{}.png'.format(temperature, 2))
plt.figure()
plot_rel_error_size2(trial_sizes, data, names, truths, temperature)
plt.savefig('task_b_relerr_T{}_size{}.png'.format(temperature, 2))
# plt.show('hold')
def task_c(temperatures=(1,)):
trial_sizes = [10, 30, 100, 300, 1000, 3000, 10000, 30000, 100000] # 10**np.arange(1, 5)
for temperature in temperatures:
print temperature
size = 20
for spin_matrix in ['ordered', 'random']:
data, names = compute_monte_carlo(temperature, size, trial_sizes, spin_matrix)
plt.figure()
plot_mean_energy_and_magnetization(trial_sizes, data, names, temperature, spin_matrix)
plt.savefig('task_c_T{}_size{}_{}.png'.format(temperature, 20, spin_matrix))
plt.figure()
plot_total_number_of_accepted_configs(trial_sizes, data, names, temperature, spin_matrix)
plt.savefig('task_c_accepted_T{}_size{}_{}.png'.format(temperature, 20, spin_matrix))
# plt.show('hold')
if __name__ == '__main__':
# T=2.4
#
# print energy_mean2(T), energy_mean_asymptotic(T), magnetization_spontaneous_asymptotic(T), magnetization_mean2(T)
# print energy_variance2(T)/energy_variance(T)
# task_b(temperatures=(1,2.4))
# task_c(temperatures=(1, 2.0, 2.4, 5, 10))
task_c(temperatures=(1, 2.4))
plt.show('hold')
# Main program
# # Get input
# # size, trials, temperature, method = read_input()
# size = 2
# trials = 1000
# temperature = 4
# method = -1
#
# main(size, trials, temperature, method)
# print energy_mean_and_variance2(temperature)
# print magnetization_mean_and_variance2(temperature)
# print magnetization_abs_mean_and_variance2(temperature)
|
bsd-2-clause
|
jmhsi/justin_tinker
|
data_science/courses/learning_dl_packages/models/research/skip_thoughts/skip_thoughts/vocabulary_expansion.py
|
18
|
7370
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute an expanded vocabulary of embeddings using a word2vec model.
This script loads the word embeddings from a trained skip-thoughts model and
from a trained word2vec model (typically with a larger vocabulary). It trains a
linear regression model without regularization to learn a linear mapping from
the word2vec embedding space to the skip-thoughts embedding space. The model is
then applied to all words in the word2vec vocabulary, yielding vectors in the
skip-thoughts word embedding space for the union of the two vocabularies.
The linear regression task is to learn a parameter matrix W to minimize
|| X - Y * W ||^2,
where X is a matrix of skip-thoughts embeddings of shape [num_words, dim1],
Y is a matrix of word2vec embeddings of shape [num_words, dim2], and W is a
matrix of shape [dim2, dim1].
This is based on the "Translation Matrix" method from the paper:
"Exploiting Similarities among Languages for Machine Translation"
Tomas Mikolov, Quoc V. Le, Ilya Sutskever
https://arxiv.org/abs/1309.4168
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import gensim.models
import numpy as np
import sklearn.linear_model
import tensorflow as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("skip_thoughts_model", None,
"Checkpoint file or directory containing a checkpoint "
"file.")
tf.flags.DEFINE_string("skip_thoughts_vocab", None,
"Path to vocabulary file containing a list of newline-"
"separated words where the word id is the "
"corresponding 0-based index in the file.")
tf.flags.DEFINE_string("word2vec_model", None,
"File containing a word2vec model in binary format.")
tf.flags.DEFINE_string("output_dir", None, "Output directory.")
tf.logging.set_verbosity(tf.logging.INFO)
def _load_skip_thoughts_embeddings(checkpoint_path):
"""Loads the embedding matrix from a skip-thoughts model checkpoint.
Args:
checkpoint_path: Model checkpoint file or directory containing a checkpoint
file.
Returns:
word_embedding: A numpy array of shape [vocab_size, embedding_dim].
Raises:
ValueError: If no checkpoint file matches checkpoint_path.
"""
if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_file = tf.train.latest_checkpoint(checkpoint_path)
if not checkpoint_file:
raise ValueError("No checkpoint file found in %s" % checkpoint_path)
else:
checkpoint_file = checkpoint_path
tf.logging.info("Loading skip-thoughts embedding matrix from %s",
checkpoint_file)
reader = tf.train.NewCheckpointReader(checkpoint_file)
word_embedding = reader.get_tensor("word_embedding")
tf.logging.info("Loaded skip-thoughts embedding matrix of shape %s",
word_embedding.shape)
return word_embedding
def _load_vocabulary(filename):
"""Loads a vocabulary file.
Args:
filename: Path to text file containing newline-separated words.
Returns:
vocab: A dictionary mapping word to word id.
"""
tf.logging.info("Reading vocabulary from %s", filename)
vocab = collections.OrderedDict()
with tf.gfile.GFile(filename, mode="r") as f:
for i, line in enumerate(f):
word = line.decode("utf-8").strip()
assert word not in vocab, "Attempting to add word twice: %s" % word
vocab[word] = i
tf.logging.info("Read vocabulary of size %d", len(vocab))
return vocab
def _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab, word2vec):
"""Runs vocabulary expansion on a skip-thoughts model using a word2vec model.
Args:
skip_thoughts_emb: A numpy array of shape [skip_thoughts_vocab_size,
skip_thoughts_embedding_dim].
skip_thoughts_vocab: A dictionary of word to id.
word2vec: An instance of gensim.models.Word2Vec.
Returns:
combined_emb: A dictionary mapping words to embedding vectors.
"""
# Find words shared between the two vocabularies.
tf.logging.info("Finding shared words")
shared_words = [w for w in word2vec.vocab if w in skip_thoughts_vocab]
# Select embedding vectors for shared words.
tf.logging.info("Selecting embeddings for %d shared words", len(shared_words))
shared_st_emb = skip_thoughts_emb[[
skip_thoughts_vocab[w] for w in shared_words
]]
shared_w2v_emb = word2vec[shared_words]
# Train a linear regression model on the shared embedding vectors.
tf.logging.info("Training linear regression model")
model = sklearn.linear_model.LinearRegression()
model.fit(shared_w2v_emb, shared_st_emb)
# Create the expanded vocabulary.
tf.logging.info("Creating embeddings for expanded vocabuary")
combined_emb = collections.OrderedDict()
for w in word2vec.vocab:
# Ignore words with underscores (spaces).
if "_" not in w:
w_emb = model.predict(word2vec[w].reshape(1, -1))
combined_emb[w] = w_emb.reshape(-1)
for w in skip_thoughts_vocab:
combined_emb[w] = skip_thoughts_emb[skip_thoughts_vocab[w]]
tf.logging.info("Created expanded vocabulary of %d words", len(combined_emb))
return combined_emb
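# The regression above can also be solved in closed form. The sketch below is
# purely illustrative (it is not called by this script) and, unlike the
# sklearn.linear_model.LinearRegression used above, it does not fit an
# intercept; it assumes dense float arrays X = [num_words, dim1] and
# Y = [num_words, dim2] as described in the module docstring.
def _translation_matrix_sketch(skip_thoughts_matrix, word2vec_matrix):
  """Returns W of shape [dim2, dim1] minimizing || X - Y * W ||^2."""
  # np.linalg.lstsq solves min_W || Y * W - X ||^2 column by column.
  w, _, _, _ = np.linalg.lstsq(word2vec_matrix, skip_thoughts_matrix)
  return w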
def main(unused_argv):
if not FLAGS.skip_thoughts_model:
raise ValueError("--skip_thoughts_model is required.")
if not FLAGS.skip_thoughts_vocab:
raise ValueError("--skip_thoughts_vocab is required.")
if not FLAGS.word2vec_model:
raise ValueError("--word2vec_model is required.")
if not FLAGS.output_dir:
raise ValueError("--output_dir is required.")
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
# Load the skip-thoughts embeddings and vocabulary.
skip_thoughts_emb = _load_skip_thoughts_embeddings(FLAGS.skip_thoughts_model)
skip_thoughts_vocab = _load_vocabulary(FLAGS.skip_thoughts_vocab)
# Load the Word2Vec model.
word2vec = gensim.models.Word2Vec.load_word2vec_format(
FLAGS.word2vec_model, binary=True)
# Run vocabulary expansion.
embedding_map = _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab,
word2vec)
# Save the output.
vocab = embedding_map.keys()
vocab_file = os.path.join(FLAGS.output_dir, "vocab.txt")
with tf.gfile.GFile(vocab_file, "w") as f:
f.write("\n".join(vocab))
tf.logging.info("Wrote vocabulary file to %s", vocab_file)
embeddings = np.array(embedding_map.values())
embeddings_file = os.path.join(FLAGS.output_dir, "embeddings.npy")
np.save(embeddings_file, embeddings)
tf.logging.info("Wrote embeddings file to %s", embeddings_file)
if __name__ == "__main__":
tf.app.run()
|
apache-2.0
|
reuk/wayverb
|
demo/evaluation/room_materials/rt60.py
|
2
|
1970
|
#!/usr/local/bin/python
import numpy as np
import matplotlib
render = True
if render:
matplotlib.use('pgf')
import matplotlib.pyplot as plt
import scipy.signal as signal
import wave
import math
import os
import re
import json
import sys
sys.path.append('python')
def get_frequency_rt30_tuple(line):
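    # Each line of the analysis output is whitespace-separated; field 0 is
    # taken as the frequency band and field 6 as the T30 estimate.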
split = line.split()
return (float(split[0]), float(split[6]))
def read_rt30(fname):
with open(fname) as f:
lines = f.readlines()
return [get_frequency_rt30_tuple(line) for line in lines[14:22]]
def main():
files = [
("0.02", "0.02.txt"),
("0.04", "0.04.txt"),
("0.08", "0.08.txt"),
]
for label, fname in files:
tuples = read_rt30(fname)
x = [freq for freq, _ in tuples]
y = [time for _, time in tuples]
        min_time = min(y)
        max_time = max(y)
        # Spread of the T30 values as a percentage of their midpoint.
        spread_percent = (max_time - min_time) * 100.0 / ((max_time + min_time) * 0.5)
        print('file: {}, min: {}, max: {}, spread: {}%'.format(
            fname, min_time, max_time, spread_percent))
plt.plot(x, y, label=label, marker='o', linestyle='--')
plt.xscale('log')
plt.axvline(x=500)
plt.annotate(xy=(520, 1.4), s='waveguide cutoff')
plt.legend(loc='lower center', ncol=3, bbox_to_anchor=(0, -0.05, 1, 1), bbox_transform=plt.gcf().transFigure)
plt.title('Octave-band T30 Measurements for Different Surface Absorption Coefficients')
plt.xlabel('frequency / Hz')
plt.ylabel('time / s')
plt.tight_layout()
#plt.subplots_adjust(top=0.9)
plt.show()
if render:
plt.savefig('room_absorption_rt30.svg', bbox_inches='tight', dpi=96, format='svg')
if __name__ == '__main__':
pgf_with_rc_fonts = {
'font.family': 'serif',
'font.serif': [],
'font.sans-serif': ['Helvetica Neue'],
'legend.fontsize': 12,
}
matplotlib.rcParams.update(pgf_with_rc_fonts)
main()
|
gpl-2.0
|
amolkahat/pandas
|
pandas/io/formats/latex.py
|
4
|
9407
|
# -*- coding: utf-8 -*-
"""
Module for formatting output data in Latex.
"""
from __future__ import print_function
import numpy as np
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.io.formats.format import TableFormatter
class LatexFormatter(TableFormatter):
""" Used to render a DataFrame to a LaTeX tabular/longtable environment
output.
Parameters
----------
formatter : `DataFrameFormatter`
column_format : str, default None
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns
longtable : boolean, default False
Use a longtable environment instead of tabular.
See Also
--------
HTMLFormatter
"""
def __init__(self, formatter, column_format=None, longtable=False,
multicolumn=False, multicolumn_format=None, multirow=False):
self.fmt = formatter
self.frame = self.fmt.frame
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.column_format = column_format
self.longtable = longtable
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
def write_result(self, buf):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
# string representation of the columns
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
info_line = (u('Empty {name}\nColumns: {col}\nIndex: {idx}')
.format(name=type(self.frame).__name__,
col=self.frame.columns,
idx=self.frame.index))
strcols = [[info_line]]
else:
strcols = self.fmt._to_str_columns()
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return 'r'
else:
return 'l'
        # reestablish the MultiIndex that has been joined by _to_str_columns
if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
out = self.frame.index.format(
adjoin=False, sparsify=self.fmt.sparsify,
names=self.fmt.has_index_names, na_rep=self.fmt.na_rep
)
# index.format will sparsify repeated entries with empty strings
# so pad these with some empty space
def pad_empties(x):
for pad in reversed(x):
if pad:
break
return [x[0]] + [i if i else ' ' * len(pad) for i in x[1:]]
out = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
out = [[' ' * len(i[-1])] * clevels + i for i in out]
# Add the column names to the last index column
cnames = self.frame.columns.names
if any(cnames):
new_names = [i if i else '{}' for i in cnames]
out[self.frame.index.nlevels - 1][:clevels] = new_names
# Get rid of old multiindex column and add new ones
strcols = out + strcols[1:]
column_format = self.column_format
if column_format is None:
dtypes = self.frame.dtypes._values
column_format = ''.join(map(get_col_type, dtypes))
if self.fmt.index:
index_format = 'l' * self.frame.index.nlevels
column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
raise AssertionError('column_format must be str or unicode, '
'not {typ}'.format(typ=type(column_format)))
if not self.longtable:
buf.write('\\begin{{tabular}}{{{fmt}}}\n'
.format(fmt=column_format))
buf.write('\\toprule\n')
else:
buf.write('\\begin{{longtable}}{{{fmt}}}\n'
.format(fmt=column_format))
buf.write('\\toprule\n')
ilevels = self.frame.index.nlevels
clevels = self.frame.columns.nlevels
nlevels = clevels
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
strrows = list(zip(*strcols))
self.clinebuf = []
for i, row in enumerate(strrows):
if i == nlevels and self.fmt.header:
buf.write('\\midrule\n') # End of header
if self.longtable:
buf.write('\\endhead\n')
buf.write('\\midrule\n')
buf.write('\\multicolumn{{{n}}}{{r}}{{{{Continued on next '
'page}}}} \\\\\n'.format(n=len(row)))
buf.write('\\midrule\n')
buf.write('\\endfoot\n\n')
buf.write('\\bottomrule\n')
buf.write('\\endlastfoot\n')
if self.fmt.kwds.get('escape', True):
# escape backslashes first
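                # (doing '\' first means the backslashes added by the
                # replacements below are not themselves escaped again)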
crow = [(x.replace('\\', '\\textbackslash ')
.replace('_', '\\_')
.replace('%', '\\%').replace('$', '\\$')
.replace('#', '\\#').replace('{', '\\{')
.replace('}', '\\}').replace('~', '\\textasciitilde ')
.replace('^', '\\textasciicircum ')
.replace('&', '\\&')
if (x and x != '{}') else '{}') for x in row]
else:
crow = [x if x else '{}' for x in row]
if self.bold_rows and self.fmt.index:
# bold row labels
crow = ['\\textbf{{{x}}}'.format(x=x)
if j < ilevels and x.strip() not in ['', '{}'] else x
for j, x in enumerate(crow)]
if i < clevels and self.fmt.header and self.multicolumn:
# sum up columns to multicolumns
crow = self._format_multicolumn(crow, ilevels)
if (i >= nlevels and self.fmt.index and self.multirow and
ilevels > 1):
# sum up rows to multirows
crow = self._format_multirow(crow, ilevels, i, strrows)
buf.write(' & '.join(crow))
buf.write(' \\\\\n')
if self.multirow and i < len(strrows) - 1:
self._print_cline(buf, i, len(strcols))
if not self.longtable:
buf.write('\\bottomrule\n')
buf.write('\\end{tabular}\n')
else:
buf.write('\\end{longtable}\n')
def _format_multicolumn(self, row, ilevels):
r"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = list(row[:ilevels])
ncol = 1
coltext = ''
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
.format(ncol=ncol, fmt=self.multicolumn_format,
txt=coltext.strip()))
# don't modify where not needed
else:
row2.append(coltext)
for c in row[ilevels:]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row, ilevels, i, rows):
r"""
        Check following rows to see whether this row should be a multirow
        e.g.:       becomes:
        a & 0 &     \multirow{2}{*}{a} & 0 &
          & 1 &     & 1 &
        b & 0 &     \cline{1-2}
                    b & 0 &
"""
for j in range(ilevels):
if row[j].strip():
nrow = 1
for r in rows[i + 1:]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'.format(
nrow=nrow, row=row[j].strip())
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _print_cline(self, buf, i, icol):
"""
Print clines after multirow-blocks are finished
"""
for cl in self.clinebuf:
if cl[0] == i:
buf.write('\\cline{{{cl:d}-{icol:d}}}\n'
.format(cl=cl[1], icol=icol))
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
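# Minimal usage sketch (illustrative, not part of pandas itself): LatexFormatter
# is normally driven through the public ``DataFrame.to_latex`` API, which
# forwards the options handled above. The keyword names used here are assumed
# from the formatter parameters and may differ slightly between pandas versions.
if __name__ == '__main__':  # pragma: no cover
    import pandas as pd

    _df = pd.DataFrame({'a': [1, 2], 'b': ['x%y', 'z_w']})
    # column_format/longtable/multicolumn/multirow map onto the attributes set
    # in LatexFormatter.__init__ above; escape triggers the character escaping
    # performed in write_result.
    print(_df.to_latex(column_format='lrl', longtable=False, escape=True,
                       multicolumn=True, multicolumn_format='c',
                       multirow=True, bold_rows=False))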
|
bsd-3-clause
|
MrCodeYu/spark
|
python/pyspark/sql/context.py
|
3
|
22432
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
    A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession(sparkContext)
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not given, it defaults to a string and conversion
        will be done automatically. For any other return type, the produced object
        must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
self.sparkSession.catalog.registerFunction(name, f, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or
:class:`pyspark.sql.types.StringType`, it must match the
real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.0.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
def __init__(self, sparkContext, jhiveContext=None):
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
        you may end up launching multiple derby instances and encountering incredibly
        confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
yanchen036/tensorflow
|
tensorflow/examples/learn/iris_custom_decay_dnn.py
|
43
|
3572
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers."""
# Create three fully connected layers respectively of size 10, 20, and 10.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op with exponentially decaying learning rate.
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
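    # With the default staircase=False this evaluates to
    #   learning_rate * decay_rate ** (global_step / decay_steps)
    #   = 0.1 * 0.001 ** (global_step / 100),
    # i.e. the rate decays smoothly by a factor of 1000 every 100 steps.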
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
raghavrv/scikit-learn
|
examples/ensemble/plot_isolation_forest.py
|
39
|
2361
|
"""
==========================================
IsolationForest example
==========================================
An example using IsolationForest for anomaly detection.
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a measure
of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path lengths
for particular samples, they are highly likely to be anomalies.
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
rng = np.random.RandomState(42)
# Generate train data
X = 0.3 * rng.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rng.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
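# predict returns +1 for points classified as inliers and -1 for outliers;
# decision_function (used below for the contour plot) returns a score where
# lower values correspond to more abnormal points.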
# plot the line, the samples, and the nearest vectors to the plane
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("IsolationForest")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([b1, b2, c],
["training observations",
"new regular observations", "new abnormal observations"],
loc="upper left")
plt.show()
|
bsd-3-clause
|
JackKelly/neuralnilm_prototype
|
scripts/e417.py
|
2
|
6371
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
# max_input_power=100,
    max_diff=100,
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.75,
skip_probability_for_first_appliance=0,
one_target_per_seq=False,
n_seq_per_batch=64,
# subsample_target=4,
include_diff=True,
include_power=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs=True,
# standardise_input=True,
# standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-6,
learning_rate_changes_by_iteration={
# 1000: 1e-4,
# 4000: 1e-5
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=10)
)
def exp_a(name):
# ReLU hidden layers
# linear output
# output one appliance
# 0% skip prob for first appliance
# 100% skip prob for other appliances
# input is diff
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': BidirectionalRecurrentLayer,
'num_units': 50,
'W_in_to_hid': Normal(std=1),
'W_hid_to_hid': Identity(scale=0.9),
'nonlinearity': rectify,
'learn_init': False,
'precompute_input': True
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=1/sqrt(50))
}
]
net = Net(**net_dict_copy)
net.load_params(5000)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
# EXPERIMENTS = list('abcdefghi')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
mit
|
IshankGulati/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
33
|
17877
|
import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics import cluster as cluster_module
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
'neg_mean_squared_error', 'neg_mean_squared_log_error',
'neg_median_absolute_error', 'mean_absolute_error',
'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'log_loss']
# All supervised cluster scorers (They behave like classification metric)
CLUSTER_SCORERS = ["adjusted_rand_score",
"homogeneity_score",
"completeness_score",
"v_measure_score",
"mutual_info_score",
"adjusted_mutual_info_score",
"normalized_mutual_info_score",
"fowlkes_mallows_score"]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_clf) for name in CLUSTER_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_supervised_cluster_scorers():
# Test clustering scorers against gold standard labeling.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
for name in CLUSTER_SCORERS:
score1 = get_scorer(name)(km, X_test, y_test)
score2 = getattr(cluster_module, name)(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
if scorer_name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm)
else:
score = scorer(estimator, X_mm, y_mm)
assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
for name in SCORERS.keys():
yield check_scorer_memmap, name
def test_deprecated_names():
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
for name in ('mean_absolute_error', 'mean_squared_error',
'median_absolute_error', 'log_loss'):
warning_msg = "Scoring method %s was renamed to" % name
for scorer in (get_scorer(name), SCORERS[name]):
assert_warns_message(DeprecationWarning,
warning_msg,
scorer, clf, X, y)
assert_warns_message(DeprecationWarning,
warning_msg,
cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), f1_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), roc_auc_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
Ridge(), r2_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
KMeans(), cluster_module.adjusted_rand_score)
|
bsd-3-clause
|
ryfeus/lambda-packs
|
Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py
|
92
|
4535
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
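# Minimal usage sketch (illustrative only; the DataFrame/Series below are
# made-up examples, not part of the original module):
def _example_pandas_input_fn_usage():
  """Builds an input_fn from a small DataFrame; the returned callable is what
  tf.estimator-style Estimators expect as their `input_fn` argument."""
  features = pd.DataFrame({'sq_footage': [1000., 2000.], 'rooms': [3., 5.]})
  labels = pd.Series([200000., 350000.], name='price')
  return pandas_input_fn(x=features, y=labels, batch_size=2, num_epochs=1,
                         shuffle=False)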
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
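# Minimal sketch of the expected behaviour (the frame below is a made-up
# example): numeric and bool columns are cast to a single float ndarray.
def _example_extract_pandas_data():
  frame = pd.DataFrame({'a': [1, 2], 'b': [True, False]})
  return extract_pandas_data(frame)  # array([[1., 1.], [2., 0.]])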
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame):  # a pandas.Series is not a DataFrame; it falls through to the else branch
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
|
mit
|
cojacoo/testcases_echoRD
|
gen_test_coR5.py
|
1
|
4305
|
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os, sys
try:
import cPickle as pickle
except:
import pickle
#connect echoRD Tools
pathdir='../echoRD' #path to echoRD
lib_path = os.path.abspath(pathdir)
#sys.path.append(lib_path)
sys.path.append('/home/ka/ka_iwg/ka_oj4748/echoRD/echoRD')
import vG_conv as vG
from hydro_tools import plotparticles_t,hydroprofile,plotparticles_column
# Prepare echoRD
#connect to echoRD
import run_echoRD as rE
#connect and load project
[dr,mc,mcp,pdyn,cinf,vG]=rE.loadconnect(pathdir='../',mcinif='mcini_g63_nomac',experimental=True)
mc = mcp.mcpick_out(mc,'g63_nomac.pickle')
runname='gen_test_coR5'
idx=int(np.shape(mc.soilgrid)[1]/2)
mc.soilgrid[:,idx-1:idx+1]=13
mc.advectref='Shipitalo'
mc.soilmatrix=pd.read_csv(mc.matrixbf, sep=' ')
mc.soilmatrix['m'] = np.fmax(1-1/mc.soilmatrix.n,0.1)
precTS=pd.read_csv(mc.precf, sep=',',skiprows=3)
precTS.tstart-=340
precTS.tend-=340
precTS.intense=2.*0.063*60./1000.# intensity in m3/s
#use modified routines for binned retention definitions
mc.part_sizefac=200
mc.gridcellA=abs(mc.mgrid.vertfac*mc.mgrid.latfac)
mc.particleA=abs(mc.gridcellA.values)/(2*mc.part_sizefac) #assume average ks at about 0.5 as reference of particle size
mc.particleD=2.*np.sqrt(mc.particleA/np.pi)
mc.particleV=3./4.*np.pi*(mc.particleD/2.)**3.
mc.particleV/=np.sqrt(abs(mc.gridcellA.values)) #assume grid size as 3rd dimension
mc.particleD/=np.sqrt(abs(mc.gridcellA.values))
#for column:
total_volume=np.pi*0.5**3
mc.particleV=total_volume/(mc.mgrid.vertgrid[0]*mc.mgrid.latgrid[0]*(2*mc.part_sizefac))
mc.particlemass=dr.waterdensity(np.array(20),np.array(-9999))*mc.particleV #assume 20C as reference for particle mass
mc=dr.ini_bins(mc)
mc=dr.mc_diffs(mc,np.max(np.max(mc.mxbin)))
[mc,particles,npart]=dr.particle_setup(mc)
#define bin assignment mode for infiltration particles
mc.LTEdef='instant'#'ks' #'instant' #'random'
mc.LTEmemory=mc.soilgrid.ravel()*0.
#new reference
mc.maccon=np.where(mc.macconnect.ravel()>0)[0] #index of all connected cells
mc.md_macdepth=np.abs(mc.md_macdepth)
mc.prects='column2'
mc.colref=True
#theta=mc.zgrid[:,1]*0.+0.273
#[mc,particles,npart]=rE.particle_setup_obs(theta,mc,vG,dr,pdyn)
[thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
#[A,B]=plotparticles_t(particles,thS/100.,mc,vG,store=True)
# Run Model
mc.LTEpercentile=70 #new parameter
t_end=24.*3600.
saveDT=True
#1: MDA
#2: MED
#3: rand
infiltmeth='MDA'
#3: RWdiff
#4: Ediss
#exfiltmeth='RWdiff'
exfiltmeth='Ediss'
#5: film_uconst
#6: dynamic u
film=True
#7: maccoat1
#8: maccoat10
#9: maccoat100
macscale=1. #scale the macropore coating
clogswitch=False
infiltscale=False
#mc.dt=0.11
#mc.splitfac=5
#pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
#import profile
#%prun -D diff_pd_prof.prof pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
wdir='/beegfs/work/ka_oj4748/gen_tests'
drained=pd.DataFrame(np.array([]))
leftover=0
output=60. #mind to set also in TXstore.index definition
dummy=np.floor(t_end/output)
t=0.
ix=0
TSstore=np.zeros((int(dummy),mc.mgrid.cells[0],2))
try:
#unpickle:
with open(''.join([wdir,'/results/Z',runname,'_Mstat.pick']),'rb') as handle:
pickle_l = pickle.load(handle)
dummyx = pickle.loads(pickle_l)
particles = pickle.loads(dummyx[0])
[leftover,drained,t,TSstore,ix] = pickle.loads(dummyx[1])
ix+=1
print('resuming into stored run at t='+str(t)+'...')
except:
print('starting new run...')
#loop through plot cycles
for i in np.arange(dummy.astype(int))[ix:]:
plotparticles_column(particles,mc,pdyn,vG,runname,t,i,saving=True,relative=False,wdir=wdir)
[particles,npart,thS,leftover,drained,t]=rE.CAOSpy_rundx1(i*output,(i+1)*output,mc,pdyn,cinf,precTS,particles,leftover,drained,6.,splitfac=4,prec_2D=False,maccoat=macscale,saveDT=saveDT,clogswitch=clogswitch,infilt_method=infiltmeth,exfilt_method=exfiltmeth,film=film,infiltscale=infiltscale)
TSstore[i,:,:]=rE.part_store(particles,mc)
#if i/5.==np.round(i/5.):
with open(''.join([wdir,'/results/X',runname,'_Mstat.pick']),'wb') as handle:
pickle.dump(pickle.dumps([leftover,drained,t,TSstore,i]), handle, protocol=2)
|
gpl-3.0
|
Didou09/tofu
|
examples/tutorials/tuto_plot_basic.py
|
1
|
2347
|
"""
Getting started: 5 minutes tutorial for `tofu`
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This is a tutorial that aims to get a new user a little familiar with tofu's structure.
"""
# The following imports matplotlib, preferably using a
# backend that allows the plots to be interactive (Qt5Agg).
import numpy as np
import matplotlib
try:
matplotlib.use('Qt5Agg')
except ImportError:
matplotlib.use(matplotlib.rcParams['backend'])
import matplotlib.pyplot
###############################################################################
# We start by loading `tofu`. You might see some warnings at this stage since
# optional modules for `tofu` could
# be missing on the machine you are working on. These warnings can safely be ignored.
import tofu as tf
###############################################################################
# We can now create our first configuration.
# In `tofu` speak, a configuration is the geometry of the device and its
# structures. `tofu` provides pre-defined ones for you to try, so we're going
# to do just that:
configB2 = tf.load_config("B2")
###############################################################################
# The configuration can easily be visualized using the `.plot()` method:
configB2.plot()
###############################################################################
# Since `tofu` is all about tomography, let's create a 1D camera and plot its
# output.
cam1d = tf.geom.utils.create_CamLOS1D(
config=configB2,
pinhole=[3.4, 0, 0],
sensor_nb=100,
focal=0.1,
sensor_size=0.1,
orientation=[np.pi, 0, 0],
Name="",
Exp="",
Diag="",
)
# interactive plot
cam1d.plot_touch()
###############################################################################
# The principle is similar for 2D cameras.
cam2d = tf.geom.utils.create_CamLOS2D(
config=configB2,
pinhole=[3.4, 0, 0],
sensor_nb=100,
focal=0.1,
sensor_size=0.1,
orientation=[np.pi, 0, 0],
Name="",
Exp="",
Diag="",
)
cam2d.plot_touch()
###############################################################################
# What comes next is up to you!
# You could now play with the function parameters (change the cameras
# direction, refinement, aperture),
# with the plots (many are interactive) or create your own tomographic
# configuration.
|
mit
|
Winterflower/mdf
|
mdf/regression/differs.py
|
3
|
18094
|
"""
Classes for collecting data and determining differences,
for use with mdf.regression.run.
"""
import sys
import os
from ..builders import DataFrameBuilder
from ..nodes import MDFNode
import numpy as np
import pandas as pa
import xlwt
import logging
from datetime import datetime
_log = logging.getLogger(__name__)
if sys.version_info[0] > 2:
basestring = str
def _to_range(row, col, sheet=None):
"""returns an Excel range string, e.g. 0, 0 => A1"""
cell = ""
while col >= 26:
cell = "%s%s" % (chr(ord("A") + (col % 26)), cell)
col = (col // 26) - 1
cell = "%s%s" % (chr(ord("A") + (col % 26)), cell)
cell += "%d" % (row + 1)
if sheet is not None:
cell = "%s!%s" % (sheet.name, cell)
return cell
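# A few worked examples of the cell addressing above (illustrative sketch):
def _example_to_range():
    assert _to_range(0, 0) == "A1"
    assert _to_range(9, 25) == "Z10"
    assert _to_range(0, 26) == "AA1"   # columns past Z roll over to AA, AB, ...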
class Differ(object):
"""
Differ objects are the same as mdf builders, with the
addition that they have the ability to diff result
sets.
When mdf.regression.run is called, each differ
object is called for each date in the regression
with the context being evaluated.
When finished, lhs.diff(rhs, lhs_ctx, rhs_ctx) is
called to diff the data collected in the regression
differ object.
"""
def diff(self, rhs, lhs_ctx, rhs_ctx):
"""
returns a tuple:
(is_different, brief_description, long_description, filename)
brief_description should be a one line human readable string
that describes any differences found, suitable for inclusion
in a diff report.
long_description describes any difference found in more detail
and may be included in the details section of a regression
report.
filename may be None or the name of a file containing a more
detailed diff report.
"""
raise NotImplementedError
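# Hypothetical minimal Differ subclass (illustrative sketch only; it is not
# part of mdf and only demonstrates the return contract of diff()):
class _ScalarDiffer(Differ):
    """Collects a single scalar per run and reports whether the runs match."""
    def __init__(self):
        self.value = None

    def diff(self, rhs, lhs_ctx, rhs_ctx):
        is_different = self.value != rhs.value
        brief = "values differ" if is_different else "values match"
        detail = "%r != %r" % (self.value, rhs.value) if is_different else ""
        return (is_different, brief, detail, None)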
class DataFrameDiffer(DataFrameBuilder, Differ):
"""
Subclass of Differ and DataFrameBuilder to be used
for collecting a dataframe of node values and then
comparing with another.
"""
def __init__(self, nodes, dtype=object, sparse_fill_value=None, xls_filename=None):
self.nodes_or_names = nodes
nodes = self._get_nodes(nodes)
DataFrameBuilder.__init__(self, nodes, dtype=dtype, sparse_fill_value=sparse_fill_value)
Differ.__init__(self)
self.__tolerances = {}
self.__xls_filename = xls_filename
#
# This is so nodes can be passed in as a list of names (including module/package/class)
# and the nodes will be found using that, instead of passing a node instance in.
#
# When the differ is pickled the node may exist in the target environment but the pickled
# node format remembers what class the node was implemented on as well as the class it's
# bound to. If that's different in two instances it won't find the node and so the
# regression will fail. By always getting the nodes by name it will get the correct node.
#
def _get_nodes(self, nodes_or_names):
nodes = []
for n in nodes_or_names:
if isinstance(n, basestring) and "." in n:
name = n
# import the module if necessary
components = name.split(".")
modulename = components[0]
try:
__import__(modulename)
except ImportError:
pass
module = sys.modules.get(modulename, None)
while modulename in sys.modules and len(components) > 1:
module = sys.modules[modulename]
components.pop(0)
modulename = ".".join((modulename, components[0]))
try:
__import__(modulename)
except ImportError:
pass
if not components or not module:
raise Exception("Node not found: '%s'" % name)
# get the class and then the node from the module
obj = module
while components:
attr = components.pop(0)
obj = getattr(obj, attr)
n = obj
# check by this point we have a node
if not isinstance(n, MDFNode):
raise Exception("Node not found: %s" % n)
nodes.append(n)
return nodes
def __setstate__(self, state):
# fix up the nodes again from their names
state["nodes"] = self._get_nodes(state["nodes_or_names"])
self.__dict__.update(state)
def __getstate__(self):
# don't pickle the nodes - get them again when unpickling
state = dict(self.__dict__)
state.pop("nodes", None)
return state
def set_tolerance(self, tolerance, abs=True, node=None):
"""
Sets the tolerance for comparing values.
When abs is True a difference is considered
significant when::
abs(lhs - rhs) > tolerance
Or if abs is False a difference is considered
significant when::
abs((lhs / rhs) - 1.0) > tolerance
If node is None the tolerance applies to all values,
otherwise the tolerance only applies to values
derived from that specific node.
"""
assert node is None or node in self.nodes
self.__tolerances[node] = (tolerance, abs)
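    # Usage sketch (illustrative; `differ` and `some_node` are placeholder
    # names): a 1e-9 absolute tolerance for every node, plus a 0.5% relative
    # tolerance for one specific node.
    #
    #   differ.set_tolerance(1e-9)
    #   differ.set_tolerance(0.005, abs=False, node=some_node)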
def get_tolerance(self, node=None):
"""
returns the tolerance set using set_tolerance
as a tuple (tolerance, abs)
"""
tolerance = self.__tolerances.get(node, None)
if tolerance is None:
tolerance = self.__tolerances.get(None, (0.0, True))
return tolerance
def diff(self, other, ctx, other_ctx):
"""
Returns a tuple (is_different, brief_description, long_description, detail_filename)
that describes the difference between self and other.
If applicable and if a path was passed to the ctor then additional details
describing differences will be written to a file, and that filename is
returned as part of the diff.
"""
lhs_data = self.get_dataframe(ctx)
rhs_data = other.get_dataframe(other_ctx)
# Coerce python datetime indexes to pandas DatetimeIndex
# TODO: Remove this once pandas 0.7.3 compatibility is no longer needed
def _coerce_dt_index(index):
if len(index) > 0 and (not isinstance(index, pa.DatetimeIndex)):
# If first and last index entries are python datetimes, assume that the index contains only datetimes
if isinstance(index[0], datetime) and isinstance(index[-1], datetime):
return pa.DatetimeIndex(index)
# Return the original index if no modifications were done
return index
lhs_data.index = _coerce_dt_index(lhs_data.index)
rhs_data.index = _coerce_dt_index(rhs_data.index)
# diff each node's values individually
is_different = False
brief_description = ""
long_description = ""
different_nodes = []
details_filename = None
def _cols_are_similar(lhs_col, rhs_col):
lhs_col, rhs_col = str(lhs_col), str(rhs_col)
if "." in lhs_col:
unused, lhs_col = lhs_col.rsplit(".", 1)
if "." in rhs_col:
unused, rhs_col = rhs_col.rsplit(".", 1)
return lhs_col.lower() == rhs_col.lower()
for node in self.nodes:
lhs_columns = sorted(self.get_columns(node, ctx))
rhs_columns = sorted(other.get_columns(node, other_ctx))
# check the columns are the same
if len(lhs_columns) != len(rhs_columns) \
or (np.array(lhs_columns) != np.array(rhs_columns)).any():
is_different = True
description = "%s has column differences" % node.name
description += "\n" + "-" * len(description) + "\n\n"
max_columns = max(len(lhs_columns), len(rhs_columns))
lhs_tmp_cols = list(lhs_columns) + [None] * (max_columns - len(lhs_columns))
rhs_tmp_cols = list(rhs_columns) + [None] * (max_columns - len(rhs_columns))
cols_are_similar = len(lhs_columns) == len(rhs_columns)
for i, (lhs_col, rhs_col) in enumerate(zip(lhs_tmp_cols, rhs_tmp_cols)):
if lhs_col != rhs_col:
description += "%d: %s != %s\n" % (i, lhs_col, rhs_col)
if not _cols_are_similar(lhs_col, rhs_col):
cols_are_similar = False
long_description += description + "\n\n"
# if the cols aren't even similar skip the rest of the checks
if not cols_are_similar:
long_description += "**Not diffing data because of column differences**\n\n"
different_nodes.append(node)
continue
lhs_df = lhs_data[lhs_columns]
rhs_df = rhs_data[rhs_columns]
# check the indices are the same
if (np.array(lhs_df.index) != np.array(rhs_df.index)).any():
is_different = True
different_nodes.append(node)
mask = np.array(rhs_data.index) != np.array(lhs_data.index)
lhs_diff_dates = lhs_data.index[mask]
rhs_diff_dates = rhs_data.index[mask]
description = "%s has index differences" % node.name
description += "\n" + "-" * len(description) + "\n\n"
description += "indexes are different starting at %s != %s" % (
lhs_diff_dates[0],
rhs_diff_dates[0])
long_description += description + "\n\n"
continue
#
# columns and indices are the same so check the contents
#
try:
lhs_df = lhs_df.astype(float)
except TypeError:
pass
try:
rhs_df = rhs_df.astype(float)
except TypeError:
pass
tolerance, is_abs = self.get_tolerance(node)
if is_abs:
diffs = np.abs(lhs_df - rhs_df)
mask = (diffs > tolerance).values
else:
diffs = np.abs((lhs_df / rhs_df) - 1.0)
mask = (diffs > tolerance).values
# don't include differences where both sides are NaN or 0.0
try:
mask &= ~((lhs_df == 0.0) & (rhs_df == 0.0)).values
mask &= ~(np.isnan(lhs_df) & np.isnan(rhs_df)).values
except TypeError:
pass
# do include differences where one side is NaN but the other isn't
try:
mask |= np.isnan(lhs_df).values & ~np.isnan(rhs_df).values
mask |= np.isnan(rhs_df).values & ~np.isnan(lhs_df).values
except TypeError:
pass
if mask.any():
is_different = True
different_nodes.append(node)
row_mask = np.apply_along_axis(np.any, 1, mask)
diffs = diffs[row_mask]
description = "%s has %d differences" % (node.name, len(diffs.index))
description += "\n" + "-" * len(description) + "\n\n"
description += "tolerance = %f%s\n\n" % (
tolerance if is_abs else tolerance * 100.0,
"%" if not is_abs else "")
lhs_diffs = lhs_df[row_mask]
rhs_diffs = rhs_df[row_mask]
# convert the lhs and rhs to strings
lhs_lines = lhs_diffs.to_string().splitlines()
rhs_lines = rhs_diffs.to_string().splitlines()
# pad so they're the same length
lhs_lines += ["" * max(len(rhs_lines) - len(lhs_lines), 0)]
rhs_lines += ["" * max(len(lhs_lines) - len(rhs_lines), 0)]
max_lines = 10
mid = min(len(lhs_lines), max_lines) // 2
# format them on the same lines
lines = []
fmt = "%%-%ds %%-2s %%s" % max([len(x) for x in lhs_lines])
for i, (l, r) in enumerate(zip(lhs_lines, rhs_lines)):
if i == mid:
lines.append(fmt % (l, "!=", r))
else:
lines.append(fmt % (l, " ", r))
description += "\n".join(lines[:max_lines])
if len(lines) > max_lines:
description += "\n..."
long_description += description + "\n\n"
if is_different:
node_names = [x.short_name for x in different_nodes]
_log.debug("Differences found in nodes: %s" % ", ".join(node_names))
if len(different_nodes) == 0:
brief_description = "No data differences"
long_description += "No data differences\n\n"
elif len(different_nodes) == 1:
brief_description = "%s has differences" % node_names[0]
else:
brief_description = ", ".join(node_names[:-1])
brief_description += " and %s have differences" % node_names[-1]
if self.__xls_filename and len(different_nodes) > 0:
_log.debug("Writing differences to Excel file '%s'" % self.__xls_filename)
details_filename = self.__xls_filename
self.__write_xls(other, different_nodes, lhs_data, rhs_data, details_filename, ctx, other_ctx)
return (is_different, brief_description, long_description, details_filename)
def __write_xls(self, rhs_differ, different_nodes, lhs_data, rhs_data, filename, lhs_ctx, rhs_ctx):
"""write the diffs to a spreadsheet"""
wb = xlwt.Workbook()
date_style = xlwt.easyxf(num_format_str='YYYY-MM-DD')
nsheets = 0
for node in different_nodes:
lhs_columns = sorted(self.get_columns(node, lhs_ctx))
lhs_df = lhs_data[lhs_columns]
rhs_columns = sorted(rhs_differ.get_columns(node, rhs_ctx))
rhs_df = rhs_data[rhs_columns]
if len(lhs_df.columns) > 255 or len(rhs_df.columns) > 255: # xlwt has a limit of 256 columns
# just dump data into two separate CSV if its too big for a nice XLS report
fname = "%s__%s" % (node.short_name, os.path.splitext(os.path.basename(filename))[0])
csv_fpath = os.path.join(os.path.dirname(filename), fname)
_log.info("Node %s has mare than 255 columns, can't use xlwt, writing CSV to "
"%s[_LHS|_RHS].csv" % (node.name, csv_fpath))
lhs_df.to_csv(csv_fpath+"_LHS.csv")
rhs_df.to_csv(csv_fpath+"_RHS.csv")
else:
_log.info("Writing Excel sheet for %s" % node.name)
nsheets += 1
diffs_ws = wb.add_sheet(("%s_DIFFS" % node.short_name)[-31:])
lhs_ws = wb.add_sheet(("%s_LHS" % node.short_name)[-31:])
rhs_ws = wb.add_sheet(("%s_RHS" % node.short_name)[-31:])
for ws, df in ((lhs_ws, lhs_df), (rhs_ws, rhs_df)):
for row, value in enumerate(df.index):
ws.write(row + 1, 0, value, date_style)
for col_i, col_name in enumerate(df.columns):
ws.write(0, col_i + 1, str(col_name))
col = df[col_name]
for row_i, value in enumerate(col):
if np.isnan(value):
ws.row(row_i + 1).set_cell_error(col_i + 1, "#NUM!")
else:
ws.write(row_i + 1, col_i + 1, value)
max_cols = max(len(lhs_columns), len(rhs_columns))
max_rows = max(len(lhs_df.index), len(rhs_df.index))
tolerance, is_abs = self.get_tolerance(node)
for row, value in enumerate(lhs_df.index):
diffs_ws.write(row + 1, 0,
xlwt.Formula("IF(EXACT(%(l)s,%(r)s),%(l)s,\"ERROR\")" % {
"l" : _to_range(row + 1, 0, lhs_ws),
"r" : _to_range(row + 1, 0, rhs_ws)}),
date_style)
for col_i, col_name in enumerate(lhs_df.columns):
diffs_ws.write(0, col_i + 1,
xlwt.Formula("IF(EXACT(%(l)s,%(r)s),%(l)s,\"ERROR\")" % {
"l" : _to_range(0, col_i + 1, lhs_ws),
"r" : _to_range(0, col_i + 1, rhs_ws)}))
for col_i in xrange(1, max_cols + 1):
for row_i in xrange(1, max_rows + 1):
if is_abs:
diffs_ws.write(row_i,
col_i,
xlwt.Formula("ABS(%s-%s)" % (_to_range(row_i, col_i, lhs_ws),
_to_range(row_i, col_i, rhs_ws))))
else:
diffs_ws.write(row_i,
col_i,
xlwt.Formula("ABS((%s/%s)-1)" % (_to_range(row_i, col_i, lhs_ws),
_to_range(row_i, col_i, rhs_ws))))
if nsheets:
wb.save(filename)
|
mit
|
hsiaoyi0504/scikit-learn
|
examples/cluster/plot_kmeans_silhouette_analysis.py
|
242
|
5885
|
"""
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data, due to the presence of clusters with
below-average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
The thickness of the silhouette plot also gives a visual indication of the
cluster size. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2 is bigger owing to the grouping of the 3 sub-clusters into one big
cluster. However, when ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence of similar sizes, as can also be
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
|
bsd-3-clause
|
matthias-k/pysaliency
|
pysaliency/filter_datasets.py
|
1
|
8515
|
from __future__ import division, print_function
import numpy as np
from boltons.iterutils import chunked
from .datasets import create_subset
def train_split(stimuli, fixations, crossval_folds, fold_no, val_folds=1, test_folds=1, random=True, stratified_attributes=None):
return crossval_split(stimuli, fixations, crossval_folds, fold_no, val_folds=val_folds, test_folds=test_folds, random=random, split='train', stratified_attributes=stratified_attributes)
def validation_split(stimuli, fixations, crossval_folds, fold_no, val_folds=1, test_folds=1, random=True, stratified_attributes=None):
return crossval_split(stimuli, fixations, crossval_folds, fold_no, val_folds=val_folds, test_folds=test_folds, random=random, split='val', stratified_attributes=stratified_attributes)
def test_split(stimuli, fixations, crossval_folds, fold_no, val_folds=1, test_folds=1, random=True, stratified_attributes=None):
return crossval_split(stimuli, fixations, crossval_folds, fold_no, val_folds=val_folds, test_folds=test_folds, random=random, split='test', stratified_attributes=stratified_attributes)
def crossval_splits(stimuli, fixations, crossval_folds, fold_no, val_folds=1, test_folds=1, random=True, stratified_attributes=None):
return (
crossval_split(stimuli, fixations, crossval_folds, fold_no, val_folds=val_folds, test_folds=test_folds, random=random, split='train', stratified_attributes=stratified_attributes),
crossval_split(stimuli, fixations, crossval_folds, fold_no, val_folds=val_folds, test_folds=test_folds, random=random, split='val', stratified_attributes=stratified_attributes),
crossval_split(stimuli, fixations, crossval_folds, fold_no, val_folds=val_folds, test_folds=test_folds, random=random, split='test', stratified_attributes=stratified_attributes),
)
def crossval_split(stimuli, fixations, crossval_folds, fold_no, val_folds=1, test_folds=1, random=True, split='train', stratified_attributes=None):
train_folds, val_folds, test_folds = get_crossval_folds(crossval_folds, fold_no, test_folds=test_folds, val_folds=val_folds)
if split == 'train':
folds = train_folds
elif split == 'val':
folds = val_folds
elif split == 'test':
folds = test_folds
else:
raise ValueError(split)
return _get_crossval_split(stimuli, fixations, crossval_folds, included_splits=folds, random=random, stratified_attributes=stratified_attributes)
def _get_crossval_split(stimuli, fixations, split_count, included_splits, random=True, stratified_attributes=None):
if stratified_attributes is not None:
return _get_stratified_crossval_split(stimuli, fixations, split_count, included_splits, random=random, stratified_attributes=stratified_attributes)
inds = list(range(len(stimuli)))
if random:
print("Using random shuffles for crossvalidation")
rst = np.random.RandomState(seed=42)
rst.shuffle(inds)
inds = list(inds)
size = int(np.ceil(len(inds) / split_count))
chunks = chunked(inds, size=size)
inds = []
for split_nr in included_splits:
inds.extend(chunks[split_nr])
stimuli, fixations = create_subset(stimuli, fixations, inds)
return stimuli, fixations
def _get_stratified_crossval_split(stimuli, fixations, split_count, included_splits, random=True, stratified_attributes=None):
from sklearn.model_selection import StratifiedKFold
labels = []
for attribute_name in stratified_attributes:
attribute_data = np.array(stimuli.attributes[attribute_name])
if attribute_data.ndim == 1:
attribute_data = attribute_data[:, np.newaxis]
labels.append(attribute_data)
labels = np.hstack(labels)
if labels.shape[1] > 1:
# StratifiedKFold doesn't support multiple labels
final_label_dict = {}
final_labels = []
for label in labels:
label = tuple(label)
if label not in final_label_dict:
final_label_dict[label] = len(final_label_dict)
final_labels.append(final_label_dict[label])
labels = np.array(final_labels)
X = np.ones((len(stimuli), 1))
rst = np.random.RandomState(42)
inds = []
k_fold = StratifiedKFold(n_splits=split_count, shuffle=random, random_state=rst)
for i, (train_index, test_index) in enumerate(k_fold.split(X, labels)):
if i in included_splits:
inds.extend(test_index)
stimuli, fixations = create_subset(stimuli, fixations, inds)
return stimuli, fixations
def create_train_folds(crossval_folds, val_folds, test_folds):
all_folds = list(range(crossval_folds))
if isinstance(val_folds, int):
val_folds = [val_folds]
if isinstance(test_folds, int):
test_folds = [test_folds]
train_folds = [f for f in all_folds if not (f in val_folds or f in test_folds)]
return train_folds, val_folds, test_folds
def get_crossval_folds(crossval_folds, crossval_no, test_folds=1, val_folds=1):
assert test_folds <= 1
if test_folds:
_test_folds = [crossval_no]
_val_folds = [(crossval_no - i - 1) % crossval_folds for i in range(val_folds)]
else:
assert val_folds == 1
_test_folds = [crossval_no]
_val_folds = [crossval_no]
_train_folds, _val_folds, _test_folds = create_train_folds(crossval_folds, _val_folds, _test_folds)
return _train_folds, _val_folds, _test_folds
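# Illustrative sketch of the fold layout: with 10 folds and fold_no=0, fold 0
# is the test fold, fold 9 (its cyclic predecessor) is validation, and the
# remaining folds are used for training.
def _example_crossval_fold_layout():
    train, val, test = get_crossval_folds(10, 0, test_folds=1, val_folds=1)
    assert test == [0]
    assert val == [9]
    assert train == [1, 2, 3, 4, 5, 6, 7, 8]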
def iterate_crossvalidation(stimuli, fixations, crossval_folds, val_folds=1, test_folds=1, random=True, stratified_attributes=None):
"""iterate over crossvalidation folds. Each fold will yield
train_stimuli, train_fixations, val_stimuli, val_fixations, test_stimuli, test_fixations
"""
kwargs = {
'crossval_folds': crossval_folds,
'val_folds': val_folds,
'test_folds': test_folds,
'random': random,
'stratified_attributes': stratified_attributes,
}
for fold_no in range(crossval_folds):
train_stimuli, train_fixations = train_split(
stimuli, fixations,
fold_no=fold_no,
**kwargs)
val_stimuli, val_fixations = validation_split(
stimuli, fixations,
fold_no=fold_no,
**kwargs)
test_stimuli, test_fixations = test_split(
stimuli, fixations,
fold_no=fold_no,
**kwargs)
yield train_stimuli, train_fixations, val_stimuli, val_fixations, test_stimuli, test_fixations
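# Usage sketch (illustrative; `stimuli`, `fixations` and `fit_model` are
# placeholders for objects/functions defined elsewhere):
#
#   for (train_stimuli, train_fixations,
#        val_stimuli, val_fixations,
#        test_stimuli, test_fixations) in iterate_crossvalidation(
#            stimuli, fixations, crossval_folds=10):
#       fit_model(train_stimuli, train_fixations)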
def parse_list_of_intervals(description):
"""parses a string as "1.0:3.0,5.0:5.6,7" into [(1.0, 3.0), (5.0, 5.6), (7,)]
"""
intervals = description.split(',')
return [parse_interval(interval) for interval in intervals]
def parse_interval(interval):
parts = interval.split(':')
if len(parts) not in [1, 2]:
raise ValueError("Invalid interval", interval)
return tuple([float(part.strip()) for part in parts])
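# Worked examples of the interval syntax documented above (illustrative sketch):
def _example_parse_intervals():
    assert parse_interval("0:10") == (0.0, 10.0)
    assert parse_list_of_intervals("1.0:3.0,5.0:5.6,7") == [
        (1.0, 3.0), (5.0, 5.6), (7.0,)]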
def filter_fixations_by_number(fixations, intervals):
intervals = _check_intervals(intervals, type=int)
inds = np.zeros_like(fixations.x, dtype=bool)
for n1, n2 in intervals:
_inds = np.logical_and(fixations.lengths >= n1, fixations.lengths < n2)
inds = np.logical_or(inds, _inds)
return fixations[inds]
def filter_stimuli_by_number(stimuli, fixations, intervals):
intervals = _check_intervals(intervals, type=int)
mask = np.zeros(len(stimuli), dtype=bool)
for n1, n2 in intervals:
mask[n1:n2] = True
indices = list(np.nonzero(mask)[0])
return create_subset(stimuli, fixations, indices)
def _check_intervals(intervals, type=float):
if isinstance(intervals, (float, int)):
intervals = [intervals]
new_intervals = []
for interval in intervals:
new_intervals.append(_check_interval(interval, type=type))
return new_intervals
def _check_interval(interval, type=float):
if isinstance(interval, (float, int)):
interval = [interval]
if len(interval) == 1:
if type != int:
raise ValueError("single-value intervals only allowed for integer data!")
interval = [interval[0], interval[0] + 1]
if len(interval) != 2:
raise ValueError("Intervals need two values", interval)
new_interval = []
for value in interval:
if type(value) != value:
raise ValueError("Invalid value for this type", value, type)
new_interval.append(type(value))
return tuple(new_interval)
|
mit
|
krez13/scikit-learn
|
examples/svm/plot_iris.py
|
225
|
3252
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
|
bsd-3-clause
|
raghavrv/scikit-learn
|
sklearn/ensemble/tests/test_forest.py
|
6
|
42990
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.utils.fixes import comb
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.94, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", "mae", "friedman_mse")):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
importances_parallel = est.feature_importances_
assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse", "mae"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in np.bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check picklability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
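# Note on the shape assertions above: for multi-output classifiers,
# predict_proba and predict_log_proba return a Python list with one
# (n_samples, n_classes_k) array per output, so the first output here has
# 2 classes and the second has 4.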
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
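# Note: RandomTreesEmbedding one-hot encodes the leaf each sample reaches in
# every tree, so `sparse_output` only changes the container (CSR matrix vs.
# dense ndarray), not the encoded values -- which is what the two tests above
# verify.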
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Using a Python 2.x list as the sample_weight parameter used to raise
# an exception. This test makes sure such code will now run correctly.
clf = ForestClassifier()
sample_weight = [1.] * len(iris.data)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='balanced', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
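# Illustrative sketch (not part of the test suite): the warm_start pattern
# exercised by check_warm_start above. Assumes the RandomForestClassifier
# import used elsewhere in this file.
def _example_warm_start_usage(X, y):
    clf = RandomForestClassifier(n_estimators=10, warm_start=True,
                                 random_state=0)
    clf.fit(X, y)                    # grows the first 10 trees
    clf.set_params(n_estimators=20)
    clf.fit(X, y)                    # adds 10 more trees, keeping the old ones
    return len(clf.estimators_)      # 20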
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
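# Illustrative sketch (not part of the test suite): slicing the decision_path
# output per estimator. decision_path returns a sparse indicator matrix of
# shape (n_samples, total_node_count) together with n_nodes_ptr, whose
# consecutive entries delimit the column block belonging to each tree.
def _example_decision_path_usage(est, X):
    indicator, n_nodes_ptr = est.decision_path(X)
    # Columns [n_nodes_ptr[i], n_nodes_ptr[i + 1]) belong to estimator i.
    first_tree_block = indicator[:, n_nodes_ptr[0]:n_nodes_ptr[1]]
    return first_tree_block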
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
def test_min_impurity_split():
# Test if min_impurity_split of base estimators is set
# Regression test for #8006
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [RandomForestClassifier, RandomForestRegressor,
ExtraTreesClassifier, ExtraTreesRegressor]
for Estimator in all_estimators:
est = Estimator(min_impurity_split=0.1)
est = assert_warns_message(DeprecationWarning, "min_impurity_decrease",
est.fit, X, y)
for tree in est.estimators_:
assert_equal(tree.min_impurity_split, 0.1)
def test_min_impurity_decrease():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [RandomForestClassifier, RandomForestRegressor,
ExtraTreesClassifier, ExtraTreesRegressor]
for Estimator in all_estimators:
est = Estimator(min_impurity_decrease=0.1)
est.fit(X, y)
for tree in est.estimators_:
# Simply check if the parameter is passed on correctly. Tree tests
# will suffice for the actual working of this param
assert_equal(tree.min_impurity_decrease, 0.1)
|
bsd-3-clause
|
BorisJeremic/Real-ESSI-Examples
|
education_examples/_Chapter_Modeling_and_Simulation_Examples_Static_Examples/Contact_Tangential_Interface_Behaviour_HardContact_Elastic_Perfectly_Plastic_Shear_Model/plot.py
|
6
|
1388
|
#!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import sys
import numpy as np;
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Tangential_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
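# Only the x components are actually plotted below; the resultant magnitudes
# computed above are overwritten by the next two lines.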
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure()
plt.plot(shear_strain,shear_stress/normal_stress,'-k',linewidth=4)
plt.xlabel(r"Shear Strain $\gamma$")
plt.ylabel(r"Normalized Shear $\tau/\sigma$")
plt.savefig("Contact_Tangential_Interface_Behavour.pdf", bbox_inches='tight')
plt.show()
# #####################################################################
|
cc0-1.0
|
QLaboratory/QlabChallengerRepo
|
ai_challenger_scene/resnet_predict_train_validation.py
|
1
|
10282
|
# -*- coding: utf-8 -*-
import os
import gc
import numpy as np
from PIL import Image
import json
import os
from datetime import datetime
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from sklearn.metrics import log_loss
from scale_layer import Scale
import sys
sys.setrecursionlimit(3000)
SCENE_TEST_DATA_FOLDER_PATH = "/home/yan/Desktop/QlabChallengerRepo/dataset_224/scene_validation_content_resize"
PREDICT_MODEL = "/home/yan/Desktop/QlabChallengerRepo/ai_challenger_scene/trained_BEST_model/RESNET_MODEL_WEIGHTS_2017_11_08_20_54_01.h5"
def GetJpgList(p):
if p == "":
return []
#p = p.replace("/", "\\")
if p[-1] != "/":
p = p + "/"
file_list = os.listdir(p)
jpg_list = []
for i in file_list:
if os.path.isfile(p + i):
name, suffix = os.path.splitext(p + i)
if ('.jpg' == suffix):
jpg_list.append(i)
return jpg_list
def identity_block(input_tensor, kernel_size, filters, stage, block):
'''The identity_block is the block that has no conv layer at shortcut
# Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
'''
eps = 1.1e-5
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
scale_name_base = 'scale' + str(stage) + block + '_branch'
x = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a', bias=False)(input_tensor)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2a')(x)
x = Scale(axis=bn_axis, name=scale_name_base + '2a')(x)
x = Activation('relu', name=conv_name_base + '2a_relu')(x)
x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)
x = Convolution2D(nb_filter2, kernel_size, kernel_size,
name=conv_name_base + '2b', bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2b')(x)
x = Scale(axis=bn_axis, name=scale_name_base + '2b')(x)
x = Activation('relu', name=conv_name_base + '2b_relu')(x)
x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c', bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2c')(x)
x = Scale(axis=bn_axis, name=scale_name_base + '2c')(x)
x = merge([x, input_tensor], mode='sum', name='res' + str(stage) + block)
x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
'''conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
'''
eps = 1.1e-5
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
scale_name_base = 'scale' + str(stage) + block + '_branch'
x = Convolution2D(nb_filter1, 1, 1, subsample=strides,
name=conv_name_base + '2a', bias=False)(input_tensor)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2a')(x)
x = Scale(axis=bn_axis, name=scale_name_base + '2a')(x)
x = Activation('relu', name=conv_name_base + '2a_relu')(x)
x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)
x = Convolution2D(nb_filter2, kernel_size, kernel_size,
name=conv_name_base + '2b', bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2b')(x)
x = Scale(axis=bn_axis, name=scale_name_base + '2b')(x)
x = Activation('relu', name=conv_name_base + '2b_relu')(x)
x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c', bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2c')(x)
x = Scale(axis=bn_axis, name=scale_name_base + '2c')(x)
shortcut = Convolution2D(nb_filter3, 1, 1, subsample=strides,
name=conv_name_base + '1', bias=False)(input_tensor)
shortcut = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '1')(shortcut)
shortcut = Scale(axis=bn_axis, name=scale_name_base + '1')(shortcut)
x = merge([x, shortcut], mode='sum', name='res' + str(stage) + block)
x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)
return x
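# Note: identity_block above adds the input directly to the residual branch,
# while conv_block projects the shortcut through a (possibly strided) 1x1
# convolution so that the channel count and spatial size match before the sum.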
def resnet152_model(img_rows, img_cols, color_type=1, num_classes=None):
"""
Resnet 152 Model for Keras
Model Schema and layer naming follow that of the original Caffe implementation
https://github.com/KaimingHe/deep-residual-networks
ImageNet Pretrained Weights
Theano: https://drive.google.com/file/d/0Byy2AcGyEVxfZHhUT3lWVWxRN28/view?usp=sharing
TensorFlow: https://drive.google.com/file/d/0Byy2AcGyEVxfeXExMzNNOHpEODg/view?usp=sharing
Parameters:
img_rows, img_cols - resolution of inputs
channel - 1 for grayscale, 3 for color
num_classes - number of class labels for our classification task
"""
eps = 1.1e-5
# Handle Dimension Ordering for different backends
global bn_axis
if K.image_dim_ordering() == 'tf':
bn_axis = 3
img_input = Input(shape=(img_rows, img_cols, color_type), name='data')
else:
bn_axis = 1
img_input = Input(shape=(color_type, img_rows, img_cols), name='data')
x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1', bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name='bn_conv1')(x)
x = Scale(axis=bn_axis, name='scale_conv1')(x)
x = Activation('relu', name='conv1_relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
for i in range(1,8):
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b'+str(i))
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
for i in range(1,36):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b'+str(i))
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
# Truncate and replace softmax layer for transfer learning
# Cannot use model.layers.pop() since model is not of Sequential() type
# The method below works since pre-trained weights are stored in layers but not in the model
x_newfc = AveragePooling2D((7, 7), name='avg_pool')(x)
x_newfc = Flatten()(x_newfc)
x_newfc = Dense(num_classes, activation='softmax', name='fc8')(x_newfc)
model = Model(img_input, x_newfc)
gc.collect()
model.load_weights(PREDICT_MODEL, by_name=True)
# Learning rate is changed to 0.001
sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
return model
if __name__ == '__main__':
    # Predict on the scene validation set with the fine-tuned ResNet-152 weights.
img_rows, img_cols = 224, 224 # Resolution of inputs
channel = 3
num_classes = 80
# batch_size = 1
batch_size = 8
# nb_epoch = 10
nb_epoch = 1
# Load Scene data. Please implement your own load_data() module for your own dataset
if os.path.exists(SCENE_TEST_DATA_FOLDER_PATH):
test_data_files = GetJpgList(SCENE_TEST_DATA_FOLDER_PATH)
else:
        print('Test data folder cannot be found ...')
# model = load_model(LAST_SAVED_MODEL)
model = resnet152_model(img_rows=img_rows, img_cols=img_cols, color_type=channel, num_classes=num_classes)
# Make predictions
predict_json = []
count = 1
totalnum = str(len(test_data_files))
# predict_annotation_dic_temp = {}
# predict_annotation_dic_temp['image_id'] = "1.jpg"
# predict_annotation_dic_temp['label_id'] = [1, 2, 3]
# predict_json.append(predict_annotation_dic_temp)
# predict_annotation_dic_temp = {}
# predict_annotation_dic_temp['image_id'] = "2.jpg"
# predict_annotation_dic_temp['label_id'] = [2, 3, 4]
# predict_json.append(predict_annotation_dic_temp)
for i in test_data_files:
im = Image.open(os.path.join(SCENE_TEST_DATA_FOLDER_PATH, i))
im_array = np.array(im).reshape(1, img_rows, img_cols, channel)
predictions_valid = model.predict(im_array, verbose=0)
predict_annotation_dic_temp = {}
predict_annotation_dic_temp['image_id'] = i
predict_label_id = predictions_valid[0]
predict_annotation_dic_temp['label_id'] = predict_label_id.tolist()
if (count % 100 == 0):
print(str(count) + "/" + totalnum)
count += 1
predict_json.append(predict_annotation_dic_temp)
predict_json_file_path = open("/home/yan/Desktop/ResNet152" + "_predict_validation.json", "w")
json.dump(predict_json, predict_json_file_path)
gc.collect()
|
mit
|
UKPLab/semeval2017-scienceie
|
code/stack5.py
|
1
|
6678
|
from __future__ import division
__author__ = "Ashish Airon"
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import train_test_split
from sklearn.metrics import precision_recall_fscore_support, confusion_matrix
import xgboost as xgb
import pandas as pd
import numpy as np
import time
import sys
import os
if sys.version_info < (3,):
    import cPickle
else:
    import _pickle as cPickle
def check_blend(train, test):
'''
    Sanity check that the blended train and test lists contain the same number of classifier blocks.
'''
if len(train) != len(test):
print("Length mismatch error of the blended dataset")
else :
print("All ok")
def model_withValidation(X_train_total, Y_train_total,X_test=None,Y_test=None,words_test=None,indices2labels=None,hiddenDim=250, filename_x = "none", filename_y = "none"):
X_train, X_dev, Y_train, Y_dev = train_test_split(X_train_total, Y_train_total, test_size=0.10, random_state=0)
model = Sequential()
model.add(Dense(output_dim=hiddenDim, input_dim=X_train.shape[1]))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dense(3))
model.add(Activation("softmax"))
model.compile(loss='categorical_crossentropy', optimizer='adamax', metrics=['accuracy'])
weightsPath = "./tmp/myfooo2%s.dat"%(time.time())
checkpointer = ModelCheckpoint(filepath=weightsPath, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, verbose=2, nb_epoch=100, batch_size=32, validation_data=(X_dev,Y_dev),callbacks=[checkpointer])
model.load_weights(weightsPath)
loss, acc = model.evaluate(X_test,Y_test, batch_size=32)
print("loss : %0.5f Accuracy :%0.5f"%(loss,acc))
cf = confusion_matrix(Y_test[:,1],model.predict_classes(X_test))
print(cf)
predictions = model.predict_classes(X_test)
print("-->",predictions)
return model,predictions
def stack_multiclass(X, y, XTest, shuffle =False, n_folds = 10, num_class = 3, filename_x = "none", filename_y = "none"):
    '''
    Stacking method for multi-class.
    Parameters :
    X : Numpy training data of size (number of samples X features)
    y : Numpy training labels of size (number of samples,)
    XTest : Numpy testing data of size (number of samples X features)
    shuffle : Whether or not to shuffle the training data. Default = False
    n_folds : Number of folds to train the training data on. Default = 10
    num_class : The number of classes in the dataset. Default = 3
    Returns :
    A numpy blended train and test set of size (number of samples X (number of classifiers X (number of classes - 1))), plus the list of fitted level-1 classifiers.
    '''
if shuffle:
idx = np.random.permutation(y.size)
X = X[idx]
y = y[idx]
skf = list(StratifiedKFold(y, n_folds))
clfs = [
RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion='gini'),
RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion='entropy'),
ExtraTreesClassifier(n_estimators=100, n_jobs=-1, criterion='gini'),
ExtraTreesClassifier(n_estimators=100, n_jobs=-1, criterion='entropy'),
xgb.XGBClassifier(objective='multi:softprob',silent =False, n_estimators=40)]
dataset_blend_train_list = []
for j, clf in enumerate(clfs):
dataset_blend_train_list.append(np.zeros((X.shape[0], num_class-1 )))
dataset_blend_test_list = []
for j, clf in enumerate(clfs):
dataset_blend_test_list.append(np.zeros((XTest.shape[0], num_class-1 )))
for j, clf in enumerate(clfs):
print(j, clf)
dataset_blend_test_j_list = []
for ii in range(n_folds):
dataset_blend_test_j_list.append(np.zeros((XTest.shape[0], num_class-1)))
for i, (train, test) in enumerate(skf):
print("Fold Number : ", i)
X_train_N = X[train]
y_train_N= y[train]
X_test_N = X[test]
y_test_N = y[test]
clf.fit(X_train_N, y_train_N)
pred_prob_list = clf.predict_proba(X_test_N)
cf = confusion_matrix(y_test_N, clf.predict(X_test_N))
#print(cf)
dataset_blend_train_list[j][test] = pred_prob_list[:,:-1]
print(dataset_blend_train_list[j].shape)
dataset_blend_test_j_list[i][:, :] = clf.predict_proba(XTest)[:,:-1]
temp =0
for ff in range(n_folds):
temp += dataset_blend_test_j_list[ff]
# print "TEMP",temp/n_folds
dataset_blend_test_list[j] = temp/n_folds
check_blend(dataset_blend_train_list, dataset_blend_test_list)
# This needs to be changed depending on the number of classifiers.
blend_train = np.c_[dataset_blend_train_list[0], dataset_blend_train_list[1], dataset_blend_train_list[2], dataset_blend_train_list[3], dataset_blend_train_list[4]]
blend_test = np.c_[dataset_blend_test_list[0], dataset_blend_test_list[1], dataset_blend_test_list[2], dataset_blend_test_list[3], dataset_blend_test_list[4]]
return blend_train, blend_test, clfs
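# Illustrative sketch (not part of the pipeline above): how stack_multiclass
# and model_withValidation are typically chained. The feature file names are
# hypothetical placeholders.
def _example_stacking_usage(train_file="train.features", test_file="test.features"):
    X, Y = read_input(train_file)            # Y comes back one-hot encoded
    XTest, YTest = read_input(test_file)
    y_labels = np.argmax(Y, axis=1)          # stack_multiclass expects 1-d labels
    blend_train, blend_test, _ = stack_multiclass(X, y_labels, XTest, n_folds=10)
    # The blended class probabilities become the features of the level-2 model.
    return model_withValidation(blend_train, Y, X_test=blend_test, Y_test=YTest)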
def read_input(filename):
data = pd.read_table(filename, header=None, delim_whitespace=True)
# Removing new line
data_new = data.dropna()
X = data_new.iloc[:, 2:]
Y = data_new.iloc[:, 1:2].replace({'O': 0, 'Arg-I': 1, 'Arg-B': 2})
return X.values, np_utils.to_categorical(Y.values)
|
apache-2.0
|
venkatarun95/genericCC
|
parsing_scripts/plot_ellipse.py
|
1
|
3785
|
# File copied gratefully from: https://github.com/joferkington/oost_paper_code/blob/master/error_ellipse.py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
def plot_point_cov(points, weights=None, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma ellipse based on the mean and covariance of a point
"cloud" (points, an Nx2 array).
Parameters
----------
points : An Nx2 array of the data points.
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
        weights : in case the mean and covariance are to be weighted, an Nx2 array of per-point weights (one weight per coordinate)
Additional keyword arguments are passed on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
#weights = None
if weights is None:
pos = points.mean(axis=0)
cov = np.cov(points, rowvar=False)
else:
pos = [0.0, 0.0]
cov = [[0.0, 0.0], [0.0, 0.0]]
pos_denom = [0.0, 0.0]
cov_denom = [[0.0, 0.0], [0.0, 0.0]]
for pt, wt in zip(points, weights):
pos[0], pos[1] = pos[0] + pt[0]*wt[0], pos[1] + pt[1]*wt[1]
pos_denom[0] += wt[0]
pos_denom[1] += wt[1]
pos[0] /= pos_denom[0]
pos[1] /= pos_denom[1]
for pt, wt in zip(points, weights):
cov[0][0] += (wt[0]*(pt[0] - pos[0]))**2
cov[1][1] += (wt[1]*(pt[1] - pos[1]))**2
cov[0][1] += wt[0]*wt[1]*(pt[0] - pos[0])*(pt[1] - pos[1])
cov_denom[0][0] += wt[0]**2
cov_denom[1][1] += wt[1]**2
cov_denom[0][1] += wt[0]*wt[1]
cov_denom[1][0] = cov_denom[0][1]
cov[1][1] /= cov_denom[1][1]
cov[0][0] /= cov_denom[0][0]
cov[0][1] /= cov_denom[0][1]
cov[1][0] = cov[0][1]
return plot_cov_ellipse(cov, pos, nstd, ax, **kwargs)
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
        Additional keyword arguments are passed on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
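# Geometry note for plot_cov_ellipse: the eigenvectors of the 2x2 covariance
# matrix give the ellipse axes and the eigenvalues are the variances along
# those axes, so each semi-axis has length nstd * sqrt(eigenvalue) and the
# "full" width/height above is 2 * nstd * sqrt(eigenvalue).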
if __name__ == '__main__':
#-- Example usage -----------------------
# Generate some random, correlated data
points = np.random.multivariate_normal(
mean=(1,1), cov=[[0.4, 9],[9, 10]], size=1000
)
# Plot the raw points...
x, y = points.T
plt.plot(x, y, 'ro')
# Plot a transparent 3 standard deviation covariance ellipse
plot_point_cov(points, nstd=3, alpha=0.5, color='green')
plt.show()
|
gpl-2.0
|
enriquesanchezb/practica_utad_2016
|
venv/lib/python2.7/site-packages/nltk/parse/transitionparser.py
|
3
|
31999
|
# Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers
#
# Author: Long Duong <[email protected]>
#
# Copyright (C) 2001-2015 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import pickle
from os import remove
from copy import deepcopy
from operator import itemgetter
try:
from numpy import array
from scipy import sparse
from sklearn.datasets import load_svmlight_file
from sklearn import svm
except ImportError:
pass
from nltk.parse import ParserI, DependencyGraph, DependencyEvaluator
class Configuration(object):
"""
Class for holding configuration which is the partial analysis of the input sentence.
The transition based parser aims at finding set of operators that transfer the initial
configuration to the terminal configuration.
The configuration includes:
- Stack: for storing partially proceeded words
- Buffer: for storing remaining input words
- Set of arcs: for storing partially built dependency tree
This class also provides a method to represent a configuration as list of features.
"""
def __init__(self, dep_graph):
"""
:param dep_graph: the representation of an input in the form of dependency graph.
:type dep_graph: DependencyGraph where the dependencies are not specified.
"""
# dep_graph.nodes contain list of token for a sentence
self.stack = [0] # The root element
self.buffer = list(range(1, len(dep_graph.nodes))) # The rest is in the buffer
self.arcs = [] # empty set of arc
self._tokens = dep_graph.nodes
self._max_address = len(self.buffer)
def __str__(self):
return 'Stack : ' + \
str(self.stack) + ' Buffer : ' + str(self.buffer) + ' Arcs : ' + str(self.arcs)
def _check_informative(self, feat, flag=False):
"""
Check whether a feature is informative
        The flag controls whether "_" is informative or not
"""
if feat is None:
return False
if feat == '':
return False
if flag is False:
if feat == '_':
return False
return True
def extract_features(self):
"""
        Extract the set of features for the current configuration. Implements the standard features described in
        Table 3.2 (page 31) of the Dependency Parsing book by Sandra Kubler, Ryan McDonald, Joakim Nivre.
Please note that these features are very basic.
:return: list(str)
"""
result = []
# Todo : can come up with more complicated features set for better
# performance.
if len(self.stack) > 0:
# Stack 0
stack_idx0 = self.stack[len(self.stack) - 1]
token = self._tokens[stack_idx0]
if self._check_informative(token['word'], True):
result.append('STK_0_FORM_' + token['word'])
if 'lemma' in token and self._check_informative(token['lemma']):
result.append('STK_0_LEMMA_' + token['lemma'])
if self._check_informative(token['tag']):
result.append('STK_0_POS_' + token['tag'])
if 'feats' in token and self._check_informative(token['feats']):
feats = token['feats'].split("|")
for feat in feats:
result.append('STK_0_FEATS_' + feat)
# Stack 1
if len(self.stack) > 1:
stack_idx1 = self.stack[len(self.stack) - 2]
token = self._tokens[stack_idx1]
if self._check_informative(token['tag']):
result.append('STK_1_POS_' + token['tag'])
# Left most, right most dependency of stack[0]
left_most = 1000000
right_most = -1
dep_left_most = ''
dep_right_most = ''
for (wi, r, wj) in self.arcs:
if wi == stack_idx0:
if (wj > wi) and (wj > right_most):
right_most = wj
dep_right_most = r
if (wj < wi) and (wj < left_most):
left_most = wj
dep_left_most = r
if self._check_informative(dep_left_most):
result.append('STK_0_LDEP_' + dep_left_most)
if self._check_informative(dep_right_most):
result.append('STK_0_RDEP_' + dep_right_most)
# Check Buffered 0
if len(self.buffer) > 0:
# Buffer 0
buffer_idx0 = self.buffer[0]
token = self._tokens[buffer_idx0]
if self._check_informative(token['word'], True):
result.append('BUF_0_FORM_' + token['word'])
if 'lemma' in token and self._check_informative(token['lemma']):
result.append('BUF_0_LEMMA_' + token['lemma'])
if self._check_informative(token['tag']):
result.append('BUF_0_POS_' + token['tag'])
if 'feats' in token and self._check_informative(token['feats']):
feats = token['feats'].split("|")
for feat in feats:
result.append('BUF_0_FEATS_' + feat)
# Buffer 1
if len(self.buffer) > 1:
buffer_idx1 = self.buffer[1]
token = self._tokens[buffer_idx1]
if self._check_informative(token['word'], True):
result.append('BUF_1_FORM_' + token['word'])
if self._check_informative(token['tag']):
result.append('BUF_1_POS_' + token['tag'])
if len(self.buffer) > 2:
buffer_idx2 = self.buffer[2]
token = self._tokens[buffer_idx2]
if self._check_informative(token['tag']):
result.append('BUF_2_POS_' + token['tag'])
if len(self.buffer) > 3:
buffer_idx3 = self.buffer[3]
token = self._tokens[buffer_idx3]
if self._check_informative(token['tag']):
result.append('BUF_3_POS_' + token['tag'])
# Left most, right most dependency of stack[0]
left_most = 1000000
right_most = -1
dep_left_most = ''
dep_right_most = ''
for (wi, r, wj) in self.arcs:
if wi == buffer_idx0:
if (wj > wi) and (wj > right_most):
right_most = wj
dep_right_most = r
if (wj < wi) and (wj < left_most):
left_most = wj
dep_left_most = r
if self._check_informative(dep_left_most):
result.append('BUF_0_LDEP_' + dep_left_most)
if self._check_informative(dep_right_most):
result.append('BUF_0_RDEP_' + dep_right_most)
return result
class Transition(object):
"""
This class defines a set of transition which is applied to a configuration to get another configuration
Note that for different parsing algorithm, the transition is different.
"""
# Define set of transitions
LEFT_ARC = 'LEFTARC'
RIGHT_ARC = 'RIGHTARC'
SHIFT = 'SHIFT'
REDUCE = 'REDUCE'
def __init__(self, alg_option):
"""
:param alg_option: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm
:type alg_option: str
"""
self._algo = alg_option
if alg_option not in [
TransitionParser.ARC_STANDARD,
TransitionParser.ARC_EAGER]:
raise ValueError(" Currently we only support %s and %s " %
(TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER))
def left_arc(self, conf, relation):
"""
        Note that the left-arc operation is the same for both arc-standard and arc-eager except for its precondition
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
return -1
if conf.buffer[0] == 0:
# here is the Root element
return -1
idx_wi = conf.stack[len(conf.stack) - 1]
flag = True
if self._algo == TransitionParser.ARC_EAGER:
for (idx_parent, r, idx_child) in conf.arcs:
if idx_child == idx_wi:
flag = False
if flag:
conf.stack.pop()
idx_wj = conf.buffer[0]
conf.arcs.append((idx_wj, relation, idx_wi))
else:
return -1
def right_arc(self, conf, relation):
"""
Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
return -1
if self._algo == TransitionParser.ARC_STANDARD:
idx_wi = conf.stack.pop()
idx_wj = conf.buffer[0]
conf.buffer[0] = idx_wi
conf.arcs.append((idx_wi, relation, idx_wj))
else: # arc-eager
idx_wi = conf.stack[len(conf.stack) - 1]
idx_wj = conf.buffer.pop(0)
conf.stack.append(idx_wj)
conf.arcs.append((idx_wi, relation, idx_wj))
def reduce(self, conf):
"""
Note that the algorithm for reduce is only available for arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if self._algo != TransitionParser.ARC_EAGER:
return -1
if len(conf.stack) <= 0:
return -1
idx_wi = conf.stack[len(conf.stack) - 1]
flag = False
for (idx_parent, r, idx_child) in conf.arcs:
if idx_child == idx_wi:
flag = True
if flag:
conf.stack.pop() # reduce it
else:
return -1
def shift(self, conf):
"""
Note that the algorithm for shift is the SAME for arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if len(conf.buffer) <= 0:
return -1
idx_wi = conf.buffer.pop(0)
conf.stack.append(idx_wi)
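# Illustrative sketch (not part of NLTK): how Configuration and Transition fit
# together. `dep_graph` is assumed to be a DependencyGraph for a sentence with
# at least two words.
def _example_transition_usage(dep_graph):
    conf = Configuration(dep_graph)
    operation = Transition(TransitionParser.ARC_EAGER)
    operation.shift(conf)             # move the first buffer word onto the stack
    operation.right_arc(conf, 'dep')  # arc from the stack top to the buffer front
    return conf.arcs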
class TransitionParser(ParserI):
"""
Class for transition based parser. Implement 2 algorithms which are "arc-standard" and "arc-eager"
"""
ARC_STANDARD = 'arc-standard'
ARC_EAGER = 'arc-eager'
def __init__(self, algorithm):
"""
:param algorithm: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm
:type algorithm: str
"""
if not(algorithm in [self.ARC_STANDARD, self.ARC_EAGER]):
raise ValueError(" Currently we only support %s and %s " %
(self.ARC_STANDARD, self.ARC_EAGER))
self._algorithm = algorithm
self._dictionary = {}
self._transition = {}
self._match_transition = {}
def _get_dep_relation(self, idx_parent, idx_child, depgraph):
p_node = depgraph.nodes[idx_parent]
c_node = depgraph.nodes[idx_child]
if c_node['word'] is None:
return None # Root word
if c_node['head'] == p_node['address']:
return c_node['rel']
else:
return None
def _convert_to_binary_features(self, features):
"""
:param features: list of feature string which is needed to convert to binary features
:type features: list(str)
:return : string of binary features in libsvm format which is 'featureID:value' pairs
"""
unsorted_result = []
for feature in features:
self._dictionary.setdefault(feature, len(self._dictionary))
unsorted_result.append(self._dictionary[feature])
# Default value of each feature is 1.0
return ' '.join(str(featureID) + ':1.0' for featureID in sorted(unsorted_result))
def _is_projective(self, depgraph):
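        # A dependency graph is projective iff no arcs cross: for every arc
        # (parent, child), each word strictly between them may only be linked
        # to words inside the [child, parent] span. The nested loops below
        # check exactly that.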
arc_list = []
for key in depgraph.nodes:
node = depgraph.nodes[key]
if 'head' in node:
childIdx = node['address']
parentIdx = node['head']
if parentIdx is not None:
arc_list.append((parentIdx, childIdx))
for (parentIdx, childIdx) in arc_list:
# Ensure that childIdx < parentIdx
if childIdx > parentIdx:
temp = childIdx
childIdx = parentIdx
parentIdx = temp
for k in range(childIdx + 1, parentIdx):
for m in range(len(depgraph.nodes)):
if (m < childIdx) or (m > parentIdx):
if (k, m) in arc_list:
return False
if (m, k) in arc_list:
return False
return True
def _write_to_file(self, key, binary_features, input_file):
"""
write the binary features to input file and update the transition dictionary
"""
self._transition.setdefault(key, len(self._transition) + 1)
self._match_transition[self._transition[key]] = key
input_str = str(self._transition[key]) + ' ' + binary_features + '\n'
input_file.write(input_str.encode('utf-8'))
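    # Each line written by _write_to_file is one libsvm-format training
    # example: "<transition id> <featureID>:1.0 <featureID>:1.0 ...", where
    # the transition id indexes self._match_transition and the feature ids
    # come from self._dictionary.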
def _create_training_examples_arc_std(self, depgraphs, input_file):
"""
Create the training example in the libsvm format and write it to the input_file.
        Reference : Page 32, Chapter 3. Dependency Parsing by Sandra Kubler, Ryan McDonald and Joakim Nivre (2009)
"""
operation = Transition(self.ARC_STANDARD)
count_proj = 0
training_seq = []
for depgraph in depgraphs:
if not self._is_projective(depgraph):
continue
count_proj += 1
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
b0 = conf.buffer[0]
features = conf.extract_features()
binary_features = self._convert_to_binary_features(features)
if len(conf.stack) > 0:
s0 = conf.stack[len(conf.stack) - 1]
# Left-arc operation
rel = self._get_dep_relation(b0, s0, depgraph)
if rel is not None:
key = Transition.LEFT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.left_arc(conf, rel)
training_seq.append(key)
continue
# Right-arc operation
rel = self._get_dep_relation(s0, b0, depgraph)
if rel is not None:
precondition = True
# Get the max-index of buffer
maxID = conf._max_address
for w in range(maxID + 1):
if w != b0:
relw = self._get_dep_relation(b0, w, depgraph)
if relw is not None:
if (b0, relw, w) not in conf.arcs:
precondition = False
if precondition:
key = Transition.RIGHT_ARC + ':' + rel
self._write_to_file(
key,
binary_features,
input_file)
operation.right_arc(conf, rel)
training_seq.append(key)
continue
# Shift operation as the default
key = Transition.SHIFT
self._write_to_file(key, binary_features, input_file)
operation.shift(conf)
training_seq.append(key)
print(" Number of training examples : " + str(len(depgraphs)))
print(" Number of valid (projective) examples : " + str(count_proj))
return training_seq
def _create_training_examples_arc_eager(self, depgraphs, input_file):
"""
Create the training example in the libsvm format and write it to the input_file.
        Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Yoav Goldberg and Joakim Nivre
"""
operation = Transition(self.ARC_EAGER)
countProj = 0
training_seq = []
for depgraph in depgraphs:
if not self._is_projective(depgraph):
continue
countProj += 1
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
b0 = conf.buffer[0]
features = conf.extract_features()
binary_features = self._convert_to_binary_features(features)
if len(conf.stack) > 0:
s0 = conf.stack[len(conf.stack) - 1]
# Left-arc operation
rel = self._get_dep_relation(b0, s0, depgraph)
if rel is not None:
key = Transition.LEFT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.left_arc(conf, rel)
training_seq.append(key)
continue
# Right-arc operation
rel = self._get_dep_relation(s0, b0, depgraph)
if rel is not None:
key = Transition.RIGHT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.right_arc(conf, rel)
training_seq.append(key)
continue
# reduce operation
flag = False
for k in range(s0):
if self._get_dep_relation(k, b0, depgraph) is not None:
flag = True
if self._get_dep_relation(b0, k, depgraph) is not None:
flag = True
if flag:
key = Transition.REDUCE
self._write_to_file(key, binary_features, input_file)
operation.reduce(conf)
training_seq.append(key)
continue
# Shift operation as the default
key = Transition.SHIFT
self._write_to_file(key, binary_features, input_file)
operation.shift(conf)
training_seq.append(key)
print(" Number of training examples : " + str(len(depgraphs)))
print(" Number of valid (projective) examples : " + str(countProj))
return training_seq
def train(self, depgraphs, modelfile):
"""
:param depgraphs : list of DependencyGraph as the training data
:type depgraphs : DependencyGraph
:param modelfile : file name to save the trained model
:type modelfile : str
"""
try:
input_file = tempfile.NamedTemporaryFile(
prefix='transition_parse.train',
dir=tempfile.gettempdir(),
delete=False)
if self._algorithm == self.ARC_STANDARD:
self._create_training_examples_arc_std(depgraphs, input_file)
else:
self._create_training_examples_arc_eager(depgraphs, input_file)
input_file.close()
# Using the temporary file to train the libsvm classifier
x_train, y_train = load_svmlight_file(input_file.name)
            # The parameters are set according to the paper:
            # Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre
# Todo : because of probability = True => very slow due to
# cross-validation. Need to improve the speed here
model = svm.SVC(
kernel='poly',
degree=2,
coef0=0,
gamma=0.2,
C=0.5,
verbose=True,
probability=True)
model.fit(x_train, y_train)
# Save the model to file name (as pickle)
pickle.dump(model, open(modelfile, 'wb'))
finally:
remove(input_file.name)
def parse(self, depgraphs, modelFile):
"""
        :param depgraphs: the list of test sentences; each sentence is represented as a dependency graph where the 'head' information is a dummy value
:type depgraphs: list(DependencyGraph)
        :param modelFile: the model file
        :type modelFile: str
:return: list (DependencyGraph) with the 'head' and 'rel' information
"""
result = []
# First load the model
model = pickle.load(open(modelFile, 'rb'))
operation = Transition(self._algorithm)
for depgraph in depgraphs:
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
features = conf.extract_features()
col = []
row = []
data = []
for feature in features:
if feature in self._dictionary:
col.append(self._dictionary[feature])
row.append(0)
data.append(1.0)
np_col = array(sorted(col)) # NB : index must be sorted
np_row = array(row)
np_data = array(data)
x_test = sparse.csr_matrix((np_data, (np_row, np_col)), shape=(1, len(self._dictionary)))
                # It would be best to use the decision function as follows, BUT it is not supported yet for sparse SVM
                # Using the decision function to build the votes array
#dec_func = model.decision_function(x_test)[0]
#votes = {}
#k = 0
# for i in range(len(model.classes_)):
# for j in range(i+1, len(model.classes_)):
# #if dec_func[k] > 0:
# votes.setdefault(i,0)
# votes[i] +=1
# else:
# votes.setdefault(j,0)
# votes[j] +=1
# k +=1
# Sort votes according to the values
#sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True)
# We will use predict_proba instead of decision_function
prob_dict = {}
pred_prob = model.predict_proba(x_test)[0]
for i in range(len(pred_prob)):
prob_dict[i] = pred_prob[i]
sorted_Prob = sorted(
prob_dict.items(),
key=itemgetter(1),
reverse=True)
# Note that SHIFT is always a valid operation
for (y_pred_idx, confidence) in sorted_Prob:
#y_pred = model.predict(x_test)[0]
                    # Map the prediction back to a parser operation
y_pred = model.classes_[y_pred_idx]
if y_pred in self._match_transition:
strTransition = self._match_transition[y_pred]
baseTransition = strTransition.split(":")[0]
if baseTransition == Transition.LEFT_ARC:
if operation.left_arc(conf, strTransition.split(":")[1]) != -1:
break
elif baseTransition == Transition.RIGHT_ARC:
if operation.right_arc(conf, strTransition.split(":")[1]) != -1:
break
elif baseTransition == Transition.REDUCE:
if operation.reduce(conf) != -1:
break
elif baseTransition == Transition.SHIFT:
if operation.shift(conf) != -1:
break
else:
raise ValueError("The predicted transition is not recognized, expected errors")
            # Finished with operations; build the dependency graph from conf.arcs
new_depgraph = deepcopy(depgraph)
for key in new_depgraph.nodes:
node = new_depgraph.nodes[key]
node['rel'] = ''
                # By default, all tokens depend on the Root
node['head'] = 0
for (head, rel, child) in conf.arcs:
c_node = new_depgraph.nodes[child]
c_node['head'] = head
c_node['rel'] = rel
result.append(new_depgraph)
return result
def demo():
"""
>>> from nltk.parse import DependencyGraph, DependencyEvaluator
>>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition
>>> gold_sent = DependencyGraph(\"""
... Economic JJ 2 ATT
... news NN 3 SBJ
... has VBD 0 ROOT
... little JJ 5 ATT
... effect NN 3 OBJ
... on IN 5 ATT
... financial JJ 8 ATT
... markets NNS 6 PC
... . . 3 PU
... \""")
>>> conf = Configuration(gold_sent)
###################### Check the Initial Feature ########################
>>> print(', '.join(conf.extract_features()))
STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ
###################### Check The Transition #######################
Check the Initialized Configuration
>>> print(conf)
Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : []
A. Do some transition checks for ARC-STANDARD
>>> operation = Transition('arc-standard')
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
>>> operation.shift(conf)
>>> operation.left_arc(conf,"SBJ")
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
Middle Configuration and Features Check
>>> print(conf)
Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)]
>>> print(', '.join(conf.extract_features()))
STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT
>>> operation.right_arc(conf, "PC")
>>> operation.right_arc(conf, "ATT")
>>> operation.right_arc(conf, "OBJ")
>>> operation.shift(conf)
>>> operation.right_arc(conf, "PU")
>>> operation.right_arc(conf, "ROOT")
>>> operation.shift(conf)
Terminated Configuration Check
>>> print(conf)
Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)]
B. Do some transition checks for ARC-EAGER
>>> conf = Configuration(gold_sent)
>>> operation = Transition('arc-eager')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'SBJ')
>>> operation.right_arc(conf,'ROOT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.right_arc(conf,'OBJ')
>>> operation.right_arc(conf,'ATT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.right_arc(conf,'PC')
>>> operation.reduce(conf)
>>> operation.reduce(conf)
>>> operation.reduce(conf)
>>> operation.right_arc(conf,'PU')
>>> print(conf)
Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)]
###################### Check The Training Function #######################
A. Check the ARC-STANDARD training
>>> import tempfile
>>> import os
>>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False)
>>> parser_std = TransitionParser('arc-standard')
>>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file)))
Number of training examples : 1
Number of valid (projective) examples : 1
SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT
>>> parser_std.train([gold_sent],'temp.arcstd.model')
Number of training examples : 1
Number of valid (projective) examples : 1
...
>>> remove(input_file.name)
B. Check the ARC-EAGER training
>>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False)
>>> parser_eager = TransitionParser('arc-eager')
>>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file)))
Number of training examples : 1
Number of valid (projective) examples : 1
SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU
>>> parser_eager.train([gold_sent],'temp.arceager.model')
Number of training examples : 1
Number of valid (projective) examples : 1
...
>>> remove(input_file.name)
###################### Check The Parsing Function ########################
A. Check the ARC-STANDARD parser
>>> result = parser_std.parse([gold_sent], 'temp.arcstd.model')
>>> de = DependencyEvaluator(result, [gold_sent])
>>> de.eval() >= (0, 0)
True
B. Check the ARC-EAGER parser
>>> result = parser_eager.parse([gold_sent], 'temp.arceager.model')
>>> de = DependencyEvaluator(result, [gold_sent])
>>> de.eval() >= (0, 0)
True
    Note that the result is very poor because there is only one training example.
"""
|
apache-2.0
|
CrazyGuo/bokeh
|
examples/app/stock_applet/stock_app.py
|
42
|
7786
|
"""
This file demonstrates a bokeh applet, which can either be viewed
directly on a bokeh-server, or embedded into a flask application.
See the README.md file in this directory for instructions on running.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
from os import listdir
from os.path import dirname, join, splitext
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource, Plot
from bokeh.plotting import figure, curdoc
from bokeh.properties import String, Instance
from bokeh.server.app import bokeh_app
from bokeh.server.utils.plugins import object_page
from bokeh.models.widgets import HBox, VBox, VBoxForm, PreText, Select
# build up list of stock data in the daily folder
data_dir = join(dirname(__file__), "daily")
try:
tickers = listdir(data_dir)
except OSError as e:
print('Stock data not available, see README for download instructions.')
raise e
tickers = [splitext(x)[0].split("table_")[-1] for x in tickers]
# cache stock data as dict of pandas DataFrames
pd_cache = {}
def get_ticker_data(ticker):
fname = join(data_dir, "table_%s.csv" % ticker.lower())
data = pd.read_csv(
fname,
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
header=False,
parse_dates=['date']
)
data = data.set_index('date')
data = pd.DataFrame({ticker: data.c, ticker + "_returns": data.c.diff()})
return data
def get_data(ticker1, ticker2):
if pd_cache.get((ticker1, ticker2)) is not None:
return pd_cache.get((ticker1, ticker2))
    # concatenate both tickers' columns only when the tickers differ
if ticker1 != ticker2:
data1 = get_ticker_data(ticker1)
data2 = get_ticker_data(ticker2)
data = pd.concat([data1, data2], axis=1)
else:
data = get_ticker_data(ticker1)
data = data.dropna()
pd_cache[(ticker1, ticker2)] = data
return data
class StockApp(VBox):
extra_generated_classes = [["StockApp", "StockApp", "VBox"]]
jsmodel = "VBox"
# text statistics
pretext = Instance(PreText)
# plots
plot = Instance(Plot)
line_plot1 = Instance(Plot)
line_plot2 = Instance(Plot)
hist1 = Instance(Plot)
hist2 = Instance(Plot)
# data source
source = Instance(ColumnDataSource)
# layout boxes
mainrow = Instance(HBox)
histrow = Instance(HBox)
statsbox = Instance(VBox)
# inputs
ticker1 = String(default="AAPL")
ticker2 = String(default="GOOG")
ticker1_select = Instance(Select)
ticker2_select = Instance(Select)
input_box = Instance(VBoxForm)
def __init__(self, *args, **kwargs):
super(StockApp, self).__init__(*args, **kwargs)
self._dfs = {}
@classmethod
def create(cls):
"""
This function is called once, and is responsible for
creating all objects (plots, datasources, etc)
"""
# create layout widgets
obj = cls()
obj.mainrow = HBox()
obj.histrow = HBox()
obj.statsbox = VBox()
obj.input_box = VBoxForm()
# create input widgets
obj.make_inputs()
# outputs
obj.pretext = PreText(text="", width=500)
obj.make_source()
obj.make_plots()
obj.make_stats()
# layout
obj.set_children()
return obj
def make_inputs(self):
self.ticker1_select = Select(
name='ticker1',
value='AAPL',
options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
)
self.ticker2_select = Select(
name='ticker2',
value='GOOG',
options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
)
@property
def selected_df(self):
pandas_df = self.df
selected = self.source.selected['1d']['indices']
if selected:
pandas_df = pandas_df.iloc[selected, :]
return pandas_df
def make_source(self):
self.source = ColumnDataSource(data=self.df)
def line_plot(self, ticker, x_range=None):
p = figure(
title=ticker,
x_range=x_range,
x_axis_type='datetime',
plot_width=1000, plot_height=200,
title_text_font_size="10pt",
tools="pan,wheel_zoom,box_select,reset"
)
p.circle(
'date', ticker,
size=2,
source=self.source,
nonselection_alpha=0.02
)
return p
def hist_plot(self, ticker):
global_hist, global_bins = np.histogram(self.df[ticker + "_returns"], bins=50)
hist, bins = np.histogram(self.selected_df[ticker + "_returns"], bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
start = global_bins.min()
end = global_bins.max()
top = hist.max()
p = figure(
title="%s hist" % ticker,
plot_width=500, plot_height=200,
tools="",
title_text_font_size="10pt",
x_range=[start, end],
y_range=[0, top],
)
p.rect(center, hist / 2.0, width, hist)
return p
def make_plots(self):
ticker1 = self.ticker1
ticker2 = self.ticker2
p = figure(
title="%s vs %s" % (ticker1, ticker2),
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,box_select,reset",
title_text_font_size="10pt",
)
p.circle(ticker1 + "_returns", ticker2 + "_returns",
size=2,
nonselection_alpha=0.02,
source=self.source
)
self.plot = p
self.line_plot1 = self.line_plot(ticker1)
self.line_plot2 = self.line_plot(ticker2, self.line_plot1.x_range)
self.hist_plots()
def hist_plots(self):
ticker1 = self.ticker1
ticker2 = self.ticker2
self.hist1 = self.hist_plot(ticker1)
self.hist2 = self.hist_plot(ticker2)
def set_children(self):
self.children = [self.mainrow, self.histrow, self.line_plot1, self.line_plot2]
self.mainrow.children = [self.input_box, self.plot, self.statsbox]
self.input_box.children = [self.ticker1_select, self.ticker2_select]
self.histrow.children = [self.hist1, self.hist2]
self.statsbox.children = [self.pretext]
def input_change(self, obj, attrname, old, new):
if obj == self.ticker2_select:
self.ticker2 = new
if obj == self.ticker1_select:
self.ticker1 = new
self.make_source()
self.make_plots()
self.set_children()
curdoc().add(self)
def setup_events(self):
super(StockApp, self).setup_events()
if self.source:
self.source.on_change('selected', self, 'selection_change')
if self.ticker1_select:
self.ticker1_select.on_change('value', self, 'input_change')
if self.ticker2_select:
self.ticker2_select.on_change('value', self, 'input_change')
def make_stats(self):
stats = self.selected_df.describe()
self.pretext.text = str(stats)
def selection_change(self, obj, attrname, old, new):
self.make_stats()
self.hist_plots()
self.set_children()
curdoc().add(self)
@property
def df(self):
return get_data(self.ticker1, self.ticker2)
# The following code adds a "/bokeh/stocks/" url to the bokeh-server. This URL
# will render this StockApp. If you don't want to serve this applet from a Bokeh
# server (for instance if you are embedding in a separate Flask application),
# then just remove this block of code.
@bokeh_app.route("/bokeh/stocks/")
@object_page("stocks")
def make_stocks():
app = StockApp.create()
return app
|
bsd-3-clause
|
sbonner0/DeepTopologyClassification
|
src/legacy/MultiInputModels.py
|
1
|
2553
|
# Stephen Bonner 2016 - Durham University
# Keras deep feedforward models for multiclass classification
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import Utils as ut
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from keras.models import model_from_yaml
from sklearn import cross_validation
def createModel1H(optimizer='rmsprop', init='glorot_uniform'):
model = Sequential()
model.add(Dense(32, input_dim=54, kernel_initializer=init))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(5, kernel_initializer=init))
model.add(Activation('softmax'))
# Compile the model
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
return model
def createModel2H(optimizer='rmsprop', init='glorot_uniform'):
# Create the model
model = Sequential()
# Input layer plus first hidden layer
model.add(Dense(32, input_dim=54, kernel_initializer=init))
model.add(Activation('relu'))
model.add(Dropout(0.2))
# Add second hidden layer
model.add(Dense(16, kernel_initializer=init))
model.add(Activation('relu'))
model.add(Dropout(0.2))
# Add output layer
model.add(Dense(5, kernel_initializer=init))
model.add(Activation('softmax'))
# Compile the model
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
return model
def createMode3HlWithDropout(optimizer='rmsprop', init='glorot_uniform'):
# Create the model
model = Sequential()
# Input layer plus first hidden layer
model.add(Dense(1000, input_dim=54, kernel_initializer=init))
model.add(Activation('relu'))
model.add(Dropout(0.2))
# Second layer
model.add(Dense(512, kernel_initializer=init))
model.add(Activation('relu'))
model.add(Dropout(0.2))
# Third layer
model.add(Dense(256, kernel_initializer=init))
model.add(Activation('relu'))
model.add(Dropout(0.2))
    # Fourth layer
model.add(Dense(32, kernel_initializer=init))
model.add(Activation('relu'))
model.add(Dropout(0.2))
# Add output layer
model.add(Dense(5, kernel_initializer=init))
model.add(Activation('softmax'))
# Compile the model
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
return model
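# A minimal, hypothetical usage sketch (added for illustration, not part of the
# original module): it assumes 54-dimensional fingerprint vectors and 5 graph
# classes, matching the layer sizes above, and uses random dummy data.  The
# imported KerasClassifier/cross_validation wrappers could be used instead for
# scikit-learn style evaluation.
if __name__ == '__main__':
    X_dummy = np.random.rand(200, 54)
    # One-hot encode 5 dummy class labels to match the softmax output layer.
    y_dummy = np_utils.to_categorical(np.random.randint(0, 5, size=200), 5)
    model = createModel2H()
    model.fit(X_dummy, y_dummy, epochs=5, batch_size=32, verbose=0)
    loss, acc = model.evaluate(X_dummy, y_dummy, verbose=0)
    print('Dummy-data accuracy: %.3f' % acc)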
|
gpl-3.0
|
weissercn/learningml
|
learningml/GoF/analysis/event_shapes_lower_level_scale/plot_event_shapes_lower_level_alphaSvalue_analysis.py
|
1
|
10913
|
from __future__ import print_function
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import time
# Options for mode 'lower_level'
MODE = 'lower_level'
label_size = 28
################################################################################
mpl.rc('font', family='serif', size=34, serif="Times New Roman")
#mpl.rcParams['text.usetex'] = True
#mpl.rcParams['text.latex.preamble'] = [r'\boldmath']
mpl.rcParams['legend.fontsize'] = "medium"
mpl.rc('savefig', format ="pdf")
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
mpl.rcParams['figure.figsize'] = 8, 6
mpl.rcParams['lines.linewidth'] = 3
def binomial_error(l1):
err_list = []
for item in l1:
if item==1. or item==0.: err_list.append(np.sqrt(100./101.*(1.-100./101.)/101.))
else: err_list.append(np.sqrt(item*(1.-item)/100.))
return err_list
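# Worked example (added for clarity, not in the original script): for an
# observed rejection fraction p over n = 100 pseudo-experiments the binomial
# standard error is sqrt(p * (1 - p) / n); for p = 0.5 this gives
# sqrt(0.25 / 100) = 0.05.  For p exactly 0 or 1 the branch above substitutes
# p = 100/101 with n = 101 so that the error bar never collapses to zero.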
################################################################################
# E V E N T S H A P E S - L O W E R L E V E L
################################################################################
if MODE == 'lower_level':
#dimensions = [2,3,4,5,6,7,8,9,10]
#dimensions = [1,2,3,4,5]
#param_list = [0.130,0.132,0.133,0.134,0.135,0.14]
#param_list = [0.130,0.132,0.133,0.134,0.135,0.1365,0.14]
param_list = [0.130, 0.132,0.133,0.134,0.1345,0.135,0.1355,0.136,0.137,0.1375,0.138,0.139,0.14]
ml_classifiers = ['nn','bdt','xgb','svm']
ml_classifiers_colors = ['green','magenta','cyan']
ml_classifiers_bin = 5
chi2_color = 'red'
chi2_splits = [1,2,3,4,5,6,7,8,9,10]
#chi2_splits = [8]
ml_folder_name = "automatisation_monash_alphaSvalue_lower_level/evaluation_monash_lower_level_2files_attempt4"
#chi2_folder_name = "event_shapes_lower_level_scale"
chi2_folder_name = "event_shapes_lower_level_scale_uniform"
#chi2_folder_name = "event_shapes_lower_level_without_Mult"
ml_file_name = "{1}_monash_{0}_alphaSvalue_lower_level_syst_0_01__p_values"
#chi2_file_name = "event_shapes_lower_level_scale_syst_0_01__{0}D_chi2_{1}_splits_p_values"
chi2_file_name = "event_shapes_lower_level_scale_uniform_syst_0_01__{0}D_chi2_{1}_splits_p_values"
#chi2_file_name = "event_shapes_lower_level_syst_0_01_attempt4_without_Mult__{0}D_chi2_{1}_splits_p_values"
chi2_thrust_folder_name = "event_shapes_thrust"
chi2_thrust_file_name = "event_shapes_thrust_syst_0_01_attempt4__{0}D_chi2_{1}_splits_p_values"
title = "Event shapes lower level"
name = "event_shapes_lower_level"
CL = 0.95
ml_classifiers_dict={}
chi2_splits_dict={}
chi2_thrust_splits_dict={}
#xwidth = [0.5]*len(param_list)
xwidth = np.subtract(param_list[1:],param_list[:-1])/2.
xwidth_left = np.append(xwidth[0] , xwidth)
xwidth_right = np.append(xwidth,xwidth[-1])
print("xwidth : ", xwidth)
fig = plt.figure()
ax = fig.add_axes([0.2,0.15,0.75,0.8])
if True:
for ml_classifier_index, ml_classifier in enumerate(ml_classifiers):
ml_classifiers_dict[ml_classifier]= []
for param in param_list:
p_values = np.loadtxt(os.environ['learningml']+"/GoF/optimisation_and_evaluation/"+ml_folder_name+"/"+ml_classifier+"/"+ml_file_name.format(param,ml_classifier,ml_classifiers_bin)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
ml_classifiers_dict[ml_classifier].append(p_values_in_CL)
ml_classifiers_dict[ml_classifier]= np.divide(ml_classifiers_dict[ml_classifier],100.)
ax.errorbar(param_list,ml_classifiers_dict['nn'], yerr=binomial_error(ml_classifiers_dict['nn']), linestyle='-', marker='s', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[0], label=r'$ANN$',clip_on=False)
print("bdt : ", ml_classifiers_dict['bdt'])
print("xgb : ", ml_classifiers_dict['xgb'])
ml_classifiers_dict['BDT_best']= [max(item1,item2) for item1, item2 in zip(ml_classifiers_dict['bdt'],ml_classifiers_dict['xgb'])]
print("BDT : ", ml_classifiers_dict['BDT_best'])
ax.errorbar(param_list,ml_classifiers_dict['BDT_best'], yerr=binomial_error(ml_classifiers_dict['BDT_best']), linestyle='-', marker='o', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[1], label=r'$BDT$', clip_on=False)
ax.errorbar(param_list,ml_classifiers_dict['svm'], yerr=binomial_error(ml_classifiers_dict['svm']), linestyle='-', marker='^', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[2], label=r'$SVM$', clip_on=False)
for chi2_split_index, chi2_split in enumerate(chi2_splits):
chi2_splits_dict[str(chi2_split)]=[]
chi2_best = []
for param in param_list:
chi2_best_dim = []
for chi2_split_index, chi2_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_folder_name+"/"+chi2_file_name.format(param,chi2_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_splits_dict[str(chi2_split)].append(temp)
chi2_best_dim.append(temp)
temp_best = np.max(chi2_best_dim)
#print(str(dim)+"D chi2_best_dim : ", chi2_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_best.append(temp_best)
#print("chi2_best : ",chi2_best)
for chi2_thrust_split_index, chi2_thrust_split in enumerate(chi2_splits):
chi2_thrust_splits_dict[str(chi2_thrust_split)]=[]
chi2_thrust_best = []
for param in param_list:
chi2_thrust_best_dim = []
for chi2_thrust_split_index, chi2_thrust_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_thrust_folder_name+"/"+chi2_thrust_file_name.format(param,chi2_thrust_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_thrust_splits_dict[str(chi2_thrust_split)].append(temp)
chi2_thrust_best_dim.append(temp)
temp_best = np.max(chi2_thrust_best_dim)
#print(str(dim)+"D chi2_thrust_best_dim : ", chi2_thrust_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_thrust_best.append(temp_best)
#print("chi2_thrust_best : ",chi2_thrust_best)
print("param_list : ",param_list)
print("chi2_best : ", chi2_best)
print("chi2_splits_dict : ", chi2_splits_dict)
ax.errorbar(param_list,chi2_best, yerr=binomial_error(chi2_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='black', label=r'$\chi^2 scaled$', clip_on=False)
ax.errorbar(param_list,chi2_thrust_best, yerr=binomial_error(chi2_thrust_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='grey', label=r'$\chi^2 Thrust$', clip_on=False)
print("ml_classifiers_dict : ",ml_classifiers_dict)
print("chi2_best : ", chi2_best)
ax.plot((0.1365,0.1365),(0.,1.),c="grey",linestyle="--")
ax.set_xlim([0.129,0.1405])
ax.set_ylim([0.,1.])
ax.set_xlabel(r"$\alpha_{S}$")
ax.set_ylabel("Fraction rejected")
a, b, c = [0.130,0.133], [0.1365],[0.14]
ax.set_xticks(a+b+c)
xx, locs = plt.xticks()
ll = ['%.3f' % y for y in a] + ['%.4f' % y for y in b] + ['%.3f' % y for y in c]
plt.xticks(xx, ll)
#ax.legend(loc='lower left', frameon=False, numpoints=1)
fig_leg = plt.figure(figsize=(8,2.7))
ax_leg = fig_leg.add_axes([0.0,0.0,1.0,1.0])
plt.tick_params(axis='x',which='both',bottom='off', top='off', labelbottom='off')
plt.tick_params(axis='y',which='both',bottom='off', top='off', labelbottom='off')
ax_leg.yaxis.set_ticks_position('none')
ax_leg.set_frame_on(False)
plt.figlegend(*ax.get_legend_handles_labels(), loc = 'upper left',frameon=False, numpoints=1,ncol=2)
fig_leg.savefig("event_shapes_lower_level_analysis_legend.pdf")
#fig_name=name+"_alphaSvalue_analysis"
fig_name="event_shapes_lower_level_analysis"
fig.savefig(fig_name+".pdf")
fig.savefig(fig_name+"_"+time.strftime("%b_%d_%Y")+".pdf")
print("Saved the figure as" , fig_name+".pdf")
|
mit
|
ffu/DSA-3.2.2
|
gr-utils/src/python/plot_data.py
|
2
|
5888
|
#
# Copyright 2007,2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
matplotlib.interactive(True)
matplotlib.use('TkAgg')
class plot_data:
def __init__(self, datatype, filenames, options):
self.hfile = list()
self.legend_text = list()
for f in filenames:
self.hfile.append(open(f, "r"))
self.legend_text.append(f)
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = datatype
self.sizeof_data = datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 9), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.40, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_f.get_xlim()
self.manager = get_current_fig_manager()
connect('key_press_event', self.click)
show()
def get_data(self, hfile):
self.text_file_pos.set_text("File Position: %d" % (hfile.tell()//self.sizeof_data))
f = scipy.fromfile(hfile, dtype=self.datatype, count=self.block_length)
#print "Read in %d items" % len(self.f)
if(len(f) == 0):
print "End of File"
else:
self.f = f
self.time = [i*(1/self.sample_rate) for i in range(len(self.f))]
def make_plots(self):
self.sp_f = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.875, 0.6])
self.sp_f.set_title(("Amplitude"), fontsize=self.title_font_size, fontweight="bold")
self.sp_f.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_f.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
self.plot_f = list()
maxval = -1e12
minval = 1e12
for hf in self.hfile:
# if specified on the command-line, set file pointer
hf.seek(self.sizeof_data*self.start, 1)
self.get_data(hf)
# Subplot for real and imaginary parts of signal
self.plot_f += plot(self.time, self.f, 'o-')
maxval = max(maxval, max(self.f))
minval = min(minval, min(self.f))
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
self.leg = self.sp_f.legend(self.plot_f, self.legend_text)
draw()
def update_plots(self):
maxval = -1e12
minval = 1e12
for hf,p in zip(self.hfile,self.plot_f):
self.get_data(hf)
p.set_data([self.time, self.f])
maxval = max(maxval, max(self.f))
minval = min(minval, min(self.f))
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.update_plots()
def step_backward(self):
for hf in self.hfile:
# Step back in file position
if(hf.tell() >= 2*self.sizeof_data*self.block_length ):
hf.seek(-2*self.sizeof_data*self.block_length, 1)
else:
hf.seek(-hf.tell(),1)
self.update_plots()
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
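# Hedged usage sketch (not part of this file): the companion gr_plot_* scripts
# are expected to drive this class roughly as follows, where the option names
# (--block, --start, --sample-rate) are assumptions based on the attributes
# read in __init__:
#
#     parser = OptionParser()
#     parser.add_option("-B", "--block", type="int", default=1000)
#     parser.add_option("-s", "--start", type="int", default=0)
#     parser.add_option("-R", "--sample-rate", type="float", default=1.0)
#     (options, args) = parser.parse_args()
#     dc = plot_data(scipy.float32, args, options)
#
# The datatype argument can be any scipy/numpy scalar type whose itemsize
# matches the raw samples in the capture files (e.g. scipy.complex64).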
|
gpl-3.0
|
altairpearl/scikit-learn
|
sklearn/decomposition/tests/test_factor_analysis.py
|
112
|
3203
|
# Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils.testing import ignore_warnings
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
    # latent variables of dim 3, 20 of them
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
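# Added note (not part of the original test): the covariance comparison above
# relies on the factor analysis model cov(X) = W.T W + diag(psi), so
# fa.get_covariance() should equal
# np.dot(fa.components_.T, fa.components_) + np.diag(fa.noise_variance_),
# which is the model covariance that the sample covariance `scov` is checked
# against (within the loose tolerance used above).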
|
bsd-3-clause
|
nomadcube/scikit-learn
|
benchmarks/bench_plot_omp_lars.py
|
266
|
4447
|
"""Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat, infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
ax = fig.add_subplot(1, 2, i)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
|
bsd-3-clause
|
omni5cience/django-inlineformfield
|
.tox/py27/lib/python2.7/site-packages/IPython/sphinxext/ipython_directive.py
|
13
|
41235
|
# -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
Pseudo-Decorators
=================
Note: Only one decorator is supported per input. If more than one decorator
is specified, then only the last one is used.
In addition to the Pseudo-Decorators/options described at the above link,
several enhancements have been made. The directive will emit a message to the
console at build-time if code-execution resulted in an exception or warning.
You can suppress these on a per-block basis by specifying the :okexcept:
or :okwarning: options:
.. code-block:: rst
.. ipython::
:okexcept:
:okwarning:
In [1]: 1/0
In [2]: # raise warning.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold: refactoring, cleanups, pure Python addition
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# Here is where we assume there is, at most, one decorator.
# Might need to rethink this.
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
# The default ipython_rgx* treat the space following the colon as optional.
# However, If the space is there we must consume it or code
# employing the cython_magic extension will fail to execute.
#
# This works with the default ipython_rgx* patterns,
# If you modify them, YMMV.
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
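# Hedged illustration (not from the original source): for a part such as
#
#     # compute a square
#     In [1]: 2**2
#     Out[1]: 4
#
# block_parser would return roughly
#     [(COMMENT, '# compute a square'),
#      (INPUT, (None, '2**2', '')),
#      (OUTPUT, '4')]
# with the decorator slot None because no @savefig/@doctest/@suppress line
# preceded the input.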
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None):
self.cout = StringIO()
if exec_lines is None:
exec_lines = []
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbededSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
# this is assigned by the SetUp method of IPythonDirective
# to point at itself.
#
# So, you can access handy things at self.directive.state
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# The "rest" is the standard output of the input. This needs to be
# added when in verbatim mode. If there is no "rest", then we don't
# add it, as the new line will be added by the processed output.
ret.append(rest)
# Fetch the processed output. (This is not the submitted output.)
self.cout.seek(0)
processed_output = self.cout.read()
if not is_suppress and not is_semicolon:
#
# In IPythonDirective.run, the elements of `ret` are eventually
# combined such that '' entries correspond to newlines. So if
# `processed_output` is equal to '', then the adding it to `ret`
# ensures that there is a blank line between consecutive inputs
# that have no outputs, as in:
#
# In [1]: x = 4
#
# In [2]: x = 5
#
# When there is processed output, it has a '\n' at the tail end. So
# adding the output to `ret` will provide the necessary spacing
# between consecutive input/output blocks, as in:
#
# In [1]: x
# Out[1]: 5
#
# In [2]: x
# Out[2]: 5
#
# When there is stdout from the input, it also has a '\n' at the
# tail end, and so this ensures proper spacing as well. E.g.:
#
# In [1]: print x
# 5
#
# In [2]: x = 5
#
# When in verbatim mode, `processed_output` is empty (because
            # nothing was passed to IP). Sometimes the submitted code block has
# an Out[] portion and sometimes it does not. When it does not, we
# need to ensure proper spacing, so we have to add '' to `ret`.
# However, if there is an Out[] in the submitted code, then we do
# not want to add a newline as `process_output` has stuff to add.
# The difficulty is that `process_input` doesn't know if
# `process_output` will be called---so it doesn't know if there is
# Out[] in the code block. The requires that we include a hack in
# `process_block`. See the comments there.
#
ret.append(processed_output)
elif is_semicolon:
# Make sure there is a newline after the semicolon.
ret.append('')
# context information
filename = "Unknown"
lineno = 0
if self.directive.state:
filename = self.directive.state.document.current_source
lineno = self.directive.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in processed_output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(processed_output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(('-' * 76) + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, processed_output,
is_doctest, decorator, image_file, image_directive)
def process_output(self, data, output_prompt, input_lines, output,
is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
# Recall: `data` is the submitted output, and `output` is the processed
# output from `input_lines`.
TAB = ' ' * 4
if is_doctest and output is not None:
found = output # This is the processed output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
# When in verbatim mode, this holds additional submitted output
# to be written in the final Sphinx output.
# https://github.com/ipython/ipython/issues/5776
out_data = []
is_verbatim = decorator=='@verbatim' or self.is_verbatim
if is_verbatim and data.strip():
# Note that `ret` in `process_block` has '' as its last element if
# the code block was in verbatim mode. So if there is no submitted
# output, then we will have proper spacing only if we do not add
# an additional '' to `out_data`. This is why we condition on
# `and data.strip()`.
# The submitted output has no output prompt. If we want the
# prompt and the code to appear, we need to join them now
# instead of adding them separately---as this would create an
# undesired newline. How we do this ultimately depends on the
# format of the output regex. I'll do what works for the default
# prompt for now, and we might have to adjust if it doesn't work
# in other cases. Finally, the submitted output does not have
# a trailing newline, so we must add it manually.
out_data.append("{0} {1}\n".format(output_prompt, data))
return out_data
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
Process a block from the block_parser and return a list of processed lines.
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest,
decorator, image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt, input_lines,
output, is_doctest, decorator,
image_file)
if out_data:
# Then there was user submitted output in verbatim mode.
# We need to remove the last element of `ret` that was
# added in `process_input`, as it is '' and would introduce
# an undesirable newline.
assert(ret[-1] == '')
del ret[-1]
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. It is unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed while capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
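# A minimal sketch, for reference, of what this method produces (assuming
# the default 'In [%d]:' prompt and the counter starting at 0): the
# pure-python block
#
#     x = 1
#     x + 1
#
# comes back roughly as
#
#     In [0]: x = 1
#
#     In [1]: x + 1
#
# i.e. each complete statement gets a numbered input prompt plus a trailing
# blank line, so block_parser can consume it like hand-written ipython::
# content.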
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' {0}'.format(line)
for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines) > 2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile(r'In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile(r'Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
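# A hedged example of overriding the values registered above from a
# project's conf.py (after adding this module to `extensions`; the exact
# values below are illustrative, not recommendations):
#
#     ipython_mplbackend = 'agg'
#     ipython_promptin = 'In [%d]:'
#     ipython_execlines = ['import numpy as np',
#                          'import matplotlib.pyplot as plt']
#     ipython_holdcount = False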
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip the first example, which depends on a local file:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
|
mit
|
bassio/omicexperiment
|
omicexperiment/transforms/filters/sample.py
|
1
|
1678
|
import pandas as pd
from omicexperiment.transforms.transform import Filter, AttributeFilter, GroupByTransform, FlexibleOperatorMixin, AttributeFlexibleOperatorMixin, TransformObjectsProxy
from omicexperiment.transforms.sample import SampleGroupBy, SampleSumCounts
class SampleMinCount(Filter):
def __dapply__(self, experiment):
if self.operator == '__eq__':
assert isinstance(self.value, int)
df = experiment.data_df
criteria = (df.sum() >= self.value)
return df[criteria.index[criteria]]
class SampleMaxCount(Filter):
def __dapply__(self, experiment):
if self.operator == '__eq__':
assert isinstance(self.value, int)
df = experiment.data_df
criteria = (df.sum() <= self.value)
return df[criteria.index[criteria]]
class SampleCount(FlexibleOperatorMixin, Filter):
def __dapply__(self, experiment):
_op = self._op_function(experiment.data_df.sum())
criteria = _op(self.value)
return experiment.data_df.reindex(columns=criteria.index[criteria])
class SampleAttributeFilter(AttributeFilter, AttributeFlexibleOperatorMixin):
def __dapply__(self, experiment):
_op = self._op_function(experiment.mapping_df)
criteria = _op(self.value)
return experiment.data_df.reindex(columns=criteria.index[criteria])
class Sample(TransformObjectsProxy):
#not_in =
#in_
count = SampleCount()
att = SampleAttributeFilter()
c = SampleAttributeFilter()
groupby = SampleGroupBy()
sum_counts = SampleSumCounts()
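if __name__ == '__main__':
    # Editor's sketch (not part of omicexperiment): the filters above keep the
    # *columns* (samples) of data_df whose summed counts satisfy the
    # comparison. The same selection pattern at plain-pandas level, with
    # made-up data:
    demo_df = pd.DataFrame({'sample_a': [10, 5], 'sample_b': [1, 0]},
                           index=['otu1', 'otu2'])
    criteria = (demo_df.sum() >= 10)  # what a SampleMinCount check amounts to
    print(demo_df[criteria.index[criteria]])  # only 'sample_a' survives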
|
bsd-3-clause
|
Sotera/social-sandbox
|
images/color_hist_featurize.py
|
1
|
1300
|
# USAGE
# python color_kmeans.py --image images/jp.png --clusters 3
# import the necessary packages
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import argparse, utils, os, cv2
args = {
'plot' : False,
'indir' : 'baltimore_images',
'outfile' : 'baltimore_features'
}
def make_hist(f, plot = False, n_bins = 5):
image = cv2.imread(f)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# show our image
if args['plot']:
plt.figure()
plt.axis("off")
plt.imshow(image)
color = ('r','g','b')
features = []
for i,col in enumerate(color):
hist = cv2.calcHist([image], [i], None, [n_bins], [0,256])
features.extend(hist.flatten())
if args['plot']:
plt.plot(hist,color = col)
plt.xlim([0,256])
# Normalized by total number of pixel-channels
sm = sum(features)
features = [x / sm for x in features]
return features
def write_hist(outfile, hist, fileName):
hist = [str(f) for f in hist]
outfile.write("%s,%s\n" % (fileName, ",".join(hist)))
files = os.listdir(args['indir'])
files = [os.path.join(args['indir'], f) for f in files]
with open(args['outfile'], 'w') as outfile:
for fileName in files:
try:
hist = make_hist(fileName, plot = args['plot'])
write_hist(outfile, hist, fileName)
except:
print('error @ ' + fileName)
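if __name__ == '__main__':
    # Editor's sketch: the same per-channel histogram + normalisation as
    # make_hist, but on a synthetic in-memory RGB image so the example does
    # not depend on the files in args['indir'].
    import numpy as np
    demo = (np.random.rand(32, 32, 3) * 255).astype('uint8')
    feats = []
    for channel in range(3):
        h = cv2.calcHist([demo], [channel], None, [5], [0, 256])
        feats.extend(h.flatten())
    total = sum(feats)
    feats = [x / total for x in feats]
    print(sum(feats))  # ~1.0: the feature vector is a normalised histogram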
|
apache-2.0
|
quheng/scikit-learn
|
benchmarks/bench_plot_fastkmeans.py
|
294
|
4676
|
from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
|
bsd-3-clause
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/skimage/viewer/plugins/color_histogram.py
|
40
|
3271
|
import numpy as np
import matplotlib.pyplot as plt
from ... import color, exposure
from .plotplugin import PlotPlugin
from ..canvastools import RectangleTool
class ColorHistogram(PlotPlugin):
name = 'Color Histogram'
def __init__(self, max_pct=0.99, **kwargs):
super(ColorHistogram, self).__init__(height=400, **kwargs)
self.max_pct = max_pct
print(self.help())
def attach(self, image_viewer):
super(ColorHistogram, self).attach(image_viewer)
self.rect_tool = RectangleTool(self,
on_release=self.ab_selected)
self._on_new_image(image_viewer.image)
def _on_new_image(self, image):
self.lab_image = color.rgb2lab(image)
# Calculate color histogram in the Lab colorspace:
L, a, b = self.lab_image.T
left, right = -100, 100
ab_extents = [left, right, right, left]
self.mask = np.ones(L.shape, bool)
bins = np.arange(left, right)
hist, x_edges, y_edges = np.histogram2d(a.flatten(), b.flatten(),
bins, normed=True)
self.data = {'bins': bins, 'hist': hist, 'edges': (x_edges, y_edges),
'extents': (left, right, left, right)}
# Clip bin heights that dominate a-b histogram
max_val = pct_total_area(hist, percentile=self.max_pct)
hist = exposure.rescale_intensity(hist, in_range=(0, max_val))
self.ax.imshow(hist, extent=ab_extents, cmap=plt.cm.gray)
self.ax.set_title('Color Histogram')
self.ax.set_xlabel('b')
self.ax.set_ylabel('a')
def help(self):
helpstr = ("Color Histogram tool:",
"Select region of a-b colorspace to highlight on image.")
return '\n'.join(helpstr)
def ab_selected(self, extents):
x0, x1, y0, y1 = extents
self.data['extents'] = extents
lab_masked = self.lab_image.copy()
L, a, b = lab_masked.T
self.mask = ((a > y0) & (a < y1)) & ((b > x0) & (b < x1))
lab_masked[..., 1:][~self.mask.T] = 0
self.image_viewer.image = color.lab2rgb(lab_masked)
def output(self):
"""Return the image mask and the histogram data.
Returns
-------
mask : array of bool, same shape as image
The selected pixels.
data : dict
The data describing the histogram and the selected region.
The dictionary contains:
- 'bins' : array of float
The bin boundaries for both `a` and `b` channels.
- 'hist' : 2D array of float
The normalized histogram.
- 'edges' : tuple of array of float
The bin edges along each dimension
- 'extents' : tuple of float
The left and right and top and bottom of the selected region.
"""
return (self.mask, self.data)
def pct_total_area(image, percentile=0.80):
"""Return threshold value based on percentage of total area.
The returned threshold is the intensity value below which the specified
fraction of the image's pixels fall.
"""
idx = int((image.size - 1) * percentile)
sorted_pixels = np.sort(image.flat)
return sorted_pixels[idx]
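if __name__ == '__main__':
    # Editor's sketch: quick sanity check of pct_total_area on synthetic data.
    # Roughly `percentile` of the values should lie below the returned
    # threshold (here ~80%).
    demo = np.random.rand(100, 100)
    thresh = pct_total_area(demo, percentile=0.80)
    print((demo < thresh).mean())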
|
gpl-3.0
|
irhete/predictive-monitoring-benchmark
|
transformers/AggregateTransformer.py
|
1
|
2165
|
from sklearn.base import TransformerMixin
import pandas as pd
import numpy as np
from time import time
import sys
class AggregateTransformer(TransformerMixin):
def __init__(self, case_id_col, cat_cols, num_cols, boolean=False, fillna=True):
self.case_id_col = case_id_col
self.cat_cols = cat_cols
self.num_cols = num_cols
self.boolean = boolean
self.fillna = fillna
self.columns = None
self.fit_time = 0
self.transform_time = 0
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
start = time()
# transform numeric cols
if len(self.num_cols) > 0:
dt_numeric = X.groupby(self.case_id_col)[self.num_cols].agg({'mean':np.mean, 'max':np.max, 'min':np.min, 'sum':np.sum, 'std':np.std})
dt_numeric.columns = ['_'.join(col).strip() for col in dt_numeric.columns.values]
# transform cat cols
dt_transformed = pd.get_dummies(X[self.cat_cols])
dt_transformed[self.case_id_col] = X[self.case_id_col]
del X
if self.boolean:
dt_transformed = dt_transformed.groupby(self.case_id_col).max()
else:
dt_transformed = dt_transformed.groupby(self.case_id_col).sum()
# concatenate
if len(self.num_cols) > 0:
dt_transformed = pd.concat([dt_transformed, dt_numeric], axis=1)
del dt_numeric
# fill missing values with 0-s
if self.fillna:
dt_transformed = dt_transformed.fillna(0)
# add missing columns if necessary
if self.columns is None:
self.columns = dt_transformed.columns
else:
missing_cols = [col for col in self.columns if col not in dt_transformed.columns]
for col in missing_cols:
dt_transformed[col] = 0
dt_transformed = dt_transformed[self.columns]
self.transform_time = time() - start
return dt_transformed
def get_feature_names(self):
return self.columns
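if __name__ == '__main__':
    # Editor's sketch (column names are made up): aggregate a tiny event log
    # into one boolean activity-occurrence row per case. num_cols is left
    # empty so the demo exercises only the categorical branch above.
    events = pd.DataFrame({'case_id': ['c1', 'c1', 'c2', 'c2', 'c2'],
                           'activity': ['register', 'pay', 'register',
                                        'check', 'pay']})
    agg = AggregateTransformer(case_id_col='case_id', cat_cols=['activity'],
                               num_cols=[], boolean=True)
    print(agg.fit_transform(events))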
|
apache-2.0
|
PatrickChrist/scikit-learn
|
examples/manifold/plot_swissroll.py
|
330
|
1446
|
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
|
bsd-3-clause
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/mne-python-0.10/mne/viz/topo.py
|
6
|
23928
|
"""Functions to plot M/EEG data on topo (one axes per channel)
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import warnings
from itertools import cycle
from functools import partial
import numpy as np
from ..io.pick import channel_type, pick_types
from ..fixes import normalize_colors
from ..utils import _clean_names, deprecated
from ..defaults import _handle_default
from .utils import (_check_delayed_ssp, COLORS, _draw_proj_checkbox,
add_background_image)
def iter_topography(info, layout=None, on_pick=None, fig=None,
fig_facecolor='k', axis_facecolor='k',
axis_spinecolor='k', layout_scale=None):
""" Create iterator over channel positions
This function returns a generator that unpacks into
a series of matplotlib axis objects and data / channel
indices, both corresponding to the sensor positions
of the related layout passed or inferred from the channel info.
`iter_topography`, hence, allows you to conveniently realize custom
topography plots.
Parameters
----------
info : instance of mne.io.meas_info.Info
The measurement info.
layout : instance of mne.layout.Layout | None
The layout to use. If None, layout will be guessed
on_pick : callable | None
The callback function to be invoked on clicking one
of the axes. It should have the signature:
`function(axis, channel_index)`.
fig : matplotlib.figure.Figure | None
The figure object to be considered. If None, a new
figure will be created.
fig_facecolor : str | obj
The figure face color. Defaults to black.
axis_facecolor : str | obj
The axis face color. Defaults to black.
axis_spinecolor : str | obj
The axis spine color. Defaults to black. In other words,
the color of the axis' edge lines.
layout_scale: float | None
Scaling factor for adjusting the relative size of the layout
on the canvas. If None, nothing will be scaled.
Returns
-------
A generator that can be unpacked into
ax : matplotlib.axis.Axis
The current axis of the topo plot.
ch_dx : int
The related channel index.
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.figure()
fig.set_facecolor(fig_facecolor)
if layout is None:
from ..channels import find_layout
layout = find_layout(info)
if on_pick is not None:
callback = partial(_plot_topo_onpick, show_func=on_pick)
fig.canvas.mpl_connect('button_press_event', callback)
pos = layout.pos.copy()
if layout_scale:
pos[:, :2] *= layout_scale
ch_names = _clean_names(info['ch_names'])
iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
for idx, name in iter_ch:
ax = plt.axes(pos[idx])
ax.patch.set_facecolor(axis_facecolor)
plt.setp(list(ax.spines.values()), color=axis_spinecolor)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
ch_idx = ch_names.index(name)
vars(ax)['_mne_ch_name'] = name
vars(ax)['_mne_ch_idx'] = ch_idx
vars(ax)['_mne_ax_face_color'] = axis_facecolor
yield ax, ch_idx
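def _example_custom_topo(evoked):
    """Editor's illustrative sketch (not part of mne-python).
    Plot each channel's evoked time course at its sensor position using
    `iter_topography`; `evoked` is assumed to be an mne Evoked instance.
    """
    import matplotlib.pyplot as plt
    for ax, ch_idx in iter_topography(evoked.info,
                                      fig_facecolor='white',
                                      axis_facecolor='white',
                                      axis_spinecolor='white'):
        ax.plot(evoked.times, evoked.data[ch_idx], color='red')
    plt.show()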
def _plot_topo(info=None, times=None, show_func=None, layout=None,
decim=None, vmin=None, vmax=None, ylim=None, colorbar=None,
border='none', axis_facecolor='k', fig_facecolor='k',
cmap='RdBu_r', layout_scale=None, title=None, x_label=None,
y_label=None, vline=None, font_color='w'):
"""Helper function to plot on sensor layout"""
import matplotlib.pyplot as plt
# prepare callbacks
tmin, tmax = times[[0, -1]]
on_pick = partial(show_func, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim, x_label=x_label,
y_label=y_label, colorbar=colorbar)
fig = plt.figure()
if colorbar:
norm = normalize_colors(vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array(np.linspace(vmin, vmax))
ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg=fig_facecolor)
cb = fig.colorbar(sm, ax=ax)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cb_yticks, color=font_color)
ax.axis('off')
my_topo_plot = iter_topography(info, layout=layout, on_pick=on_pick,
fig=fig, layout_scale=layout_scale,
axis_spinecolor=border,
axis_facecolor=axis_facecolor,
fig_facecolor=fig_facecolor)
for ax, ch_idx in my_topo_plot:
if layout.kind == 'Vectorview-all' and ylim is not None:
this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
else:
ylim_ = ylim
show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim_)
if ylim_ and not any(v is None for v in ylim_):
plt.ylim(*ylim_)
if title is not None:
plt.figtext(0.03, 0.9, title, color=font_color, fontsize=19)
return fig
def _plot_topo_onpick(event, show_func=None, colorbar=False):
"""Onpick callback that shows a single channel in a new figure"""
# make sure that the swipe gesture in OS-X doesn't open many figures
orig_ax = event.inaxes
if event.inaxes is None:
return
import matplotlib.pyplot as plt
try:
ch_idx = orig_ax._mne_ch_idx
face_color = orig_ax._mne_ax_face_color
fig, ax = plt.subplots(1)
plt.title(orig_ax._mne_ch_name)
ax.set_axis_bgcolor(face_color)
# allow custom function to override parameters
show_func(plt, ch_idx)
except Exception as err:
# matplotlib silently ignores exceptions in event handlers,
# so we print it here to know what went wrong
print(err)
raise err
def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, onselect, ylim=None,
tfr=None, freq=None, vline=None, x_label=None, y_label=None,
colorbar=False, picker=True, cmap='RdBu_r', title=None):
""" Aux function to show time-freq map on topo """
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
extent = (tmin, tmax, freq[0], freq[-1])
img = ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
vmin=vmin, vmax=vmax, picker=picker, cmap=cmap)
if isinstance(ax, plt.Axes):
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
else:
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar(mappable=img)
if title:
plt.title(title)
if not isinstance(ax, plt.Axes):
ax = plt.gca()
ax.RS = RectangleSelector(ax, onselect=onselect) # reference must be kept
def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
times, vline=None, x_label=None, y_label=None,
colorbar=False):
""" Aux function to show time series on topo """
import matplotlib.pyplot as plt
picker_flag = False
for data_, color_ in zip(data, color):
if not picker_flag:
# use large tol for picker so we can click anywhere in the axes
ax.plot(times, data_[ch_idx], color_, picker=1e9)
picker_flag = True
else:
ax.plot(times, data_[ch_idx], color_)
if vline:
for x in vline:
plt.axvline(x, color='w', linewidth=0.5)
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def _check_vlim(vlim):
"""AUX function"""
return not np.isscalar(vlim) and vlim is not None
@deprecated("It will be removed in version 0.11. "
"Please use evoked.plot_topo or viz.evoked.plot_evoked_topo "
"for list of evoked instead.")
def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None, proj=False,
vline=[0.0], fig_facecolor='k', fig_background=None,
axis_facecolor='k', font_color='w', show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale: float
Scaling factor for adjusting the relative size of the layout
on the canvas
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots. The value determines the upper and lower subplot
limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
mag, grad, misc. If None, the ylim parameter for each channel is
determined by the maximum absolute peak.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | numpy ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
axis_facecolor : str | obj
The face color to be used for each sensor plot. Defaults to black.
font_color : str | obj
The color of text in the colorbar and title. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
return _plot_evoked_topo(evoked=evoked, layout=layout,
layout_scale=layout_scale, color=color,
border=border, ylim=ylim, scalings=scalings,
title=title, proj=proj, vline=vline,
fig_facecolor=fig_facecolor,
fig_background=fig_background,
axis_facecolor=axis_facecolor,
font_color=font_color, show=show)
def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=[0.0], fig_facecolor='k',
fig_background=None, axis_facecolor='k', font_color='w',
show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale: float
Scaling factor for adjusting the relative size of the layout
on the canvas
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots. The value determines the upper and lower subplot
limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
mag, grad, misc. If None, the ylim parameter for each channel is
determined by the maximum absolute peak.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | numpy ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
axis_facecolor : str | obj
The face color to be used for each sensor plot. Defaults to black.
font_color : str | obj
The color of text in the colorbar and title. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
import matplotlib.pyplot as plt
if not type(evoked) in (tuple, list):
evoked = [evoked]
if type(color) in (tuple, list):
if len(color) != len(evoked):
raise ValueError('Lists of evoked objects and colors'
' must have the same length')
elif color is None:
colors = ['w'] + COLORS
stop = (slice(len(evoked)) if len(evoked) < len(colors)
else slice(len(colors)))
color = cycle(colors[stop])
if len(evoked) > len(colors):
warnings.warn('More evoked objects than colors available.'
'You should pass a list of unique colors.')
else:
color = cycle([color])
times = evoked[0].times
if not all((e.times == times).all() for e in evoked):
raise ValueError('All evoked.times must be the same')
info = evoked[0].info
ch_names = evoked[0].ch_names
if not all(e.ch_names == ch_names for e in evoked):
raise ValueError('All evoked.picks must be the same')
ch_names = _clean_names(ch_names)
if layout is None:
from ..channels.layout import find_layout
layout = find_layout(info)
# XXX. at the moment we are committed to 1- / 2-sensor-types layouts
chs_in_layout = set(layout.names) & set(ch_names)
types_used = set(channel_type(info, ch_names.index(ch))
for ch in chs_in_layout)
# remove possible reference meg channels
types_used = set.difference(types_used, set(['ref_meg']))
# one check for all vendors
meg_types = set(('mag', 'grad'))
is_meg = len(set.intersection(types_used, meg_types)) > 0
if is_meg:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
for kk in types_used]
else:
types_used_kwargs = dict((t, True) for t in types_used)
picks = [pick_types(info, meg=False, exclude=[], **types_used_kwargs)]
assert isinstance(picks, list) and len(types_used) == len(picks)
scalings = _handle_default('scalings', scalings)
evoked = [e.copy() for e in evoked]
for e in evoked:
for pick, t in zip(picks, types_used):
e.data[pick] = e.data[pick] * scalings[t]
if proj is True and all(e.proj is not True for e in evoked):
evoked = [e.apply_proj() for e in evoked]
elif proj == 'interactive': # let it fail early.
for e in evoked:
_check_delayed_ssp(e)
if ylim is None:
def set_ylim(x):
return np.abs(x).max()
ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
ymax = np.array(ylim_)
ylim_ = (-ymax, ymax)
elif isinstance(ylim, dict):
ylim_ = _handle_default('ylim', ylim)
ylim_ = [ylim_[kk] for kk in types_used]
# extra unpack to avoid bug #1700
if len(ylim_) == 1:
ylim_ = ylim_[0]
else:
ylim_ = zip(*[np.array(yl) for yl in ylim_])
else:
raise ValueError('ylim must be None or a dict')
plot_fun = partial(_plot_timeseries, data=[e.data for e in evoked],
color=color, times=times, vline=vline)
fig = _plot_topo(info=info, times=times, show_func=plot_fun, layout=layout,
decim=1, colorbar=False, ylim=ylim_, cmap=None,
layout_scale=layout_scale, border=border,
fig_facecolor=fig_facecolor, font_color=font_color,
axis_facecolor=axis_facecolor,
title=title, x_label='Time (s)', vline=vline)
if fig_background is not None:
add_background_image(fig, fig_background)
if proj == 'interactive':
for e in evoked:
_check_delayed_ssp(e)
params = dict(evokeds=evoked, times=times,
plot_update_proj_callback=_plot_update_evoked_topo,
projs=evoked[0].info['projs'], fig=fig)
_draw_proj_checkbox(None, params)
if show:
plt.show()
return fig
def _plot_update_evoked_topo(params, bools):
"""Helper function to update topo sensor plots"""
evokeds, times, fig = [params[k] for k in ('evokeds', 'times', 'fig')]
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
evokeds = [e.copy() for e in evokeds]
for e in evokeds:
e.info['projs'] = []
e.add_proj(projs)
e.apply_proj()
# make sure to only modify the time courses, not the ticks
axes = fig.get_axes()
n_lines = len(axes[0].lines)
n_diff = len(evokeds) - n_lines
ax_slice = slice(abs(n_diff)) if n_diff < 0 else slice(n_lines)
for ax in axes:
lines = ax.lines[ax_slice]
for line, evoked in zip(lines, evokeds):
line.set_data(times, evoked.data[ax._mne_ch_idx])
fig.canvas.draw()
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
data=None, epochs=None, sigma=None,
order=None, scalings=None, vline=None,
x_label=None, y_label=None, colorbar=False,
cmap='RdBu_r'):
"""Aux function to plot erfimage on sensor topography"""
from scipy import ndimage
import matplotlib.pyplot as plt
this_data = data[:, ch_idx, :].copy()
ch_type = channel_type(epochs.info, ch_idx)
if ch_type not in scalings:
raise KeyError('%s channel type not in scalings' % ch_type)
this_data *= scalings[ch_type]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
origin='lower', vmin=vmin, vmax=vmax, picker=True,
cmap=cmap, interpolation='nearest')
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def plot_topo_image_epochs(epochs, layout=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k', font_color='w',
show=True):
"""Plot Event Related Potential / Fields image on topographies
Parameters
----------
epochs : instance of Epochs
The epochs.
layout: instance of Layout
System specific sensor positions.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis of the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values.
layout_scale: float
scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color : str | obj
The color of tick labels in the colorbar. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
import matplotlib.pyplot as plt
scalings = _handle_default('scalings', scalings)
data = epochs.get_data()
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
if layout is None:
from ..channels.layout import find_layout
layout = find_layout(epochs.info)
erf_imshow = partial(_erfimage_imshow, scalings=scalings, order=order,
data=data, epochs=epochs, sigma=sigma,
cmap=cmap)
fig = _plot_topo(info=epochs.info, times=epochs.times,
show_func=erf_imshow, layout=layout, decim=1,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
fig_facecolor=fig_facecolor,
font_color=font_color, border=border,
x_label='Time (s)', y_label='Epoch')
if show:
plt.show()
return fig
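# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of mne-python): a typical call into
# the public helper above, assuming an `epochs` object loaded elsewhere
# (e.g. via mne.Epochs); the vmin/vmax values are arbitrary.
#
#     fig = plot_topo_image_epochs(epochs, sigma=0.5, vmin=-250, vmax=250,
#                                  colorbar=True, title='Single trials')
# ---------------------------------------------------------------------------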
|
bsd-3-clause
|
kylerbrown/scikit-learn
|
sklearn/manifold/t_sne.py
|
106
|
20057
|
# Author: Alexander Fabisch -- <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples, n_samples)
Square matrix of pairwise (squared) distances between the samples.
(The returned joint probabilities, by contrast, are condensed: the
diagonal and duplicate entries are omitted and everything is stored
in a one-dimensional array.)
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
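def _finite_difference_check(n_samples=6, n_components=2, alpha=1.0,
                             eps=1e-6, seed=0):
    """Editor's illustrative sketch (not part of scikit-learn).
    Numerically compare the analytic gradient returned by _kl_divergence
    against a central finite-difference approximation on a tiny random
    problem; the two should agree to roughly 1e-5.
    """
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, 3)
    distances = pairwise_distances(X, squared=True)
    P = _joint_probabilities(distances, 2.0, 0)
    params = rng.randn(n_samples * n_components)
    _, grad = _kl_divergence(params, P, alpha, n_samples, n_components)
    num_grad = np.empty_like(grad)
    for k in range(params.shape[0]):
        p_plus, p_minus = params.copy(), params.copy()
        p_plus[k] += eps
        p_minus[k] -= eps
        f_plus, _ = _kl_divergence(p_plus, P, alpha, n_samples, n_components)
        f_minus, _ = _kl_divergence(p_minus, P, alpha, n_samples, n_components)
        num_grad[k] = (f_plus - f_minus) / (2.0 * eps)
    return np.max(np.abs(grad - num_grad))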
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
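def _trustworthiness_example(seed=0):
    """Editor's illustrative sketch (not part of scikit-learn).
    Embed a small random dataset with the TSNE estimator defined below and
    report how well 5-nearest-neighbourhoods are preserved.
    """
    rng = np.random.RandomState(seed)
    X = rng.randn(30, 10)
    X_embedded = TSNE(n_components=2, random_state=seed).fit_transform(X)
    return trustworthiness(X, X_embedded, n_neighbors=5)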
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum, increasing the learning rate can help.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
[-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
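# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# how the TSNE estimator and the module-level `trustworthiness` helper might
# be combined -- embed a small random dataset and score how well the local
# neighborhoods are preserved.  It relies only on names defined or imported
# above (np, TSNE, trustworthiness).
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 10)                       # 50 samples, 10 features
    tsne = TSNE(n_components=2, perplexity=10.0, random_state=0)
    X_2d = tsne.fit_transform(X_demo)                # shape (50, 2)
    score = trustworthiness(X_demo, X_2d, n_neighbors=5)
    print("Trustworthiness of the 2-D embedding: %.3f" % score)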
|
bsd-3-clause
|
aetilley/scikit-learn
|
doc/sphinxext/gen_rst.py
|
142
|
40026
|
"""
Example generation for scikit-learn.
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so the link works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as a png image if the file name
# starts with plot and if the source file is more recent
# than any existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all, as some contained whitespace due to their default gallery
# thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
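# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original helper module): illustrates
# the name-resolution helpers above on a tiny snippet.  `identify_names` maps
# names as written in example code (e.g. ``np.asarray``) to their resolved
# module paths, using `get_short_module_name` to shorten the module part.
if __name__ == "__main__":
    sample_code = "import numpy as np\nnp.asarray([1, 2, 3])\n"
    for written_name, obj in sorted(identify_names(sample_code).items()):
        print(written_name, "->", obj["module"] + "." + obj["name"])
    # expected output: np.asarray -> numpy.asarray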
|
bsd-3-clause
|
VikParuchuri/scan
|
core/algo/scorer.py
|
1
|
3181
|
import calendar
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from core.algo.features import FeatureGenerator
import numpy as np
from sklearn import cross_validation
from sklearn.externals import joblib
from core.database.models import Model
from app import db
from datetime import datetime
from scan import settings
import os
class NoModelException(Exception):
pass
class Manager(object):
def __init__(self, question):
self.question = question
def score_essay(self, essay):
text = essay.text
model = self.get_latest_model()
model_obj = joblib.load(os.path.join(settings.MODEL_PATH, model.path))
return model_obj.predict(text)
def get_latest_model(self):
models = self.question.models
if len(models) == 0:
raise NoModelException
model = models[-1]
return model
def create_model(self):
text = [e.text for e in self.question.essays if e.actual_score is not None]
scores = [e.actual_score for e in self.question.essays if e.actual_score is not None]
scorer = Scorer(text, scores)
scorer.train()
time = datetime.utcnow()
timestamp = calendar.timegm(time.utctimetuple())
path_string = "{0}_{1}.pickle".format(self.question.id, timestamp)
model = Model(
question=self.question,
error=scorer.cv_score,
path=path_string
)
db.session.add(model)
joblib.dump(scorer, os.path.join(settings.MODEL_PATH, path_string), compress=9)
db.session.commit()
class Scorer(object):
classification_max = 4
cv_folds = 2
def __init__(self, text, scores):
self.text = text
self.scores = scores
self.feature_generator = FeatureGenerator()
self.classifier = RandomForestRegressor(
n_estimators=100,
min_samples_split=4,
min_samples_leaf=3,
random_state=1
)
unique_scores = set(scores)
if len(unique_scores) <= self.classification_max:
self.classifier = RandomForestClassifier(
n_estimators=100,
min_samples_split=4,
min_samples_leaf=3,
random_state=1
)
self.fit_feats()
self.fit_done = False
def fit_feats(self):
self.feature_generator.fit(self.text, self.scores)
def get_features(self):
feats = []
for t in self.text:
feats.append(self.feature_generator.get_features(t))
feat_mat = np.vstack(feats)
return feat_mat
def train(self):
feats = self.get_features()
scores = np.array(self.scores)
# Compute error metrics for the estimator.
self.cv_scores = cross_validation.cross_val_score(self.classifier, feats, scores)
self.cv_score = self.cv_scores.mean()
self.cv_dev = self.cv_scores.std()
self.classifier.fit(feats, scores)
self.fit_done = True
def predict(self, text):
feats = self.feature_generator.get_features(text)
return self.classifier.predict(feats)
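# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): shows how Scorer is
# meant to be driven -- fit on (essay text, score) pairs, then score an
# unseen essay.  The texts and scores below are made-up placeholders, and the
# sketch assumes core.algo.features.FeatureGenerator can featurize plain
# strings; the real pipeline goes through Manager.create_model(), which also
# persists the trained model with joblib.
if __name__ == "__main__":
    demo_texts = ["essay number %d about a topic" % i for i in range(10)]
    demo_scores = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]   # >4 unique values -> regressor
    scorer = Scorer(demo_texts, demo_scores)
    scorer.train()
    print("cross-validated score:", scorer.cv_score)
    print("predicted score:", scorer.predict("an unseen essay"))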
|
agpl-3.0
|
zfrenchee/pandas
|
pandas/tests/frame/test_convert_to.py
|
1
|
10568
|
# -*- coding: utf-8 -*-
from datetime import datetime
import pytest
import pytz
import collections
import numpy as np
from pandas import compat
from pandas.compat import long
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameConvertTo(TestData):
def test_to_dict_timestamp(self):
# GH11247
# split/records producing np.datetime64 rather than Timestamps
# on datetime64[ns] dtypes only
tsmp = Timestamp('20130101')
test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
expected_records = [{'A': tsmp, 'B': tsmp},
{'A': tsmp, 'B': tsmp}]
expected_records_mixed = [{'A': tsmp, 'B': 1},
{'A': tsmp, 'B': 2}]
assert (test_data.to_dict(orient='records') ==
expected_records)
assert (test_data_mixed.to_dict(orient='records') ==
expected_records_mixed)
expected_series = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([tsmp, tsmp], name='B'),
}
expected_series_mixed = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([1, 2], name='B'),
}
tm.assert_dict_equal(test_data.to_dict(orient='series'),
expected_series)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='series'),
expected_series_mixed)
expected_split = {
'index': [0, 1],
'data': [[tsmp, tsmp],
[tsmp, tsmp]],
'columns': ['A', 'B']
}
expected_split_mixed = {
'index': [0, 1],
'data': [[tsmp, 1],
[tsmp, 2]],
'columns': ['A', 'B']
}
tm.assert_dict_equal(test_data.to_dict(orient='split'),
expected_split)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='split'),
expected_split_mixed)
def test_to_dict_invalid_orient(self):
df = DataFrame({'A': [0, 1]})
pytest.raises(ValueError, df.to_dict, orient='xinvalid')
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"))
assert df.to_records()['index'][0] == df.index[0]
rs = df.to_records(convert_datetime64=False)
assert rs['index'][0] == df.index.values[0]
def test_to_records_with_multindex(self):
# GH3189
index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)['level_0']
assert 'bar' in r
assert 'one' not in r
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
import collections
collections.Mapping.register(email.message.Message)
headers = Parser().parsestr('From: <[email protected]>\n'
'To: <[email protected]>\n'
'Subject: Test message\n'
'\n'
'Body would go here\n')
frame = DataFrame.from_records([headers])
all(x in frame for x in ['Type', 'Subject', 'From'])
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
def test_to_records_index_name(self):
df = DataFrame(np.random.randn(3, 3))
df.index.name = 'X'
rs = df.to_records()
assert 'X' in rs.dtype.fields
df = DataFrame(np.random.randn(3, 3))
rs = df.to_records()
assert 'index' in rs.dtype.fields
df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
df.index.names = ['A', None]
rs = df.to_records()
assert 'level_0' in rs.dtype.fields
def test_to_records_with_unicode_index(self):
# GH13172
# unicode_literals conflict with to_records
result = DataFrame([{u'a': u'x', u'b': 'y'}]).set_index(u'a')\
.to_records()
expected = np.rec.array([('x', 'y')], dtype=[('a', 'O'), ('b', 'O')])
tm.assert_almost_equal(result, expected)
def test_to_records_with_unicode_column_names(self):
# xref issue: https://github.com/numpy/numpy/issues/2407
# Issue #11879. to_records used to raise an exception when used
# with column names containing non-ascii characters in Python 2
result = DataFrame(data={u"accented_name_é": [1.0]}).to_records()
# Note that numpy allows for unicode field names but dtypes need
# to be specified using dictionary instead of list of tuples.
expected = np.rec.array(
[(0, 1.0)],
dtype={"names": ["index", u"accented_name_é"],
"formats": ['<i8', '<f8']}
)
tm.assert_almost_equal(result, expected)
def test_to_records_with_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# to record array
# this coerces
result = df.to_records()
expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
dtype=[('index', '=i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('mapping', [
dict,
collections.defaultdict(list),
collections.OrderedDict])
def test_to_dict(self, mapping):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
# GH16122
recons_data = DataFrame(test_data).to_dict(into=mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("l", mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][int(k2) - 1])
recons_data = DataFrame(test_data).to_dict("s", mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("sp", mapping)
expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
'data': [[1.0, '1'], [2.0, '2'], [np.nan, '3']]}
tm.assert_dict_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("r", mapping)
expected_records = [{'A': 1.0, 'B': '1'},
{'A': 2.0, 'B': '2'},
{'A': np.nan, 'B': '3'}]
assert isinstance(recons_data, list)
assert (len(recons_data) == 3)
for l, r in zip(recons_data, expected_records):
tm.assert_dict_equal(l, r)
# GH10844
recons_data = DataFrame(test_data).to_dict("i")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k2][k])
df = DataFrame(test_data)
df['duped'] = df[df.columns[0]]
recons_data = df.to_dict("i")
comp_data = test_data.copy()
comp_data['duped'] = comp_data[df.columns[0]]
for k, v in compat.iteritems(comp_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k2][k])
@pytest.mark.parametrize('mapping', [
list,
collections.defaultdict,
[]])
def test_to_dict_errors(self, mapping):
# GH16122
df = DataFrame(np.random.randn(3, 3))
with pytest.raises(TypeError):
df.to_dict(into=mapping)
def test_to_dict_not_unique_warning(self):
# GH16927: When converting to a dict, if a column has a non-unique name
# it will be dropped, throwing a warning.
df = DataFrame([[1, 2, 3]], columns=['a', 'a', 'b'])
with tm.assert_produces_warning(UserWarning):
df.to_dict()
@pytest.mark.parametrize('tz', ['UTC', 'GMT', 'US/Eastern'])
def test_to_records_datetimeindex_with_tz(self, tz):
# GH13937
dr = date_range('2016-01-01', periods=10,
freq='S', tz=tz)
df = DataFrame({'datetime': dr}, index=dr)
expected = df.to_records()
result = df.tz_convert("UTC").to_records()
# both converted to UTC, so they are equal
tm.assert_numpy_array_equal(result, expected)
def test_to_dict_box_scalars(self):
# 14216
# make sure that we are boxing properly
d = {'a': [1], 'b': ['b']}
result = DataFrame(d).to_dict()
assert isinstance(list(result['a'])[0], (int, long))
assert isinstance(list(result['b'])[0], (int, long))
result = DataFrame(d).to_dict(orient='records')
assert isinstance(result[0]['a'], (int, long))
def test_frame_to_dict_tz(self):
# GH18372: when converting to dict with orient='records', columns of
# tz-aware datetimes were not converted to the required arrays
data = [(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),),
(datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc,),)]
df = DataFrame(list(data), columns=["d", ])
result = df.to_dict(orient='records')
expected = [
{'d': Timestamp('2017-11-18 21:53:00.219225+0000', tz=pytz.utc)},
{'d': Timestamp('2017-11-18 22:06:30.061810+0000', tz=pytz.utc)},
]
tm.assert_dict_equal(result[0], expected[0])
tm.assert_dict_equal(result[1], expected[1])
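# ---------------------------------------------------------------------------
# Hedged illustration (not one of the original tests): a quick sketch of the
# `orient` values exercised above on a tiny frame.
if __name__ == '__main__':
    demo = DataFrame({'A': [1, 2], 'B': ['x', 'y']})
    print(demo.to_dict(orient='records'))
    # [{'A': 1, 'B': 'x'}, {'A': 2, 'B': 'y'}]
    print(demo.to_dict(orient='split'))
    # {'index': [0, 1], 'columns': ['A', 'B'], 'data': [[1, 'x'], [2, 'y']]}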
|
bsd-3-clause
|
grlee77/pywt
|
doc/source/pyplots/plot_boundary_modes.py
|
3
|
1472
|
"""A visual illustration of the various signal extension modes supported in
PyWavelets. For efficiency, in the C routines the array is not actually
extended as is done here. This is just a demo for easier visual explanation of
the behavior of the various boundary modes.
In practice, which signal extension mode is beneficial will depend on the
signal characteristics. For this particular signal, some modes such as
"periodic", "antisymmetric" and "zero" result in large discontinuities that
would lead to large amplitude boundary coefficients in the detail coefficients
of a discrete wavelet transform.
"""
import numpy as np
from matplotlib import pyplot as plt
from pywt._doc_utils import boundary_mode_subplot
# synthetic test signal
x = 5 - np.linspace(-1.9, 1.1, 9)**2
# Create a figure with one subplots per boundary mode
fig, axes = plt.subplots(3, 3, figsize=(10, 6))
plt.subplots_adjust(hspace=0.5)
axes = axes.ravel()
boundary_mode_subplot(x, 'symmetric', axes[0], symw=False)
boundary_mode_subplot(x, 'reflect', axes[1], symw=True)
boundary_mode_subplot(x, 'periodic', axes[2], symw=False)
boundary_mode_subplot(x, 'antisymmetric', axes[3], symw=False)
boundary_mode_subplot(x, 'antireflect', axes[4], symw=True)
boundary_mode_subplot(x, 'periodization', axes[5], symw=False)
boundary_mode_subplot(x, 'smooth', axes[6], symw=False)
boundary_mode_subplot(x, 'constant', axes[7], symw=False)
boundary_mode_subplot(x, 'zero', axes[8], symw=False)
plt.show()
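# ---------------------------------------------------------------------------
# Hedged aside (not part of the original figure script): the mode names
# illustrated above are exactly what the transform functions accept through
# their ``mode`` argument, e.g. a single-level DWT of the same test signal.
import pywt
cA, cD = pywt.dwt(x, 'db2', mode='smooth')
print('coefficient lengths with mode="smooth":', len(cA), len(cD))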
|
mit
|
grundgruen/zipline
|
tests/utils/test_factory.py
|
34
|
2175
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
import pandas as pd
import pytz
import numpy as np
from zipline.utils.factory import (load_from_yahoo,
load_bars_from_yahoo)
class TestFactory(TestCase):
def test_load_from_yahoo(self):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=stocks, start=start, end=end)
assert data.index[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
assert data.index[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
for stock in stocks:
assert stock in data.columns
np.testing.assert_raises(
AssertionError, load_from_yahoo, stocks=stocks,
start=end, end=start
)
def test_load_bars_from_yahoo(self):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_bars_from_yahoo(stocks=stocks, start=start, end=end)
assert data.major_axis[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
assert data.major_axis[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
for stock in stocks:
assert stock in data.items
for ohlc in ['open', 'high', 'low', 'close', 'volume', 'price']:
assert ohlc in data.minor_axis
np.testing.assert_raises(
AssertionError, load_bars_from_yahoo, stocks=stocks,
start=end, end=start
)
|
apache-2.0
|
nixingyang/Kaggle-Face-Verification
|
Quora Question Pairs/solution_deep_learning.py
|
1
|
21841
|
from __future__ import absolute_import, division, print_function
import matplotlib
matplotlib.use("Agg")
import os
import re
import glob
import pylab
import numpy as np
import pandas as pd
from gensim.models import KeyedVectors
from string import ascii_lowercase, punctuation
from keras import backend as K
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Dropout, Embedding, Input, Lambda, LSTM, merge
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.optimizers import Nadam
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.visualize_util import plot
from sklearn.model_selection import StratifiedKFold
# Dataset
PROJECT_NAME = "Quora Question Pairs"
PROJECT_FOLDER_PATH = os.path.join(os.path.expanduser("~"), "Documents/Dataset", PROJECT_NAME)
TRAIN_FILE_PATH = os.path.join(PROJECT_FOLDER_PATH, "train.csv")
TEST_FILE_PATH = os.path.join(PROJECT_FOLDER_PATH, "test.csv")
EMBEDDING_FILE_PATH = os.path.join(PROJECT_FOLDER_PATH, "glove.42B.300d_word2vec.txt")
DATASET_FILE_PATH = os.path.join(PROJECT_FOLDER_PATH, "deep_learning_dataset.npz")
MAX_SEQUENCE_LENGTH = 30
# Output
OUTPUT_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "{}_output".format(os.path.basename(__file__).split(".")[0]))
OPTIMAL_WEIGHTS_FOLDER_PATH = os.path.join(OUTPUT_FOLDER_PATH, "Optimal Weights")
SUBMISSION_FOLDER_PATH = os.path.join(OUTPUT_FOLDER_PATH, "Submission")
# Training and Testing procedure
SPLIT_NUM = 10
RANDOM_STATE = 666666
PATIENCE = 4
BATCH_SIZE = 2048
MAXIMUM_EPOCH_NUM = 1000
TARGET_MEAN_PREDICTION = 0.175 # https://www.kaggle.com/davidthaler/how-many-1-s-are-in-the-public-lb
def correct_typo(word, word_to_index_dict, known_typo_dict, min_word_length=8):
def get_candidate_word_list(word):
# https://www.kaggle.com/cpmpml/spell-checker-using-word2vec/notebook
left_word_with_right_word_list = [(word[:index], word[index:]) for index in range(len(word) + 1)]
deleted_word_list = [left_word + right_word[1:] for left_word, right_word in left_word_with_right_word_list if right_word]
transposed_word_list = [left_word + right_word[1] + right_word[0] + right_word[2:] for left_word, right_word in left_word_with_right_word_list if len(right_word) > 1]
replaced_word_list = [left_word + character + right_word[1:] for left_word, right_word in left_word_with_right_word_list if right_word for character in ascii_lowercase]
inserted_word_list = [left_word + character + right_word for left_word, right_word in left_word_with_right_word_list for character in ascii_lowercase]
return list(set(deleted_word_list + transposed_word_list + replaced_word_list + inserted_word_list))
if word in word_to_index_dict:
return word
if len(word) < min_word_length:
return ""
if word in known_typo_dict:
return known_typo_dict[word]
candidate_word_list = get_candidate_word_list(word)
candidate_word_with_index_array = np.array([(candidate_word, word_to_index_dict[candidate_word]) for candidate_word in candidate_word_list if candidate_word in word_to_index_dict])
if len(candidate_word_with_index_array) == 0:
selected_candidate_word = ""
else:
selected_candidate_word = candidate_word_with_index_array[np.argmin(candidate_word_with_index_array[:, -1].astype(np.int))][0]
print("Replacing {} with {} ...".format(word, selected_candidate_word))
known_typo_dict[word] = selected_candidate_word
return selected_candidate_word
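# Hedged illustration (added comment, not original code): given a vocabulary
# that contains "learning", a misspelling such as "learnning" (length >= 8,
# not in the vocabulary) would be repaired via the single-edit candidates
# generated above, e.g.
#     correct_typo("learnning", word_to_index_dict, {})  ->  "learning"
# provided "learning" is the in-vocabulary candidate with the smallest index.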
def clean_sentence(original_sentence, word_to_index_dict, known_typo_dict, result_when_failure="empty"):
# https://www.kaggle.com/currie32/quora-question-pairs/the-importance-of-cleaning-text
try:
# Convert to lower case
cleaned_sentence = " ".join(original_sentence.lower().split())
# Replace elements
cleaned_sentence = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", cleaned_sentence)
cleaned_sentence = re.sub(r"what's", "what is ", cleaned_sentence)
cleaned_sentence = re.sub(r"\'s", " ", cleaned_sentence)
cleaned_sentence = re.sub(r"\'ve", " have ", cleaned_sentence)
cleaned_sentence = re.sub(r"can't", "cannot ", cleaned_sentence)
cleaned_sentence = re.sub(r"n't", " not ", cleaned_sentence)
cleaned_sentence = re.sub(r"i'm", "i am ", cleaned_sentence)
cleaned_sentence = re.sub(r"\'re", " are ", cleaned_sentence)
cleaned_sentence = re.sub(r"\'d", " would ", cleaned_sentence)
cleaned_sentence = re.sub(r"\'ll", " will ", cleaned_sentence)
cleaned_sentence = re.sub(r",", " ", cleaned_sentence)
cleaned_sentence = re.sub(r"\.", " ", cleaned_sentence)
cleaned_sentence = re.sub(r"!", " ! ", cleaned_sentence)
cleaned_sentence = re.sub(r"\/", " ", cleaned_sentence)
cleaned_sentence = re.sub(r"\^", " ^ ", cleaned_sentence)
cleaned_sentence = re.sub(r"\+", " + ", cleaned_sentence)
cleaned_sentence = re.sub(r"\-", " - ", cleaned_sentence)
cleaned_sentence = re.sub(r"\=", " = ", cleaned_sentence)
cleaned_sentence = re.sub(r"'", " ", cleaned_sentence)
cleaned_sentence = re.sub(r"(\d+)(k)", r"\g<1>000", cleaned_sentence)
cleaned_sentence = re.sub(r":", " : ", cleaned_sentence)
cleaned_sentence = re.sub(r" e g ", " eg ", cleaned_sentence)
cleaned_sentence = re.sub(r" b g ", " bg ", cleaned_sentence)
cleaned_sentence = re.sub(r" u s ", " american ", cleaned_sentence)
cleaned_sentence = re.sub(r"\0s", "0", cleaned_sentence)
cleaned_sentence = re.sub(r" 9 11 ", "911", cleaned_sentence)
cleaned_sentence = re.sub(r"e - mail", "email", cleaned_sentence)
cleaned_sentence = re.sub(r"j k", "jk", cleaned_sentence)
cleaned_sentence = re.sub(r"\s{2,}", " ", cleaned_sentence)
# Remove punctuation
cleaned_sentence = "".join([character for character in cleaned_sentence if character not in punctuation])
# Correct simple typos
cleaned_sentence = " ".join([correct_typo(word, word_to_index_dict, known_typo_dict) for word in cleaned_sentence.split()])
cleaned_sentence = " ".join([word for word in cleaned_sentence.split()])
# Check the length of the cleaned sentence
assert cleaned_sentence
return cleaned_sentence
except Exception as exception:
print("Exception for {}: {}".format(original_sentence, exception))
return result_when_failure
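# Hedged example of the cleaning pipeline above (illustrative only, assuming every remaining
# token is in the word2vec vocabulary so no typo correction is triggered):
#   clean_sentence("What's the U.S. e-mail etiquette?", word_to_index_dict, known_typo_dict)
#   # -> roughly "what is the american email etiquette" after lowercasing, contraction
#   #    expansion, the token substitutions and punctuation removal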
def load_file(original_file_path, word_to_index_dict, known_typo_dict):
processed_file_path = os.path.join(os.path.dirname(original_file_path), "processed_" + os.path.basename(original_file_path))
if os.path.isfile(processed_file_path):
print("Loading {} ...".format(processed_file_path))
file_content = pd.read_csv(processed_file_path, encoding="utf-8")
else:
print("Loading {} ...".format(original_file_path))
file_content = pd.read_csv(original_file_path, encoding="utf-8")
print("Cleaning sentences ...")
file_content["processed_question1"] = file_content["question1"].apply(lambda original_sentence: clean_sentence(original_sentence, word_to_index_dict, known_typo_dict))
file_content["processed_question2"] = file_content["question2"].apply(lambda original_sentence: clean_sentence(original_sentence, word_to_index_dict, known_typo_dict))
print("Saving processed file ...")
interesting_column_name_list = ["processed_question1", "processed_question2"]
if "is_duplicate" in file_content.columns:
interesting_column_name_list.append("is_duplicate")
file_content = file_content[interesting_column_name_list]
file_content.to_csv(processed_file_path, index=False)
question1_text_list = file_content["processed_question1"].tolist()
question2_text_list = file_content["processed_question2"].tolist()
if "is_duplicate" in file_content.columns:
is_duplicate_list = file_content["is_duplicate"].tolist()
return question1_text_list, question2_text_list, is_duplicate_list
else:
return question1_text_list, question2_text_list
def load_dataset():
if os.path.isfile(DATASET_FILE_PATH):
print("Loading dataset from disk ...")
dataset_file_content = np.load(DATASET_FILE_PATH)
train_data_1_array = dataset_file_content["train_data_1_array"]
train_data_2_array = dataset_file_content["train_data_2_array"]
test_data_1_array = dataset_file_content["test_data_1_array"]
test_data_2_array = dataset_file_content["test_data_2_array"]
train_label_array = dataset_file_content["train_label_array"]
embedding_matrix = dataset_file_content["embedding_matrix"]
else:
print("Initiating word2vec ...")
word2vec = KeyedVectors.load_word2vec_format(EMBEDDING_FILE_PATH, binary=False)
word_to_index_dict = dict([(word, index) for index, word in enumerate(word2vec.index2word)])
print("word2vec contains {} unique words.".format(len(word_to_index_dict)))
print("Loading text files ...")
known_typo_dict = {}
train_text_1_list, train_text_2_list, train_label_list = load_file(TRAIN_FILE_PATH, word_to_index_dict, known_typo_dict)
test_text_1_list, test_text_2_list = load_file(TEST_FILE_PATH, word_to_index_dict, known_typo_dict)
print("Initiating tokenizer ...")
tokenizer = Tokenizer()
tokenizer.fit_on_texts(train_text_1_list + train_text_2_list + test_text_1_list + test_text_2_list)
print("Dataset contains {} unique words.".format(len(tokenizer.word_index)))
print("Turning texts into sequences ...")
train_sequence_1_list = tokenizer.texts_to_sequences(train_text_1_list)
train_sequence_2_list = tokenizer.texts_to_sequences(train_text_2_list)
test_sequence_1_list = tokenizer.texts_to_sequences(test_text_1_list)
test_sequence_2_list = tokenizer.texts_to_sequences(test_text_2_list)
print("Padding sequences with fixed length ...")
train_data_1_array = pad_sequences(train_sequence_1_list, maxlen=MAX_SEQUENCE_LENGTH, padding="post", truncating="post")
train_data_2_array = pad_sequences(train_sequence_2_list, maxlen=MAX_SEQUENCE_LENGTH, padding="post", truncating="post")
test_data_1_array = pad_sequences(test_sequence_1_list, maxlen=MAX_SEQUENCE_LENGTH, padding="post", truncating="post")
test_data_2_array = pad_sequences(test_sequence_2_list, maxlen=MAX_SEQUENCE_LENGTH, padding="post", truncating="post")
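        # Worked example of the padding step (values are illustrative): with maxlen=4,
        # pad_sequences([[5, 3]], maxlen=4, padding="post", truncating="post") gives
        # [[5, 3, 0, 0]]; longer sequences are cut at the end, and index 0 stays reserved
        # for padding, which is why the embedding matrix below keeps row 0 as all zeros.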
print("Initiating embedding matrix ...")
embedding_matrix = np.zeros((len(tokenizer.word_index) + 1, word2vec.vector_size), dtype=np.float32)
for word, index in tokenizer.word_index.items():
assert word in word_to_index_dict
embedding_matrix[index] = word2vec.word_vec(word)
assert np.sum(np.isclose(np.sum(embedding_matrix, axis=1), 0)) == 1
print("Converting to numpy array ...")
train_label_array = np.array(train_label_list, dtype=np.bool)
print("Saving dataset to disk ...")
np.savez_compressed(DATASET_FILE_PATH,
train_data_1_array=train_data_1_array, train_data_2_array=train_data_2_array,
test_data_1_array=test_data_1_array, test_data_2_array=test_data_2_array,
train_label_array=train_label_array, embedding_matrix=embedding_matrix)
return train_data_1_array, train_data_2_array, test_data_1_array, test_data_2_array, \
train_label_array, embedding_matrix
def init_model(embedding_matrix, learning_rate=0.002):
def get_sentence_feature_extractor(embedding_matrix):
input_tensor = Input(shape=(None,), dtype="int32")
output_tensor = Embedding(input_dim=embedding_matrix.shape[0], output_dim=embedding_matrix.shape[1],
input_length=None, mask_zero=True, weights=[embedding_matrix], trainable=False)(input_tensor)
output_tensor = LSTM(output_dim=256, dropout_W=0.3, dropout_U=0.3, activation="tanh", return_sequences=False)(output_tensor)
output_tensor = BatchNormalization()(output_tensor)
output_tensor = Dropout(0.3)(output_tensor)
model = Model(input_tensor, output_tensor)
return model
def get_binary_classifier(input_shape):
input_tensor = Input(shape=input_shape)
output_tensor = Dense(128, activation="relu")(input_tensor)
output_tensor = BatchNormalization()(output_tensor)
output_tensor = Dropout(0.3)(output_tensor)
output_tensor = Dense(1, activation="sigmoid")(output_tensor)
model = Model(input_tensor, output_tensor)
return model
# Initiate the input tensors
input_data_1_tensor = Input(shape=(None,), dtype="int32")
input_data_2_tensor = Input(shape=(None,), dtype="int32")
# Define the sentence feature extractor
sentence_feature_extractor = get_sentence_feature_extractor(embedding_matrix)
input_1_feature_tensor = sentence_feature_extractor(input_data_1_tensor)
input_2_feature_tensor = sentence_feature_extractor(input_data_2_tensor)
merged_feature_1_tensor = merge([input_1_feature_tensor, input_2_feature_tensor], mode="concat")
merged_feature_2_tensor = merge([input_2_feature_tensor, input_1_feature_tensor], mode="concat")
# Define the binary classifier
binary_classifier = get_binary_classifier(input_shape=(K.int_shape(merged_feature_1_tensor)[1],))
output_1_tensor = binary_classifier(merged_feature_1_tensor)
output_2_tensor = binary_classifier(merged_feature_2_tensor)
output_tensor = merge([output_1_tensor, output_2_tensor], mode="concat", concat_axis=1)
output_tensor = Lambda(lambda x: K.mean(x, axis=1, keepdims=True), output_shape=(1,))(output_tensor)
# Define the overall model
model = Model([input_data_1_tensor, input_data_2_tensor], output_tensor)
model.compile(optimizer=Nadam(lr=learning_rate), loss="binary_crossentropy", metrics=["accuracy"])
model.summary()
# Plot the model structures
plot(sentence_feature_extractor, to_file=os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "sentence_feature_extractor.png"), show_shapes=True, show_layer_names=True)
plot(binary_classifier, to_file=os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "binary_classifier.png"), show_shapes=True, show_layer_names=True)
plot(model, to_file=os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "model.png"), show_shapes=True, show_layer_names=True)
return model
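# Note on init_model(): the same sentence encoder and the same binary classifier are applied
# to both question orders (q1|q2 and q2|q1), and the two sigmoid outputs are averaged by the
# final Lambda layer, so the prediction is symmetric in its two inputs.
# Hedged usage sketch (array names as defined in load_dataset()):
#   model = init_model(embedding_matrix)
#   proba = model.predict([train_data_1_array[:2], train_data_2_array[:2]])  # shape (2, 1)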
class InspectLossAccuracy(Callback):
def __init__(self, *args, **kwargs):
self.split_index = kwargs.pop("split_index", None)
super(InspectLossAccuracy, self).__init__(*args, **kwargs)
self.train_loss_list = []
self.valid_loss_list = []
self.train_acc_list = []
self.valid_acc_list = []
def on_epoch_end(self, epoch, logs=None):
# Loss
train_loss = logs.get("loss")
valid_loss = logs.get("val_loss")
self.train_loss_list.append(train_loss)
self.valid_loss_list.append(valid_loss)
epoch_index_array = np.arange(len(self.train_loss_list)) + 1
pylab.figure()
pylab.plot(epoch_index_array, self.train_loss_list, "yellowgreen", label="train_loss")
pylab.plot(epoch_index_array, self.valid_loss_list, "lightskyblue", label="valid_loss")
pylab.grid()
pylab.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=2, ncol=2, mode="expand", borderaxespad=0.)
pylab.savefig(os.path.join(OUTPUT_FOLDER_PATH, "loss_curve_{}.png".format(self.split_index)))
pylab.close()
# Accuracy
train_acc = logs.get("acc")
valid_acc = logs.get("val_acc")
self.train_acc_list.append(train_acc)
self.valid_acc_list.append(valid_acc)
epoch_index_array = np.arange(len(self.train_acc_list)) + 1
pylab.figure()
pylab.plot(epoch_index_array, self.train_acc_list, "yellowgreen", label="train_acc")
pylab.plot(epoch_index_array, self.valid_acc_list, "lightskyblue", label="valid_acc")
pylab.grid()
pylab.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=2, ncol=2, mode="expand", borderaxespad=0.)
pylab.savefig(os.path.join(OUTPUT_FOLDER_PATH, "accuracy_curve_{}.png".format(self.split_index)))
pylab.close()
def ensemble_predictions(submission_folder_path, proba_column_name):
# Read predictions
submission_file_path_list = glob.glob(os.path.join(submission_folder_path, "submission_*.csv"))
submission_file_content_list = [pd.read_csv(submission_file_path) for submission_file_path in submission_file_path_list]
ensemble_submission_file_content = submission_file_content_list[0]
print("There are {} submissions in total.".format(len(submission_file_path_list)))
# Concatenate predictions
proba_array = np.array([submission_file_content[proba_column_name].as_matrix() for submission_file_content in submission_file_content_list])
# Ensemble predictions
for ensemble_func, ensemble_submission_file_name in zip([np.max, np.min, np.mean, np.median], ["max.csv", "min.csv", "mean.csv", "median.csv"]):
ensemble_submission_file_path = os.path.join(submission_folder_path, os.pardir, ensemble_submission_file_name)
ensemble_submission_file_content[proba_column_name] = ensemble_func(proba_array, axis=0)
ensemble_submission_file_content.to_csv(ensemble_submission_file_path, index=False)
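# Hedged usage sketch: once every fold has written its submission_<k>.csv file,
# ensemble_predictions() writes max.csv / min.csv / mean.csv / median.csv one level above
# the submission folder, e.g.:
#   ensemble_predictions(SUBMISSION_FOLDER_PATH, proba_column_name="is_duplicate")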
def run():
print("Creating folders ...")
os.makedirs(OPTIMAL_WEIGHTS_FOLDER_PATH, exist_ok=True)
os.makedirs(SUBMISSION_FOLDER_PATH, exist_ok=True)
print("Loading dataset ...")
train_data_1_array, train_data_2_array, test_data_1_array, test_data_2_array, train_label_array, embedding_matrix = load_dataset()
print("Initializing model ...")
model = init_model(embedding_matrix)
vanilla_weights = model.get_weights()
cv_object = StratifiedKFold(n_splits=SPLIT_NUM, random_state=RANDOM_STATE)
for split_index, (train_index_array, valid_index_array) in enumerate(cv_object.split(np.zeros((len(train_label_array), 1)), train_label_array), start=1):
print("Working on splitting fold {} ...".format(split_index))
submission_file_path = os.path.join(SUBMISSION_FOLDER_PATH, "submission_{}.csv".format(split_index))
if os.path.isfile(submission_file_path):
print("The submission file already exists.")
continue
optimal_weights_file_path = os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "optimal_weights_{}.h5".format(split_index))
if os.path.isfile(optimal_weights_file_path):
print("The optimal weights file already exists.")
else:
print("Dividing the vanilla training dataset to actual training/validation dataset ...")
actual_train_data_1_array, actual_train_data_2_array, actual_train_label_array = train_data_1_array[train_index_array], train_data_2_array[train_index_array], train_label_array[train_index_array]
actual_valid_data_1_array, actual_valid_data_2_array, actual_valid_label_array = train_data_1_array[valid_index_array], train_data_2_array[valid_index_array], train_label_array[valid_index_array]
print("Calculating class weight ...")
train_mean_prediction = np.mean(actual_train_label_array)
train_class_weight = {0: (1 - TARGET_MEAN_PREDICTION) / (1 - train_mean_prediction), 1: TARGET_MEAN_PREDICTION / train_mean_prediction}
valid_mean_prediction = np.mean(actual_valid_label_array)
valid_class_weight = {0: (1 - TARGET_MEAN_PREDICTION) / (1 - valid_mean_prediction), 1: TARGET_MEAN_PREDICTION / valid_mean_prediction}
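            # Worked example (numbers are illustrative): if the training fold has a positive
            # rate of 0.37 while the public LB target is TARGET_MEAN_PREDICTION = 0.175, the
            # weights become {0: 0.825 / 0.63 ~ 1.31, 1: 0.175 / 0.37 ~ 0.47}, i.e. positives
            # are down-weighted so that the mean prediction is pulled towards the target.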
print("Startting with vanilla weights ...")
model.set_weights(vanilla_weights)
print("Performing the training procedure ...")
valid_sample_weights = np.ones(len(actual_valid_label_array)) * valid_class_weight[1]
valid_sample_weights[np.logical_not(actual_valid_label_array)] = valid_class_weight[0]
earlystopping_callback = EarlyStopping(monitor="val_loss", patience=PATIENCE)
modelcheckpoint_callback = ModelCheckpoint(optimal_weights_file_path, monitor="val_loss", save_best_only=True, save_weights_only=True)
inspectlossaccuracy_callback = InspectLossAccuracy(split_index=split_index)
model.fit([actual_train_data_1_array, actual_train_data_2_array], actual_train_label_array, batch_size=BATCH_SIZE,
validation_data=([actual_valid_data_1_array, actual_valid_data_2_array], actual_valid_label_array, valid_sample_weights),
callbacks=[earlystopping_callback, modelcheckpoint_callback, inspectlossaccuracy_callback],
class_weight=train_class_weight, nb_epoch=MAXIMUM_EPOCH_NUM, verbose=2)
assert os.path.isfile(optimal_weights_file_path)
model.load_weights(optimal_weights_file_path)
print("Performing the testing procedure ...")
prediction_array = model.predict([test_data_1_array, test_data_2_array], batch_size=BATCH_SIZE, verbose=2)
submission_file_content = pd.DataFrame({"test_id": np.arange(len(prediction_array)), "is_duplicate": np.squeeze(prediction_array)})
submission_file_content.to_csv(submission_file_path, index=False)
print("Performing ensembling ...")
ensemble_predictions(submission_folder_path=SUBMISSION_FOLDER_PATH, proba_column_name="is_duplicate")
print("All done!")
if __name__ == "__main__":
run()
|
mit
|
philippjfr/bokeh
|
bokeh/sampledata/degrees.py
|
2
|
2518
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a table of data regarding bachelors degrees earned by women,
broken down by field for any given year. It exposes an attribute ``data`` which
is a pandas DataFrame with the following fields:
Year
Agriculture
Architecture
Art and Performance
Biology
Business
Communications and Journalism
    Computer Science
    Education
Engineering
English
Foreign Languages
Health Professions
Math and Statistics
Physical Sciences
Psychology
Public Administration
Social Sciences and History
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
from bokeh.util.api import public, internal ; public, internal
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ..util.sampledata import package_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'data',
)
#-----------------------------------------------------------------------------
# Public API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Internal API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
data = package_csv('degrees', 'percent-bachelors-degrees-women-usa.csv')
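# Hedged usage sketch (not part of the module): ``data`` is a pandas DataFrame, so typical
# access looks like
#   from bokeh.sampledata.degrees import data
#   data[["Year", "Computer Science"]].head()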
|
bsd-3-clause
|
ToFuProject/tofu
|
tofu/data/_core_new.py
|
1
|
48585
|
# -*- coding: utf-8 -*-
# Built-in
import sys
import os
# import itertools as itt
import copy
import warnings
from abc import ABCMeta, abstractmethod
import inspect
# Common
import numpy as np
import scipy.interpolate as scpinterp
# import matplotlib.pyplot as plt
# from matplotlib.tri import Triangulation as mplTri
# tofu
# from tofu import __version__ as __version__
import tofu.utils as utils
from . import _check_inputs
from . import _comp
from . import _comp_new
from . import _plot_new
from . import _def
from . import _comp_spectrallines
__all__ = ['DataCollection'] # , 'TimeTraceCollection']
_INTERPT = 'zero'
_GROUP_0D = 'time'
_GROUP_1D = 'radius'
_GROUP_2D = 'mesh2d'
#############################################
#############################################
# Abstract Parent class
#############################################
#############################################
class DataCollection(utils.ToFuObject):
""" A generic class for handling data
Provides methods for:
- introspection
- plateaux finding
- visualization
"""
__metaclass__ = ABCMeta
# Fixed (class-wise) dictionary of default properties
_ddef = {
'Id': {'include': ['Mod', 'Cls', 'Name', 'version']},
'params': {
'ddata': {
'source': (str, 'unknown'),
'dim': (str, 'unknown'),
'quant': (str, 'unknown'),
'name': (str, 'unknown'),
'units': (str, 'a.u.'),
},
'dobj': {},
},
}
_forced_group = None
if _forced_group is not None:
_allowed_groups = [_forced_group]
else:
_allowed_groups = None
# _dallowed_params = None
_data_none = None
_reserved_keys = None
_show_in_summary_core = ['shape', 'ref', 'group']
_show_in_summary = 'all'
_max_ndim = None
_dgroup = {}
_dref = {}
_dref_static = {}
_ddata = {}
_dobj = {}
_group0d = _GROUP_0D
_group1d = _GROUP_1D
_group2d = _GROUP_2D
def __init_subclass__(cls, **kwdargs):
# Does not exist before Python 3.6 !!!
# Python 2
super(DataCollection, cls).__init_subclass__(**kwdargs)
# Python 3
# super().__init_subclass__(**kwdargs)
cls._ddef = copy.deepcopy(DataCollection._ddef)
# cls._dplot = copy.deepcopy(Struct._dplot)
# cls._set_color_ddef(cls._color)
def __init__(
self,
dgroup=None,
dref=None,
dref_static=None,
ddata=None,
dobj=None,
Id=None,
Name=None,
fromdict=None,
SavePath=None,
include=None,
sep=None,
):
# Create a dplot at instance level
# self._dplot = copy.deepcopy(self.__class__._dplot)
kwdargs = locals()
del kwdargs['self']
super().__init__(**kwdargs)
def _reset(self):
# Run by the parent class __init__()
super()._reset()
self._dgroup = {}
self._dref = {}
self._dref_static = {}
self._ddata = {}
self._dobj = {}
@classmethod
def _checkformat_inputs_Id(cls, Id=None, Name=None,
include=None, **kwdargs):
if Id is not None:
assert isinstance(Id, utils.ID)
Name = Id.Name
# assert isinstance(Name, str), Name
if include is None:
include = cls._ddef['Id']['include']
kwdargs.update({'Name': Name, 'include': include})
return kwdargs
###########
# Get check and format inputs
###########
###########
# _init
###########
def _init(
self,
dgroup=None,
dref=None,
dref_static=None,
ddata=None,
dobj=None,
**kwargs,
):
self.update(
dgroup=dgroup,
dref=dref,
dref_static=dref_static,
ddata=ddata,
dobj=dobj,
)
self._dstrip['strip'] = 0
###########
# set dictionaries
###########
def update(
self,
dobj=None,
ddata=None,
dref=None,
dref_static=None,
dgroup=None,
):
""" Can be used to set/add data/ref/group
Will update existing attribute with new dict
"""
# Check consistency
self._dgroup, self._dref, self._dref_static, self._ddata, self._dobj =\
_check_inputs._consistency(
dobj=dobj, dobj0=self._dobj,
ddata=ddata, ddata0=self._ddata,
dref=dref, dref0=self._dref,
dref_static=dref_static, dref_static0=self._dref_static,
dgroup=dgroup, dgroup0=self._dgroup,
allowed_groups=self._allowed_groups,
reserved_keys=self._reserved_keys,
ddefparams_data=self._ddef['params']['ddata'],
ddefparams_obj=self._ddef['params']['dobj'],
data_none=self._data_none,
max_ndim=self._max_ndim,
)
# ---------------------
# Adding group / ref / quantity one by one
# ---------------------
def add_group(self, group=None):
# Check consistency
self.update(ddata=None, dref=None, dref_static=None, dgroup=group)
def add_ref(self, key=None, group=None, data=None, **kwdargs):
dref = {key: {'group': group, 'data': data, **kwdargs}}
# Check consistency
self.update(ddata=None, dref=dref, dref_static=None, dgroup=None)
# TBF
def add_ref_static(self, key=None, which=None, **kwdargs):
dref_static = {which: {key: kwdargs}}
# Check consistency
self.update(
ddata=None, dref=None, dref_static=dref_static, dgroup=None,
)
def add_data(self, key=None, data=None, ref=None, **kwdargs):
ddata = {key: {'data': data, 'ref': ref, **kwdargs}}
# Check consistency
self.update(ddata=ddata, dref=None, dref_static=None, dgroup=None)
def add_obj(self, which=None, key=None, **kwdargs):
dobj = {which: {key: kwdargs}}
# Check consistency
self.update(dobj=dobj, dref=None, dref_static=None, dgroup=None)
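    # Hedged usage sketch of the add_* methods above (key/group names are illustrative):
    #   coll = DataCollection(Name='example')
    #   coll.add_group(group='time')
    #   coll.add_ref(key='t0', group='time', data=np.linspace(0, 1, 10))
    #   coll.add_data(key='temp0', data=np.random.rand(10), ref='t0', units='eV')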
# ---------------------
# Removing group / ref / quantities
# ---------------------
def remove_group(self, group=None):
""" Remove a group (or list of groups) and all associated ref, data """
self._dgroup, self._dref, self._dref_static, self._ddata, self._dobj =\
_check_inputs._remove_group(
group=group,
dgroup0=self._dgroup, dref0=self._dref, ddata0=self._ddata,
dref_static0=self._dref_static,
dobj0=self._dobj,
allowed_groups=self._allowed_groups,
reserved_keys=self._reserved_keys,
ddefparams_data=self._ddef['params']['ddata'],
ddefparams_obj=self._ddef['params']['dobj'],
data_none=self._data_none,
max_ndim=self._max_ndim,
)
def remove_ref(self, key=None, propagate=None):
""" Remove a ref (or list of refs) and all associated data """
self._dgroup, self._dref, self._dref_static, self._ddata, self._dobj =\
_check_inputs._remove_ref(
key=key,
dgroup0=self._dgroup, dref0=self._dref, ddata0=self._ddata,
dref_static0=self._dref_static,
dobj0=self._dobj,
propagate=propagate,
allowed_groups=self._allowed_groups,
reserved_keys=self._reserved_keys,
ddefparams_data=self._ddef['params']['ddata'],
ddefparams_obj=self._ddef['params']['dobj'],
data_none=self._data_none,
max_ndim=self._max_ndim,
)
def remove_ref_static(self, key=None, which=None, propagate=None):
""" Remove a static ref (or list) or a whole category
        key is provided:
=> remove only the desired key(s)
works only if key is not used in ddata and dobj
which is provided:
=> treated as param, the whole category of ref_static is removed
if propagate, the parameter is removed from ddata and dobj
"""
_check_inputs._remove_ref_static(
key=key,
which=which,
propagate=propagate,
dref_static0=self._dref_static,
ddata0=self._ddata,
dobj0=self._dobj,
)
def remove_data(self, key=None, propagate=True):
""" Remove a data (or list of data) """
self._dgroup, self._dref, self._dref_static, self._ddata, self._dobj =\
_check_inputs._remove_data(
key=key,
dgroup0=self._dgroup, dref0=self._dref, ddata0=self._ddata,
dref_static0=self._dref_static,
dobj0=self._dobj,
propagate=propagate,
allowed_groups=self._allowed_groups,
reserved_keys=self._reserved_keys,
ddefparams_data=self._ddef['params']['ddata'],
ddefparams_obj=self._ddef['params']['dobj'],
data_none=self._data_none,
max_ndim=self._max_ndim,
)
def remove_obj(self, key=None, which=None, propagate=True):
""" Remove a data (or list of data) """
self._dgroup, self._dref, self._dref_static, self._ddata, self._dobj =\
_check_inputs._remove_obj(
key=key,
which=which,
dobj0=self._dobj,
ddata0=self._ddata,
dgroup0=self._dgroup,
dref0=self._dref,
dref_static0=self._dref_static,
allowed_groups=self._allowed_groups,
reserved_keys=self._reserved_keys,
ddefparams_data=self._ddef['params']['ddata'],
ddefparams_obj=self._ddef['params']['dobj'],
data_none=self._data_none,
max_ndim=self._max_ndim,
)
# ---------------------
# Get / set / add / remove params
# ---------------------
def __check_which(self, which=None, return_dict=None):
""" Check which in ['data'] + list(self._dobj.keys() """
return _check_inputs._check_which(
ddata=self._ddata,
dobj=self._dobj,
which=which,
return_dict=return_dict,
)
def get_lparam(self, which=None):
""" Return the list of params for the chosen dict ('data' or dobj[<>])
"""
which, dd = self.__check_which(which, return_dict=True)
if which is None:
return
lp = list(list(dd.values())[0].keys())
if which == 'data':
lp.remove('data')
return lp
def get_param(
self,
param=None,
key=None,
ind=None,
returnas=None,
which=None,
):
""" Return the array of the chosen parameter (or list of parameters)
Can be returned as:
- dict: {param0: {key0: values0, key1: value1...}, ...}
            - np.ndarray: {param0: np.r_[values0, value1...], ...}
"""
which, dd = self.__check_which(which, return_dict=True)
if which is None:
return
return _check_inputs._get_param(
dd=dd, dd_name=which,
param=param, key=key, ind=ind, returnas=returnas,
)
def set_param(
self,
param=None,
value=None,
ind=None,
key=None,
which=None,
):
""" Set the value of a parameter
value can be:
- None
- a unique value (int, float, bool, str, tuple) common to all keys
            - an iterable of values (array, list) => one for each key
A subset of keys can be chosen (ind, key, fed to self.select()) to set
only the value of some key
"""
which, dd = self.__check_which(which, return_dict=True)
if which is None:
return
_check_inputs._set_param(
dd=dd, dd_name=which,
param=param, value=value, ind=ind, key=key,
)
def add_param(
self,
param,
value=None,
which=None,
):
""" Add a parameter, optionnally also set its value """
which, dd = self.__check_which(which, return_dict=True)
if which is None:
return
_check_inputs._add_param(
dd=dd, dd_name=which,
param=param, value=value,
)
def remove_param(
self,
param=None,
which=None,
):
""" Remove a parameter, none by default, all if param = 'all' """
which, dd = self.__check_which(which, return_dict=True)
if which is None:
return
_check_inputs._remove_param(
dd=dd, dd_name=which,
param=param,
)
###########
# strip dictionaries
###########
def _strip_ddata(self, strip=0, verb=0):
pass
###########
# _strip and get/from dict
###########
@classmethod
def _strip_init(cls):
cls._dstrip['allowed'] = [0, 1]
nMax = max(cls._dstrip['allowed'])
doc = """
1: None
"""
doc = utils.ToFuObjectBase.strip.__doc__.format(doc, nMax)
cls.strip.__doc__ = doc
def strip(self, strip=0, verb=True):
# super()
super(DataCollection, self).strip(strip=strip, verb=verb)
def _strip(self, strip=0, verb=True):
self._strip_ddata(strip=strip, verb=verb)
def _to_dict(self):
dout = {
'dgroup': {'dict': self._dgroup, 'lexcept': None},
'dref': {'dict': self._dref, 'lexcept': None},
'dref_static': {'dict': self._dref_static, 'lexcept': None},
'ddata': {'dict': self._ddata, 'lexcept': None},
'dobj': {'dict': self._dobj, 'lexcept': None},
}
return dout
def _from_dict(self, fd):
for k0 in ['dgroup', 'dref', 'ddata', 'dref_static', 'dobj']:
if fd.get(k0) is not None:
getattr(self, '_'+k0).update(**fd[k0])
self.update()
###########
# properties
###########
@property
def dgroup(self):
""" The dict of groups """
return self._dgroup
@property
def dref(self):
""" the dict of references """
return self._dref
@property
def dref_static(self):
""" the dict of references """
return self._dref_static
@property
def ddata(self):
""" the dict of data """
return self._ddata
@property
def dobj(self):
""" the dict of obj """
return self._dobj
###########
# General use methods
###########
def to_DataFrame(self, which=None):
which, dd = self.__check_which(which, return_dict=True)
if which is None:
return
import pandas as pd
return pd.DataFrame(dd)
# ---------------------
# Key selection methods
# ---------------------
def select(self, which=None, log=None, returnas=None, **kwdargs):
""" Return the indices / keys of data matching criteria
The selection is done comparing the value of all provided parameters
The result is a boolean indices array, optionally with the keys list
It can include:
- log = 'all': only the data matching all criteria
- log = 'any': the data matching any criterion
If log = 'raw', a dict of indices arrays is returned, showing the
details for each criterion
"""
which, dd = self.__check_which(which, return_dict=True)
if which is None:
return
return _check_inputs._select(
dd=dd, dd_name=which,
log=log, returnas=returnas, **kwdargs,
)
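    # Hedged usage sketch for select() (the 'units' parameter is illustrative):
    #   coll.select(which='data', units='eV', log='all', returnas=str)
    #   # -> keys of all data entries whose 'units' param equals 'eV'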
def _ind_tofrom_key(
self,
ind=None,
key=None,
group=None,
returnas=int,
which=None,
):
""" Return ind from key or key from ind for all data """
which, dd = self.__check_which(which, return_dict=True)
if which is None:
return
return _check_inputs._ind_tofrom_key(
dd=dd, dd_name=which, ind=ind, key=key,
group=group, dgroup=self._dgroup,
returnas=returnas,
)
    def _get_sort_index(self, which=None, param=None):
        """ Return sorting index of the self.ddata / self.dobj dict """
        if param is None:
            return
        # Resolve which dict ('data' or a dobj category) is being sorted
        which, dd = self.__check_which(which, return_dict=True)
        if which is None:
            return
        if param == 'key':
            ind = np.argsort(list(dd.keys()))
elif isinstance(param, str):
ind = np.argsort(
self.get_param(param, which=which, returnas=np.ndarray)[param]
)
else:
msg = "Arg param must be a valid str\n Provided: {}".format(param)
raise Exception(msg)
return ind
def sortby(self, param=None, order=None, which=None):
""" sort the self.ddata dict by desired parameter """
# Trivial case
if len(self._ddata) == 0 and len(self._dobj) == 0:
return
# --------------
# Check inputs
# order
if order is None:
order = 'increasing'
c0 = order in ['increasing', 'reverse']
if not c0:
msg = (
"""
Arg order must be in [None, 'increasing', 'reverse']
Provided: {}
""".format(order)
)
raise Exception(msg)
# which
which, dd = self.__check_which(which, return_dict=True)
if which is None:
return
# --------------
# sort
ind = self._get_sort_index(param=param, which=which)
if ind is None:
return
if order == 'reverse':
ind = ind[::-1]
lk = list(dd.keys())
dd = {lk[ii]: dd[lk[ii]] for ii in ind}
if which == 'data':
self._ddata = dd
else:
self._dobj[which] = dd
# ---------------------
# Get refs from data key
# ---------------------
def _get_ref_from_key(self, key=None, group=None):
""" Get the key of the ref in chosen group """
# Check input
if key not in self._ddata.keys():
msg = "Provide a valid data key!\n\t- Provided: {}".format(key)
raise Exception(msg)
ref = self._ddata[key]['ref']
if len(ref) > 1:
if group not in self._dgroup.keys():
msg = "Provided group is not valid!\n\t{}".format(group)
raise Exception(msg)
ref = [rr for rr in ref if self._dref[rr]['group'] == group]
if len(ref) != 1:
msg = "Ambiguous ref for key {}!\n\t- {}".format(key, ref)
raise Exception(msg)
return ref[0]
# ---------------------
# Switch ref
# ---------------------
def switch_ref(self, new_ref=None):
"""Use the provided key as ref (if valid) """
self._dgroup, self._dref, self._dref_static, self._ddata, self._dobj =\
_check_inputs.switch_ref(
new_ref=new_ref,
ddata=self._ddata,
dref=self._dref,
dgroup=self._dgroup,
dobj0=self._dobj,
dref_static0=self._dref_static,
allowed_groups=self._allowed_groups,
reserved_keys=self._reserved_keys,
ddefparams_data=self._ddef['params'].get('data'),
data_none=self._data_none,
max_ndim=self._max_ndim,
)
# ---------------------
# Methods for getting a subset of the collection
# ---------------------
# TBC
def get_drefddata_as_input(self, key=None, ind=None, group=None):
lk = self._ind_tofrom_key(ind=ind, key=key, group=group, returnas=str)
lkr = [kr for kr in self._dref['lkey']
if any([kr in self._ddata['dict'][kk]['refs'] for kk in lk])]
dref = {kr: {'data': self._ddata['dict'][kr]['data'],
'group': self._dref['dict'][kr]['group']} for kr in lkr}
lkr = dref.keys()
ddata = {kk: self._ddata['dict'][kk] for kk in lk if kk not in lkr}
return dref, ddata
# TBC
def get_subset(self, key=None, ind=None, group=None, Name=None):
if key is None and ind is None:
return self
else:
dref, ddata = self.get_drefddata_as_input(key=key, ind=ind,
group=group)
if Name is None and self.Id.Name is not None:
Name = self.Id.Name + '-subset'
return self.__class__(dref=dref, ddata=ddata, Name=Name)
# ---------------------
# Methods for exporting plot collection (subset)
# ---------------------
# TBC
def to_PlotCollection(self, key=None, ind=None, group=None, Name=None,
dnmax=None, lib='mpl'):
dref, ddata = self.get_drefddata_as_input(
key=key, ind=ind, group=group,
)
if Name is None and self.Id.Name is not None:
Name = self.Id.Name + '-plot'
import tofu.data._core_plot as _core_plot
if lib == 'mpl':
cls = _core_plot.DataCollectionPlot_mpl
else:
raise NotImplementedError
obj = cls(dref=dref, ddata=ddata, Name=Name)
if dnmax is not None:
obj.set_dnmax(dnmax)
return obj
# ---------------------
# Methods for showing data
# ---------------------
def get_summary(self, show=None, show_core=None,
sep=' ', line='-', just='l',
table_sep=None, verb=True, return_=False):
""" Summary description of the object content """
# # Make sure the data is accessible
# msg = "The data is not accessible because self.strip(2) was used !"
# assert self._dstrip['strip']<2, msg
lcol, lar = [], []
# -----------------------
# Build for groups
if len(self._dgroup) > 0:
lcol.append(['group', 'nb. ref', 'nb. data'])
lar.append([
(
k0,
len(self._dgroup[k0]['lref']),
len(self._dgroup[k0]['ldata']),
)
for k0 in self._dgroup.keys()
])
# -----------------------
# Build for refs
if len(self._dref) > 0:
lcol.append(['ref key', 'group', 'size', 'nb. data'])
lar.append([
(
k0,
self._dref[k0]['group'],
str(self._dref[k0]['size']),
len(self._dref[k0]['ldata'])
)
for k0 in self._dref.keys()
])
# -----------------------
# Build for ddata
if len(self._ddata) > 0:
if show_core is None:
show_core = self._show_in_summary_core
if isinstance(show_core, str):
show_core = [show_core]
lp = self.get_lparam(which='data')
lkcore = ['shape', 'group', 'ref']
assert all([ss in lp + lkcore for ss in show_core])
col2 = ['data key'] + show_core
if show is None:
show = self._show_in_summary
if show == 'all':
col2 += [pp for pp in lp if pp not in col2]
else:
if isinstance(show, str):
show = [show]
assert all([ss in lp for ss in show])
col2 += [pp for pp in show if pp not in col2]
ar2 = []
for k0 in self._ddata.keys():
lu = [k0] + [str(self._ddata[k0].get(cc)) for cc in col2[1:]]
ar2.append(lu)
lcol.append(col2)
lar.append(ar2)
# -----------------------
# Build for dref_static
if len(self._dref_static) > 0:
for k0, v0 in self._dref_static.items():
lk = list(list(v0.values())[0].keys())
col = [k0] + [pp for pp in lk]
ar = [
tuple([k1] + [str(v1[kk]) for kk in lk])
for k1, v1 in v0.items()
]
lcol.append(col)
lar.append(ar)
# -----------------------
# Build for dobj
if len(self._dobj) > 0:
for k0, v0 in self._dobj.items():
lk = self.get_lparam(which=k0)
lcol.append([k0] + [pp for pp in lk])
lar.append([
tuple([k1] + [str(v1[kk]) for kk in lk])
for k1, v1 in v0.items()
])
return self._get_summary(
lar, lcol,
sep=sep, line=line, table_sep=table_sep,
verb=verb, return_=return_)
# -----------------
# conversion wavelength - energy - frequency
# ------------------
@staticmethod
def convert_spectral(
data=None,
units_in=None, units_out=None,
returnas=None,
):
""" convert wavelength / energy/ frequency
Available units:
wavelength: m, mm, um, nm, A
energy: J, eV, keV
frequency: Hz, kHz, MHz, GHz
Can also just return the conversion coef if returnas='coef'
"""
return _comp_spectrallines.convert_spectral(
data_in=data, units_in=units_in, units_out=units_out,
returnas=returnas,
)
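    # Hedged usage sketch (units taken from the docstring above, values illustrative):
    #   DataCollection.convert_spectral(data=np.r_[3.96e-10], units_in='m', units_out='keV')
    #   # -> photon energy of a ~3.96 A line, of the order of 3 keV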
# -----------------
# Get common ref
# ------------------
def _get_common_ref_data_nearest(
self,
group=None,
lkey=None,
return_all=None,
):
""" Typically used to get a common (intersection) time vector
Returns a time vector that contains all time points from all data
Also return a dict of indices to easily remap each time vector to tall
such that t[ind] = tall (with nearest approximation)
"""
return _comp_new._get_unique_ref_dind(
dd=self._ddata, group=group,
lkey=lkey, return_all=return_all,
)
def _get_pts_from_mesh(self, key=None):
""" Get default pts from a mesh """
# Check key is relevant
c0 = (
key in self._ddata.keys()
and isinstance(self._ddata[key].get('data'), dict)
and 'type' in self._ddata[key]['data'].keys()
)
if not c0:
msg = (
"ddata['{}'] does not exist or is not a mesh".format(key)
)
raise Exception(msg)
if self.ddata[key]['data']['type'] == 'rect':
if self.ddata[key]['data']['shapeRZ'] == ('R', 'Z'):
R = np.repeat(self.ddata[key]['data']['R'],
self.ddata[key]['data']['nZ'])
Z = np.tile(self.ddata[key]['data']['Z'],
self.ddata[key]['data']['nR'])
else:
R = np.tile(self.ddata[key]['data']['R'],
self.ddata[key]['data']['nZ'])
Z = np.repeat(self.ddata[key]['data']['Z'],
self.ddata[key]['data']['nR'])
pts = np.array([R, np.zeros((R.size,)), Z])
else:
pts = self.ddata[key]['data']['nodes']
pts = np.array([
pts[:, 0], np.zeros((pts.shape[0],)), pts[:, 1],
])
return pts
# ---------------------
# Method for interpolation - inputs checks
# ---------------------
# Useful?
    # Note: not a property, since it needs the group1d / group2d arguments
    def _get_lquant_both(self, group1d=None, group2d=None):
""" Return list of quantities available both in 1d and 2d """
lq1 = [
self._ddata[vd]['quant'] for vd in self._dgroup[group1d]['ldata']
]
lq2 = [
self._ddata[vd]['quant'] for vd in self._dgroup[group2d]['ldata']
]
lq = list(set(lq1).intersection(lq2))
return lq
def _check_qr12RPZ(
self,
quant=None,
ref1d=None,
ref2d=None,
q2dR=None,
q2dPhi=None,
q2dZ=None,
group1d=None,
group2d=None,
):
if group1d is None:
group1d = self._group1d
if group2d is None:
group2d = self._group2d
lc0 = [quant is None, ref1d is None, ref2d is None]
lc1 = [q2dR is None, q2dPhi is None, q2dZ is None]
if np.sum([all(lc0), all(lc1)]) != 1:
msg = (
"Please provide either (xor):\n"
+ "\t- a scalar field (isotropic emissivity):\n"
+ "\t\tquant : scalar quantity to interpolate\n"
+ "\t\t\tif quant is 1d, intermediate reference\n"
+ "\t\t\tfields are necessary for 2d interpolation\n"
+ "\t\tref1d : 1d reference field on which to interpolate\n"
+ "\t\tref2d : 2d reference field on which to interpolate\n"
+ "\t- a vector (R,Phi,Z) field (anisotropic emissivity):\n"
+ "\t\tq2dR : R component of the vector field\n"
+ "\t\tq2dPhi: R component of the vector field\n"
+ "\t\tq2dZ : Z component of the vector field\n"
+ "\t\t=> all components have the same time and mesh!\n"
)
raise Exception(msg)
# Check requested quant is available in 2d or 1d
if all(lc1):
idquant, idref1d, idref2d = _check_inputs._get_possible_ref12d(
dd=self._ddata,
key=quant, ref1d=ref1d, ref2d=ref2d,
group1d=group1d,
group2d=group2d,
)
idq2dR, idq2dPhi, idq2dZ = None, None, None
ani = False
else:
idq2dR, msg = _check_inputs._get_keyingroup_ddata(
dd=self._ddata,
key=q2dR, group=group2d, msgstr='quant', raise_=True,
)
idq2dPhi, msg = _check_inputs._get_keyingroup_ddata(
dd=self._ddata,
key=q2dPhi, group=group2d, msgstr='quant', raise_=True,
)
idq2dZ, msg = _check_inputs._get_keyingroup_ddata(
dd=self._ddata,
key=q2dZ, group=group2d, msgstr='quant', raise_=True,
)
idquant, idref1d, idref2d = None, None, None
ani = True
return idquant, idref1d, idref2d, idq2dR, idq2dPhi, idq2dZ, ani
# ---------------------
# Method for interpolation
# ---------------------
def _get_finterp(
self,
idquant=None, idref1d=None, idref2d=None, idmesh=None,
idq2dR=None, idq2dPhi=None, idq2dZ=None,
interp_t=None, interp_space=None,
fill_value=None, ani=None, Type=None,
group0d=None, group2d=None,
):
if interp_t is None:
interp_t = 'nearest'
if interp_t != 'nearest':
msg = "'nearest' is the only time-interpolation method available"
raise NotImplementedError(msg)
if group0d is None:
group0d = self._group0d
if group2d is None:
group2d = self._group2d
# Get idmesh
if idmesh is None:
if idquant is not None:
# isotropic
if idref1d is None:
lidmesh = [qq for qq in self._ddata[idquant]['ref']
if self._dref[qq]['group'] == group2d]
else:
lidmesh = [qq for qq in self._ddata[idref2d]['ref']
if self._dref[qq]['group'] == group2d]
else:
# anisotropic
assert idq2dR is not None
lidmesh = [qq for qq in self._ddata[idq2dR]['ref']
if self._dref[qq]['group'] == group2d]
assert len(lidmesh) == 1
idmesh = lidmesh[0]
# Get common time indices
if interp_t == 'nearest':
tall, tbinall, ntall, dind = _comp_new._get_tcom(
idquant, idref1d, idref2d, idq2dR,
dd=self._ddata, group=group0d,
)
# Get mesh
if self._ddata[idmesh]['data']['type'] == 'rect':
mpltri = None
trifind = self._ddata[idmesh]['data']['trifind']
else:
mpltri = self._ddata[idmesh]['data']['mpltri']
trifind = mpltri.get_trifinder()
# # Prepare output
# Interpolate
# Note : Maybe consider using scipy.LinearNDInterpolator ?
if idquant is not None:
vquant = self._ddata[idquant]['data']
c0 = (
self._ddata[idmesh]['data']['type'] == 'quadtri'
and self._ddata[idmesh]['data']['ntri'] > 1
)
if c0:
vquant = np.repeat(
vquant,
self._ddata[idmesh]['data']['ntri'],
axis=0,
)
vr1 = self._ddata[idref1d]['data'] if idref1d is not None else None
vr2 = self._ddata[idref2d]['data'] if idref2d is not None else None
# add time dimension if none
if vquant.ndim == 1:
vquant = vquant[None, :]
            if vr1 is not None and vr1.ndim == 1:
                vr1 = vr1[None, :]
            if vr2 is not None and vr2.ndim == 1:
                vr2 = vr2[None, :]
else:
vq2dR = self._ddata[idq2dR]['data']
vq2dPhi = self._ddata[idq2dPhi]['data']
vq2dZ = self._ddata[idq2dZ]['data']
# add time dimension if none
if vq2dR.ndim == 1:
vq2dR = vq2dR[None, :]
if vq2dPhi.ndim == 1:
vq2dPhi = vq2dPhi[None, :]
if vq2dZ.ndim == 1:
vq2dZ = vq2dZ[None, :]
if interp_space is None:
interp_space = self._ddata[idmesh]['data']['ftype']
# get interpolation function
if ani:
# Assuming same mesh and time vector for all 3 components
func = _comp.get_finterp_ani(
idq2dR, idq2dPhi, idq2dZ,
interp_t=interp_t,
interp_space=interp_space,
fill_value=fill_value,
idmesh=idmesh, vq2dR=vq2dR,
vq2dZ=vq2dZ, vq2dPhi=vq2dPhi,
tall=tall, tbinall=tbinall, ntall=ntall,
indtq=dind.get(idquant),
trifind=trifind, Type=Type, mpltri=mpltri,
)
else:
func = _comp.get_finterp_isotropic(
idquant, idref1d, idref2d,
vquant=vquant, vr1=vr1, vr2=vr2,
interp_t=interp_t,
interp_space=interp_space,
fill_value=fill_value,
idmesh=idmesh,
tall=tall, tbinall=tbinall, ntall=ntall,
mpltri=mpltri, trifind=trifind,
indtq=dind.get(idquant),
indtr1=dind.get(idref1d), indtr2=dind.get(idref2d),
)
return func
def _interp_pts2d_to_quant1d(
self,
pts=None,
vect=None,
t=None,
quant=None,
ref1d=None,
ref2d=None,
q2dR=None,
q2dPhi=None,
q2dZ=None,
interp_t=None,
interp_space=None,
fill_value=None,
Type=None,
group0d=None,
group1d=None,
group2d=None,
return_all=None,
):
""" Return the value of the desired 1d quantity at 2d points
For the desired inputs points (pts):
- pts are in (X, Y, Z) coordinates
- space interpolation is linear on the 1d profiles
At the desired input times (t):
            - using a nearest-neighbour approach for time
"""
# Check inputs
if group0d is None:
group0d = self._group0d
if group1d is None:
group1d = self._group1d
if group2d is None:
group2d = self._group2d
# msg = "Only 'nearest' available so far for interp_t!"
# assert interp_t == 'nearest', msg
# Check requested quant is available in 2d or 1d
idquant, idref1d, idref2d, idq2dR, idq2dPhi, idq2dZ, ani = \
self._check_qr12RPZ(
quant=quant, ref1d=ref1d, ref2d=ref2d,
q2dR=q2dR, q2dPhi=q2dPhi, q2dZ=q2dZ,
group1d=group1d, group2d=group2d,
)
# Check the pts is (3,...) array of floats
idmesh = None
if pts is None:
# Identify mesh to get default points
if ani:
idmesh = [id_ for id_ in self._ddata[idq2dR]['ref']
if self._dref[id_]['group'] == group2d][0]
else:
if idref1d is None:
idmesh = [id_ for id_ in self._ddata[idquant]['ref']
if self._dref[id_]['group'] == group2d][0]
else:
idmesh = [id_ for id_ in self._ddata[idref2d]['ref']
if self._dref[id_]['group'] == group2d][0]
# Derive pts
pts = self._get_pts_from_mesh(key=idmesh)
pts = np.atleast_2d(pts)
if pts.shape[0] != 3:
msg = (
"pts must be np.ndarray of (X,Y,Z) points coordinates\n"
+ "Can be multi-dimensional, but 1st dimension is (X,Y,Z)\n"
+ " - Expected shape : (3,...)\n"
+ " - Provided shape : {}".format(pts.shape)
)
raise Exception(msg)
# Check t
lc = [t is None, type(t) is str, type(t) is np.ndarray]
assert any(lc)
if lc[1]:
assert t in self._ddata.keys()
t = self._ddata[t]['data']
# Interpolation (including time broadcasting)
# this is the second slowest step (~0.08 s)
func = self._get_finterp(
idquant=idquant, idref1d=idref1d, idref2d=idref2d,
idq2dR=idq2dR, idq2dPhi=idq2dPhi, idq2dZ=idq2dZ,
idmesh=idmesh,
interp_t=interp_t, interp_space=interp_space,
fill_value=fill_value, ani=ani, Type=Type,
group0d=group0d, group2d=group2d,
)
# Check vect of ani
c0 = (
ani is True
and (
vect is None
or not (
isinstance(vect, np.ndarray)
and vect.shape == pts.shape
)
)
)
if c0:
msg = (
"Anisotropic field interpolation needs a field of local vect\n"
+ " => Please provide vect as (3, npts) np.ndarray!"
)
raise Exception(msg)
# This is the slowest step (~1.8 s)
val, t = func(pts, vect=vect, t=t)
# return
if return_all is None:
return_all = True
if return_all is True:
dout = {
't': t,
'pts': pts,
'ref1d': idref1d,
'ref2d': idref2d,
'q2dR': idq2dR,
'q2dPhi': idq2dPhi,
'q2dZ': idq2dZ,
'interp_t': interp_t,
'interp_space': interp_space,
}
return val, dout
else:
return val
# TBC
    def _interp_one_dim(self, x=None, ind=None, key=None, group=None,
                        kind=None, bounds_error=None, fill_value=None):
""" Return a dict of interpolated data
Uses scipy.inpterp1d with args:
- kind, bounds_error, fill_value
The interpolated data is chosen method select() with args:
- key, ind
The interpolation is done against a reference vector x
- x can be a key to an existing ref
- x can be user-provided array
in thay case the group should be specified
(to properly identify the interpolation dimension)
Returns:
--------
dout: dict
dict of interpolated data
dfail: dict of failed interpolations, with error messages
"""
# Check x
assert x is not None
        if isinstance(x, str):
if x not in self.lref:
msg = "If x is a str, it must be a valid ref!\n"
msg += " - x: {}\n".format(x)
msg += " - self.lref: {}".format(self.lref)
raise Exception(msg)
group = self._dref[x]['group']
x = self._ddata[x]['data']
else:
try:
x = np.atleast_1d(x).ravel()
except Exception:
msg = (
"The reference with which to interpolate, x, should be:\n"
+ " - a key to an existing ref\n"
+ " - a 1d np.ndarray"
)
                raise Exception(msg)
if group not in self.lgroup:
msg = "Interpolation must be with respect to a group\n"
msg += "Provided group is not in self.lgroup:\n"
msg += " - group: {}".format(group)
raise Exception(msg)
# Get keys to interpolate
        if ind is None and key is None:
lk = self._dgroup[group]['ldata']
else:
lk = self._ind_tofrom_key(ind=ind, key=key, returnas=str)
# Check provided keys are relevant, and get dim index
dind, dfail = {}, {}
for kk in lk:
if kk not in self._dgroup[group]['ldata']:
# gps = self._ddata[kk]['groups']
# msg = "Some data not in interpolation group:\n"
# msg += " - self.ddata[%s]['groups'] = %s"%(kk,str(gps))
# msg += " - Interpolation group: %s"%group
# raise Exception(msg)
dfail[kk] = "Not dependent on group {}".format(group)
else:
                dind[kk] = self._ddata[kk]['group'].index(group)
# Start loop for interpolation
dout = {}
        for kk in dind.keys():
            shape = self._ddata[kk]['shape']
            if not isinstance(self._ddata[kk]['data'], np.ndarray):
                dfail[kk] = "Not a np.ndarray !"
                continue
            kr = self._ddata[kk]['ref'][dind[kk]]
            vr = self._ddata[kr]['data']
            data = self._ddata[kk]['data']
            try:
                if dind[kk] == len(shape) - 1:
                    dout[kk] = scpinterp.interp1d(vr, data,
                                                  kind=kind, axis=-1,
                                                  bounds_error=bounds_error,
                                                  fill_value=fill_value,
                                                  assume_sorted=True)(x)
                else:
                    dout[kk] = scpinterp.interp1d(vr, data,
                                                  kind=kind, axis=dind[kk],
                                                  bounds_error=bounds_error,
                                                  fill_value=fill_value,
                                                  assume_sorted=True)(x)
except Exception as err:
dfail[kk] = str(err)
return dout, dfail
# ---------------------
# Method for fitting models in one direction
# ---------------------
# TBC
    def _fit_one_dim(self, ind=None, key=None, group=None,
                     Type=None, func=None, **kwdargs):
""" Return the parameters of a fitted function
The interpolated data is chosen method select() with args:
- key, ind
Returns:
--------
dout: dict
dict of interpolated data
dfail: dict of failed interpolations, with error messages
"""
# Get keys to interpolate
lk = self._ind_tofrom_key(ind=ind, key=key, group=group, returnas=str)
# Start model fitting loop on data keys
dout = {}
for kk in lk:
x = None
axis = None
            dfit = _comp_new.fit(self._ddata[kk]['data'],
x=x, axis=axis,
func=func, Type=Type, **kwdargs)
dout[kk] = dfit
return dout
# ---------------------
# Methods for plotting data
# ---------------------
def _plot_timetraces(self, ntmax=1, group='time',
key=None, ind=None, Name=None,
color=None, ls=None, marker=None, ax=None,
axgrid=None, fs=None, dmargin=None,
legend=None, draw=None, connect=None, lib=None):
plotcoll = self.to_PlotCollection(ind=ind, key=key, group=group,
Name=Name, dnmax={group: ntmax})
return _plot_new.plot_DataColl(
plotcoll,
color=color, ls=ls, marker=marker, ax=ax,
axgrid=axgrid, fs=fs, dmargin=dmargin,
draw=draw, legend=legend,
connect=connect, lib=lib,
)
def _plot_axvlines(
self,
which=None,
key=None,
ind=None,
param_x=None,
param_txt=None,
sortby=None,
sortby_def=None,
sortby_lok=None,
ax=None,
ymin=None,
ymax=None,
ls=None,
lw=None,
fontsize=None,
side=None,
dcolor=None,
dsize=None,
fraction=None,
figsize=None,
dmargin=None,
wintit=None,
tit=None,
):
""" plot rest wavelengths as vertical lines """
# Check inputs
which, dd = self.__check_which(
which=which, return_dict=True,
)
key = self._ind_tofrom_key(which=which, key=key, ind=ind, returnas=str)
if sortby is None:
sortby = sortby_def
if sortby not in sortby_lok:
msg = (
"""
For plotting, sorting can be done only by:
{}
You provided:
{}
""".format(sortby_lok, sortby)
)
raise Exception(msg)
return _plot_new.plot_axvline(
din=dd,
key=key,
param_x='lambda0',
param_txt='symbol',
sortby=sortby, dsize=dsize,
ax=ax, ymin=ymin, ymax=ymax,
ls=ls, lw=lw, fontsize=fontsize,
side=side, dcolor=dcolor,
fraction=fraction,
figsize=figsize, dmargin=dmargin,
wintit=wintit, tit=tit,
)
# ---------------------
# saving => get rid of function
# ---------------------
def save(self, path=None, name=None,
strip=None, sep=None, deep=True, mode='npz',
compressed=False, verb=True, return_pfe=False):
# Remove function mpltri if relevant
lk = [
k0 for k0, v0 in self._ddata.items()
if isinstance(v0['data'], dict)
and 'mpltri' in v0['data'].keys()
]
for k0 in lk:
del self._ddata[k0]['data']['mpltri']
lk = [
k0 for k0, v0 in self._ddata.items()
if isinstance(v0['data'], dict)
and 'trifind' in v0['data'].keys()
]
for k0 in lk:
del self._ddata[k0]['data']['trifind']
# call parent method
return super().save(
path=path, name=name,
sep=sep, deep=deep, mode=mode,
strip=strip, compressed=compressed,
return_pfe=return_pfe, verb=verb
)
|
mit
|
bsaleil/lc
|
tools/graphs.py
|
1
|
14708
|
#!/usr/bin/env python3
#!/usr/bin/python3
#---------------------------------------------------------------------------
#
# Copyright (c) 2015, Baptiste Saleil. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#---------------------------------------------------------------------------
# No font with Ubuntu:
# http://stackoverflow.com/questions/11354149/python-unable-to-render-tex-in-matplotlib
# Execute compiler with stats option for all benchmarks
# Parse output
# Draw graphs
help = """
graphs.py - Generate graphs from compiler output
Use:
graphs.py [OPTION...]
Options:
-h,--help
Print this help.
--drawall
        Draw all graphs. By default the script lets the user choose the information to draw.
--stdexec
        Use standard execution. Same as --exec="Standard;".
--exec="DESCRIPTION;COMPILER_OPTION1 COMPILER_OPTION2 ..."
Add execution with given compiler options. All given executions are drawn
Example:
graphs.py --exec="Standard exec;" --exec="With all tests;--all-tests" --drawall
Draw all graphs for both executions (Standard, and with all-tests option).
graphs.py --stdexec
        Let the user interactively choose the information to draw, using the standard execution only.
"""
import sys
import io
import glob
import os
import subprocess
from pylab import *
from copy import deepcopy
from matplotlib.backends.backend_pdf import PdfPages
# Constants
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__)) + '/' # Current script path
LC_PATH = SCRIPT_PATH + '../' # Compiler path
LC_EXEC = 'lazy-comp' # Compiler exec name
PDF_OUTPUT = SCRIPT_PATH + 'graphs.pdf' # PDF output file
BENCH_PATH = LC_PATH + 'benchmarks/*.scm' # Benchmarks path
BAR_COLORS = ["#222222","#555555","#888888","#AAAAAA","#DDDDDD"] # Bar colors
BAR_COLORS = ["#BBBBBB","#999999","#777777","#555555","#333333"] # Bar colors
#BAR_COLORS = ["#222222", "#666666", "#AAAAAA", "#EEEEEE"] # Paper sw15
FONT_SIZE = 9
# Parser constants, must match compiler --stats output
CSV_INDICATOR = '--'
STAT_SEPARATOR = ':'
CSV_SEPARATOR = ';'
# Options
DRAW_ALL = '--drawall' # Draw all graphs
STD_EXEC = '--stdexec' # Add standard execution to executions list
REF_EXEC = '--refexec' # Set reference execution for scale
SORT_EXEC = '--sortexec' # Sort
OPT_REF = False
OPT_SORT = False
# Globals
execs = {}
lexecs = []
printhelp = False
# Set current working directory to compiler path
os.chdir(LC_PATH)
# Get all benchmarks full path sorted by name
files = sorted(glob.glob(BENCH_PATH))
# Graph config
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
matplotlib.rcParams.update({'font.size': FONT_SIZE})
#-------------------------------------------------------------------------------------
# Utils
def num(s):
try:
return int(s)
except ValueError:
return float(s)
def WARNING(s):
print('WARNING: ' + s)
# Used as matplotlib formatter
def to_percent(y, position):
s = str(int(y))
# The percent symbol needs escaping in latex
if matplotlib.rcParams['text.usetex'] is True:
return s + r'$\%$'
else:
return s + '%'
#-------------------------------------------------------------------------------------
# Main
def setargs():
global printhelp
global OPT_REF
global OPT_SORT
if '-h' in sys.argv or '--help' in sys.argv:
printhelp = True
if STD_EXEC in sys.argv:
execs['Standard'] = ''
if REF_EXEC in sys.argv:
OPT_REF = sys.argv[sys.argv.index(REF_EXEC)+1]
if SORT_EXEC in sys.argv:
OPT_SORT = sys.argv[sys.argv.index(SORT_EXEC)+1]
for arg in sys.argv:
if arg.startswith('--exec='):
pair = arg[7:].split(';')
name = pair[0]
lcargs = pair[1].split()
execs[name] = lcargs
lexecs.append(name)
def go():
if printhelp:
print(help)
else:
# 1 - run benchmarks and parse compiler output
benchs_data = {}
keys = []
for ex in execs:
            ks,data = runparse(execs[ex]) # TODO: pass arguments
if keys == []:
keys = ks
else:
if len(ks) != len(keys):
                    raise Exception("Executions returned different numbers of stat keys")
benchs_data[ex] = data
# 2 - Draw all graphs
drawGraphs(keys,benchs_data)
print('Done!')
# Run compiler with 'opts', parse output and return keys and data
def runparse(opts):
print("Running with options: '" + ' '.join(opts) + "'")
data = {}
# Get keys
first = files[0]
keys = []
for file in files:
file_name = os.path.basename(file)
print(file_name + '...')
options = [LC_PATH + LC_EXEC, file, '--stats']
        options.extend(opts) # TODO: rename 'options'
output = subprocess.check_output(options).decode("utf-8")
bench_data = parseOutput(output)
data[file_name] = bench_data
# Get keys on first result
if file == first:
for key in bench_data:
keys.append(key)
return keys,data
#-------------------------------------------------------------------------------------
# Parser: Read stats output from compiler and return python table representation
# Read 'KEY:VALUE' stat
def readStat(stream,data,line):
stat = line.split(STAT_SEPARATOR)
key = stat[0].strip()
val = num(stat[1].strip())
# Store key/value in global data
data[key] = val
line = stream.readline()
return line
# Read CSV stat
def readCSV(stream,data):
csv = []
# Consume CSV indicator line
line = stream.readline()
# Read table title
title = line.strip()
line = stream.readline()
# Read table header
header = line.split(CSV_SEPARATOR)
for el in header:
csv.append([el.strip()])
# Read CSV data
line = stream.readline()
while not line.startswith(CSV_INDICATOR):
linecsv = line.split(CSV_SEPARATOR)
for i in range(0,len(linecsv)):
csv[i].extend([num(linecsv[i].strip())]) ## THIS IS NOT EFFICIENT (for large CSV outputs)
line = stream.readline()
# Store key/value (title/csv) in global data
data[title] = csv
# Consume CSV indicator line
line = stream.readline()
return line
# Return python table from compiler 'output'
def parseOutput(output):
# Data for this benchmark
data = {}
# Stream
stream = io.StringIO(output)
# Parse
line = stream.readline()
while line:
# CSV table
if line.startswith(CSV_INDICATOR):
line = readCSV(stream,data)
# Key/Value line
else:
line = readStat(stream,data,line)
return data
#-------------------------------------------------------------------------------------
# Draw
# Draw all graphs associated to keys using benchs_data
# benchs_data contains all information for all benchmarks for all executions
# ex. benchs_data['Standard']['array1.scm']['Closures'] to get the number of
# closures created for benchmark array1.scm using standard exec
def drawGraphs(keys,benchs_data):
# Let user choose the graph to draw (-1 or empty for all graphs)
if not DRAW_ALL in sys.argv:
sortedKeys = sorted(keys)
print('Keys:')
print('-1: ALL')
for i in range(0,len(sortedKeys)):
print(' ' + str(i) + ': ' + sortedKeys[i])
inp = input('Key to draw (all) > ')
if not inp == '':
choice = num(inp)
if choice >= 0:
keys = [sortedKeys[choice]]
firstExec = list(benchs_data.keys())[0]
firstBenchmark = os.path.basename(files[0])
# Gen pdf output file
pdf = PdfPages(PDF_OUTPUT)
# For each key
for key in keys:
# CSV, NYI
if type(benchs_data[firstExec][firstBenchmark][key]) == list:
drawCSV(pdf,key,benchs_data)
# Key/Value, draw graph
else:
print("Drawing '" + key + "'...")
drawKeyValueGraph(pdf,key,benchs_data)
pdf.close()
## This is a specific implementation for #stubs/#versions
## TODO: Do something generic!
def drawCSV(pdf,key,benchs_data):
fig = plt.figure(key)
title = key
res = {}
for execution in benchs_data:
for bench in benchs_data[execution]:
for data in benchs_data[execution][bench][key]:
if data[0] == '#stubs':
for i in range(0,len(data)-1):
index = i+1
numvers = i
if (numvers >= 5):
numvers = -1
if (numvers in res):
res[numvers] += data[index]
else:
res[numvers] = data[index]
xvals = []
yvals = []
labels = []
keys = sorted(res.keys())
for key in keys:
if key != 0 and key != -1:
xvals.append(key)
yvals.append(res[key])
labels.append(key)
xvals.append(len(xvals)+1)
yvals.append(res[-1])
labels.append('>=5')
sum = 0
for val in yvals:
sum += val
for i in range(0,len(yvals)):
p = (yvals[i] * 100) / sum
yvals[i] = p
plt.title(title + ' (total=' + str(sum) + ')')
X = np.array(xvals)
Y = np.array(yvals)
bar(X, +Y, 1, facecolor=BAR_COLORS[0], edgecolor='white', label=key, zorder=10)
axes = gca()
axes.get_xaxis().set_visible(False)
# Draw grid
axes = gca()
axes.grid(True, zorder=1, color="#707070")
axes.set_axisbelow(True) # Keep grid under the axes
for i in range(0,len(labels)):
text(X[i]+0.25, -0.0, labels[i], ha='right', va='top')
# print(xvals)
# print(yvals)
# print(labels)
# print(res)
pdf.savefig(fig)
# Draw graph for given key
# Y: values for this key
# X: benchmarks
def drawKeyValueGraph(pdf,key,benchs_data):
fig = plt.figure(key,figsize=(8,3.4))
#plt.title(key)
exec_ref = ''
# Number of benchmarks
firstExec = list(benchs_data.keys())[0]
n = len(benchs_data[firstExec]) + 1 # +1 for mean
X = np.arange(n) # X set is [0, 1, ..., n-1]
Ys = {}
# For each exec
for d in benchs_data:
Y = []
# For each benchmark
for f in files:
Y.extend([benchs_data[d][os.path.basename(f)][key]])
        # Convert to a numpy array
Y = np.array(Y)
Ys[d] = Y
width = (1 / (len(Ys)+1)) # +1 for mean
#----------
# TODO: move to external fn
# Use a reference execution. All values for this exec are 100%
# Values for others executions are computed from this reference exec
if OPT_REF:
# Add % symbol to y values
formatter = FuncFormatter(to_percent)
plt.gca().yaxis.set_major_formatter(formatter)
exec_ref = OPT_REF # Reference execution (100%)
Y2 = deepcopy(Ys) # Deep copy of Y values
# Set all references to 100
for v in range(0,len(Y2[exec_ref])):
Y2[exec_ref][v] = '100'
# For each exec which is not ref exec
candraw = True # TODO : rename
for ex in Y2:
if ex != exec_ref:
for i in range(0,len(Y2[ex])):
ref = Ys[exec_ref][i]
cur = Ys[ex][i]
# We can't compute %, warning and stop
if ref == 0:
WARNING("Can't draw '" + key + "' using a reference execution.")
return
# Compute % and set
else:
Y2[ex][i] = (cur*100)/ref
# Y2 are the new values to draw
Ys = Y2
#----------
fileList = files
Yvals = Ys
# Sort Y values by a given execution
if OPT_SORT:
fileList,Yvals = sortByExecution(Yvals,OPT_SORT)
# Draw grid
axes = gca()
axes.grid(True, zorder=1, color="#707070")
axes.set_axisbelow(True) # Keep grid under the axes
i = 0
# TODO: add to --help: the script draws the exec bar in order
for key in lexecs:
if key != exec_ref:
Y = Yvals[key]
color = BAR_COLORS[i]
arith_mean = sum(Y) / float(len(Y))
print("MEANS:")
print(key + ": " + str(arith_mean))
Y = np.append(Y,[arith_mean]) # Add mean before drawing bars
bar(X+(i*width)+0.05, +Y, width, facecolor=color, linewidth=0, label=key)
i += 1
# Hide X values
axes.get_xaxis().set_visible(False)
plt.tick_params(axis='both', which='minor')
# # Set Y limit
#l = len(str(max(Y2))) # number of digit of max value
#ylim(0,max(Y2)+pow(10,l-1)) # Y is from 0 to (max + 10^i-1)
# # Draw values for each bar
# for x,y in zip(X,Y1):
# text(x+0.4, y+0.05, '%.2f' % y, ha='center', va= 'bottom')
ylim(0,120)
xlim(0,n)
# Draw benchmark name
names = fileList
names.append("ari-mean.scm") # Add mean name
for i in range(0,len(fileList)):
text(X[i]+0.40, -3, os.path.basename(fileList[i])[:-4], rotation=90, ha='center', va='top')
# Legend:
# Shrink by 10% on the bottom
box = axes.get_position()
axes.set_position([box.x0, box.y0 + box.height * 0.34, box.width, box.height * 0.66])
# Put a legend below axis
ncol = int(len(lexecs)/3);
legend(loc='upper center', bbox_to_anchor=(0., 0., 1., -0.35), prop={'size':FONT_SIZE}, ncol=ncol, mode='expand', borderaxespad=0.)
# Save to pdf
pdf.savefig(fig)
#-------------------------------------------------------------------------------------
# Manage Y values
# Sort Y values by values from a specific execution
def sortByExecution(Ys,execref):
    # Pseudo-decorate: Change data layout to allow the use of sort()
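    # Each decorated row has the layout [exec1, val1, exec2, val2, ..., filepath].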
decorated = []
for fileIndex in range(0,len(files)):
r = [] # List of results for current file
for execution in Ys:
r.extend([execution,Ys[execution][fileIndex]])
r.append(files[fileIndex])
decorated.append(r)
# Sort
i = decorated[0].index(execref)
decorated = sorted(decorated,key=lambda el: el[i+1])
# Pseudo-undecorate: Restore previous layout with sorted data
undecorated = {}
ordered_files = []
i = 0;
while not decorated[0][i] in files:
execution = decorated[0][i]
vals = []
# For each data associated to file
for el in decorated:
vals.append(el[i+1])
filepath = el[len(el)-1]
if not filepath in ordered_files:
ordered_files.append(filepath)
undecorated[execution] = np.asarray(vals);
i+=2
return(ordered_files,undecorated)
#-------------------------------------------------------------------------------------
setargs()
go()
|
bsd-3-clause
|
EconomicSL/housing-model
|
src/main/resources/calibration/code/bak/Tools_MHedit.py
|
2
|
5712
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 16:32:30 2015
@author: 326052
"""
import Datasets as ds
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import math
import pylab as pyl
savedOutput1 = 0
savedOutput2 = 0
hist = 0
class DiscountDistribution:
ysize = 200
dist = np.zeros((400,ysize))
yOrigin = 200
yUnit = 0.1
def add(self, start, end, percent):
for day in range(start, end):
self.addPoint(day,percent)
def addPoint(self, day, percent):
        yindex = int(percent/self.yUnit + self.yOrigin)
# if(yindex > 399): yindex = 399
# if(yindex < 0): yindex = 0
if(day < 400 and yindex < self.ysize and yindex >= 0):
self.dist[day, yindex] += 1
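# Illustrative usage (editor's sketch, not from the original script):
#   d = DiscountDistribution(); d.add(0, 30, -5.0)
# records a -5% price change for each of the first 30 days on market.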
class PriceCalc():
currentprice = 0
initialmarketprice = 0
daysonmarket = 0
lastChange = 0
def __init__(self, initialDate, initialPrice):
self.currentprice = initialPrice
self.initialmarketprice = initialPrice
self.daysonmarket = 0
self.lastChange = self.dateconvert(initialDate)
def dateconvert(self, dateString):
if type(dateString) is str:
return(datetime.strptime(dateString, "%Y-%m-%d"))
else:
return(datetime.strptime("1900-01-01", "%Y-%m-%d"))
def add(self, dateString, price):
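        # Register a newly observed price: returns the days-on-market interval
        # covered by the previous price and that price's percentage deviation
        # from the initial listing price.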
newDate = self.dateconvert(dateString)
startDays = self.daysonmarket
endDays = self.daysonmarket + (newDate - self.lastChange).days
reduction = (self.currentprice - self.initialmarketprice)*100.0/self.initialmarketprice
self.currentprice = price
self.daysonmarket = endDays
self.lastChange = newDate
# for month in range(0,120):
# if math.trunc(endDays/30.0-1.0)==month:
# if reduction !=0.0:
# self.changeind = 1
# else:
# self.changeind =0
# else:
# self.changeind = 0
#
# changeind = self.changeind
#
return(startDays, endDays, reduction)
def plotProbability(mat):
plt.figure(figsize=(10, 10))
    im = plt.imshow(mat, origin='lower', cmap=cm.jet)
plt.colorbar(im, orientation='horizontal')
plt.show()
distribution = DiscountDistribution()
def ZooplaPriceChanges():
total = 0
pSame = 0
priceMap = {}
# distribution = DiscountDistribution()
data = ds.ZooplaMatchedDaily()
# store = pd.HDFStore('rawDaily.hd5',mode='w')
# for chunk in data.parser:
chunk = data.read(10000000)
chunk.rename(columns={'\xef\xbb\xbfLISTING ID':'LISTING ID'},inplace=True)
filteredchunk = chunk[chunk["MARKET"]=="SALE"][['LISTING ID','DAY','PRICE']][chunk['PRICE']>0]
change = []
changeprice = []
nochange = []
for row in filteredchunk.values:
if row[0] in priceMap:
if(priceMap[row[0]].currentprice == row[2]):
# no change
nochange.append(priceMap[row[0]].daysonmarket/30)
            else:
change.append(priceMap[row[0]].daysonmarket/30)
changeprice.append([priceMap[row[0]].daysonmarket/30, -(priceMap[row[0]].currentprice-row[2])/row[2]*100])
startDay, endDay, percent = priceMap[row[0]].add(row[1],row[2])
distribution.add(startDay, endDay, percent)
else:
priceMap[row[0]] = PriceCalc(row[1],row[2])
# now get deletion dates
delData = ds.ZooplaMatchedCollated()
# for chunk in delData.parser:
chunk = delData.read(10000000)
chunk.rename(columns={'\xef\xbb\xbfLISTING ID':'LISTING ID'},inplace=True)
filteredchunk = chunk[chunk["MARKET"]=="SALE"][['LISTING ID','DELETED']]
for row in filteredchunk.values:
if row[0] in priceMap:
if(priceMap[row[0]].currentprice == priceMap[row[0]].initialmarketprice):
pSame += 1
total += 1
print pSame, total, pSame*1.0/total
for row in filteredchunk.values:
if row[0] in priceMap:
startDay, endDay, percent = priceMap[row[0]].add(row[1],0)
distribution.add(startDay, endDay, percent)
priceMap.pop(row[0])
print len(priceMap)
global savedOutput1
global savedOutput2
global savedOutput3
savedOutput1 = nochange
savedOutput2 = change
savedOutput3 = changeprice
plotProbability(distribution.dist)
global hist
global n, n1, n2, nprice, df
# hist = np.histogram(savedOutput1)
n1, bins1, patches1 = pyl.hist(savedOutput1,bins=range(min(savedOutput1), max(savedOutput1) + 1, 1))
n2, bins2, patches2 = pyl.hist(savedOutput2,bins=range(min(savedOutput2), max(savedOutput2) + 1, 1))
dist, binsa, binsb = np.histogram2d([x[0] for x in savedOutput3], [x[1] for x in savedOutput3], range=[[0,30],[-30,0]], bins=[30,20])
# plt.imshow(dist)
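    # Intended result: per months-on-market bin, the fraction of price
    # observations that were changes (n1 and n2 must share the same bin count).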
n = n2/(n1+n2)
return(n, n1, n2)
# plt.imshow(dist)
# print filteredchunk.dtypes
# print filteredchunk
# store.append('df',filteredchunk)
# store.close()
#delData = ds.ZooplaMatchedCollated()
#for chunk in delData.parser:
# chunk.rename(columns={'\xef\xbb\xbfLISTING ID':'LISTING ID'},inplace=True)
# filteredchunk = chunk[chunk["MARKET"]=="SALE"][['LISTING ID','DELETED']]
# for row in filteredchunk.values:
# print row
ZooplaPriceChanges()
#data = pd.io.pytables.read_hdf('test.hd5','df')
#print data
|
mit
|
IshankGulati/scikit-learn
|
sklearn/cross_decomposition/pls_.py
|
21
|
30770
|
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from distutils.version import LooseVersion
from sklearn.utils.extmath import svd_flip
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
import scipy
pinv2_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
pinv2_args = {'check_finite': False}
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
    similar to the power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
# We use slower pinv2 (same as np.linalg.pinv) for stability
# reasons
X_pinv = linalg.pinv2(X, **pinv2_args)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# If y_score only has zeros x_weights will only have zeros. In
# this case add an epsilon to converge to a more acceptable
# solution
if np.dot(x_weights.T, x_weights) < eps:
x_weights += eps
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv2(Y, **pinv2_args) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
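# Note (editorial): for mode "A" both estimators target the first singular
# vectors of X'Y, so _nipals_twoblocks_inner_loop and _svd_cross_product should
# agree up to sign, the scaling of the y weights and the convergence tolerance.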
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; its constructor parameters
    allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
    (i) The outer loop iterates over the components.
    (ii) The inner loop estimates the weights vectors. This can be done
    with two algorithms: (a) the inner loop of the original NIPALS algorithm, or
    (b) an SVD on the residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights : boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_ : array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of predictors.
        Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the residual (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# Forces sign stability of x_weights and y_weights
            # Sign indeterminacy issue from svd if algorithm == "svd"
# and from platform dependent computation if algorithm == 'nipals'
x_weights, y_weights = svd_flip(x_weights, y_weights.T)
y_weights = y_weights.T
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # Possible memory footprint reduction may be done here: in order to
            # avoid the allocation of a data chunk for the rank-one
            # approximations matrix which is then subtracted from Xk, we suggest
            # performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv2(np.dot(self.x_loadings_.T, self.x_weights_),
**pinv2_args))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv2(np.dot(self.y_loadings_.T, self.y_weights_),
**pinv2_args))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
    in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
        Tolerance used in the iterative algorithm, default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_ : array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
        Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * std(Xk u) std(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
    available in the R language (R-project):
    - "mixOmics" with function pls(X, Y, mode = "regression")
    - "plspm" with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(PLSRegression, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
        Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * std(Xk u) std(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical symmetric version of the PLS
    regression, but one slightly different from CCA. This is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
    available in the R language (R-project), using the function plsca(X, Y).
    Results are equal or collinear with the function
    ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
    the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
super(PLSCanonical, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply performs an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
        # copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
        # let's use arpack to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
# Deterministic output
U, V = svd_flip(U, V)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
|
bsd-3-clause
|
stochasticHydroTools/RigidMultiblobsWall
|
general_application_utils.py
|
1
|
22024
|
'''File with utilities for the scripts and functions in this project.'''
import logging
try:
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
except ImportError:
pass
import numpy as np
import scipy.sparse.linalg as scspla
import os
import sys
import time
from functools import partial
# Quaternion is needed by the MSD and trajectory-reading helpers below; import
# it defensively so the module still loads without the quaternion_integrator package.
try:
  from quaternion_integrator.quaternion import Quaternion
except ImportError:
  pass
DT_STYLES = {} # Used for plotting different timesteps of MSD.
# Fake log-like class to redirect stdout to log file.
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
class Tee(object):
def __init__(self, *files):
self.files = files
def write(self, obj):
for f in self.files:
f.write(obj)
f.flush() # If you want the output to be visible immediately
def flush(self) :
for f in self.files:
f.flush()
# Static Variable decorator for calculating acceptance rate.
def static_var(varname, value):
def decorate(func):
setattr(func, varname, value)
return func
return decorate
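# Example: timer() below is decorated with @static_var('timers', {}), giving it
# a persistent dict of named timers across calls.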
def set_up_logger(file_name):
''' Set up logging info, write all print statements and errors to
file_name.'''
progress_logger = logging.getLogger('Progress Logger')
progress_logger.setLevel(logging.INFO)
# Add the log message handler to the logger
logging.basicConfig(filename=file_name,
level=logging.INFO,
filemode='w')
sl = StreamToLogger(progress_logger, logging.INFO)
sys.stdout = sl
sl = StreamToLogger(progress_logger, logging.ERROR)
sys.stderr = sl
return progress_logger
class MSDStatistics(object):
'''
Class to hold the means and std deviations of the time dependent
MSD for multiple schemes and timesteps. data is a dictionary of
dictionaries, holding runs indexed by scheme and timestep in that
order.
Each run is organized as a list of 3 arrays: [time, mean, std]
mean and std are matrices (the rotational MSD).
'''
def __init__(self, params):
self.data = {}
self.params = params
def add_run(self, scheme_name, dt, run_data):
'''
Add a run. Create the entry if need be.
run is organized as a list of 3 arrays: [time, mean, std]
In that order.
'''
if scheme_name not in self.data:
self.data[scheme_name] = dict()
self.data[scheme_name][dt] = run_data
def print_params(self):
print("Parameters are: ")
print(self.params)
def plot_time_dependent_msd(msd_statistics, ind, figure, color=None, symbol=None,
label=None, error_indices=[0, 1, 2, 3, 4, 5], data_name=None,
num_err_bars=None):
'''
Plot the <ind> entry of the rotational MSD as
a function of time on given figure (integer).
This uses the msd_statistics object
that is saved by the *_rotational_msd.py script.
ind contains the indices of the entry of the MSD matrix to be plotted.
ind = [row index, column index].
'''
scheme_colors = {'RFD': 'g', 'FIXMAN': 'b', 'EM': 'r'}
pyplot.figure(figure)
# Types of lines for different dts.
write_data = True
data_write_type = 'w'
if not data_name:
data_name = "MSD-component-%s-%s.txt" % (ind[0], ind[1])
if write_data:
    np.set_printoptions(threshold=sys.maxsize)
with open(os.path.join('.', 'data', data_name), data_write_type) as f:
f.write(' \n')
if not num_err_bars:
num_err_bars = 40
linestyles = [':', '--', '-.', '']
for scheme in list(msd_statistics.data.keys()):
for dt in list(msd_statistics.data[scheme].keys()):
if dt in list(DT_STYLES.keys()):
if not symbol:
style = ''
nosymbol_style = DT_STYLES[dt]
else:
style = symbol #+ DT_STYLES[dt]
nosymbol_style = DT_STYLES[dt]
else:
if not symbol:
style = '' #linestyles[len(DT_STYLES)]
DT_STYLES[dt] = linestyles[len(DT_STYLES)]
nosymbol_style = DT_STYLES[dt]
else:
DT_STYLES[dt] = linestyles[len(DT_STYLES)]
style = symbol #+ DT_STYLES[dt]
nosymbol_style = DT_STYLES[dt]
# Extract the entry specified by ind to plot.
num_steps = len(msd_statistics.data[scheme][dt][0])
# Don't put error bars at every point
err_idx = [int(num_steps*k/num_err_bars) for k in range(num_err_bars)]
msd_entries = np.array([msd_statistics.data[scheme][dt][1][_][ind[0]][ind[1]]
for _ in range(num_steps)])
msd_entries_std = np.array(
[msd_statistics.data[scheme][dt][2][_][ind[0]][ind[1]]
for _ in range(num_steps)])
# Set label and style.
if label:
if scheme == 'FIXMAN':
plot_label = scheme.capitalize() + label
else:
plot_label = scheme + label
else:
plot_label = '%s, dt=%s' % (scheme, dt)
if color:
plot_style = color + style
nosymbol_plot_style = color + nosymbol_style
err_bar_color = color
else:
plot_style = scheme_colors[scheme] + style
nosymbol_plot_style = scheme_colors[scheme] + nosymbol_style
err_bar_color = scheme_colors[scheme]
pyplot.plot(np.array(msd_statistics.data[scheme][dt][0])[err_idx],
msd_entries[err_idx],
plot_style,
label = plot_label)
pyplot.plot(msd_statistics.data[scheme][dt][0],
msd_entries,
nosymbol_plot_style)
if write_data:
with open(os.path.join('.', 'data', data_name),'a') as f:
f.write("scheme %s \n" % scheme)
f.write("dt %s \n" % dt)
f.write("time: %s \n" % msd_statistics.data[scheme][dt][0])
f.write("MSD component: %s \n" % msd_entries)
f.write("Std Dev: %s \n" % msd_entries_std)
if ind[0] in error_indices:
pyplot.errorbar(np.array(msd_statistics.data[scheme][dt][0])[err_idx],
msd_entries[err_idx],
yerr = 2.*msd_entries_std[err_idx],
fmt = err_bar_color + '.')
pyplot.ylabel('MSD')
pyplot.xlabel('time')
def log_time_progress(elapsed_time, time_units, total_time_units):
''' Write elapsed time and expected duration to progress log.'''
progress_logger = logging.getLogger('progress_logger')
if elapsed_time > 60.0:
progress_logger.info('Elapsed Time: %.2f Minutes.' %
(float(elapsed_time/60.)))
else:
progress_logger.info('Elapsed Time: %.2f Seconds' % float(elapsed_time))
# Report estimated duration.
if time_units > 0:
expected_duration = elapsed_time*total_time_units/time_units
if expected_duration > 60.0:
progress_logger.info('Expected Duration: %.2f Minutes.' %
(float(expected_duration/60.)))
else:
progress_logger.info('Expected Duration: %.2f Seconds' %
float(expected_duration))
def calc_total_msd_from_matrix_and_center(original_center, original_rotated_e,
final_center, rotated_e):
'''
Calculate 6x6 MSD including orientation and location. This is
calculated from precomputed center of the tetrahedron and rotation
matrix data to avoid repeating computation.
'''
du_hat = np.zeros(3)
for i in range(3):
du_hat += 0.5*np.cross(original_rotated_e[i],
rotated_e[i])
dx = np.array(final_center) - np.array(original_center)
displacement = np.concatenate([dx, du_hat])
return np.outer(displacement, displacement)
def calc_msd_data_from_trajectory(locations, orientations, calc_center_function, dt, end,
burn_in = 0, trajectory_length = 100):
''' Calculate rotational and translational (6x6) MSD matrix given a dictionary of
trajectory data. Return a numpy array of 6x6 MSD matrices, one for each time.
params:
    locations: a list of length-3 lists, indicating the location of the rigid body
      at each timestep.
    orientations: a list of length-4 lists, indicating the entries of a quaternion
      representing the orientation of the rigid body at each timestep.
calc_center_function: a function that given location and orientation
(as a quaternion) computes the center of the body (or the point
that we use to track location MSD).
dt: timestep used in this simulation.
end: end time to which we calculate MSD.
burn_in: how many steps to skip before calculating MSD. This is 0 by default
because we assume that the simulation starts from a sample from the
      Gibbs-Boltzmann distribution.
trajectory_length: How many points to keep in the window 0 to end.
The code will process every n steps to make the total
number of analyzed points roughly this value.
'''
data_interval = int(end/dt/trajectory_length) + 1
print("data_interval is ", data_interval)
n_steps = len(locations)
e_1 = np.array([1., 0., 0.])
e_2 = np.array([0., 1., 0.])
e_3 = np.array([0., 0., 1.])
if trajectory_length*data_interval > n_steps:
raise Exception('Trajectory length is longer than the total run. '
'Perform a longer run, or choose a shorter end time.')
print_increment = int(n_steps/20)
average_rotational_msd = np.array([np.zeros((6, 6))
for _ in range(trajectory_length)])
lagged_rotation_trajectory = []
lagged_location_trajectory = []
start_time = time.time()
for k in range(n_steps):
if k > burn_in and (k % data_interval == 0):
orientation = Quaternion(orientations[k])
R = orientation.rotation_matrix()
u_hat = [np.inner(R, e_1),
np.inner(R, e_2),
np.inner(R,e_3)]
lagged_rotation_trajectory.append(u_hat)
lagged_location_trajectory.append(calc_center_function(locations[k], orientation))
if len(lagged_location_trajectory) > trajectory_length:
lagged_location_trajectory = lagged_location_trajectory[1:]
lagged_rotation_trajectory = lagged_rotation_trajectory[1:]
for l in range(trajectory_length):
current_rot_msd = (calc_total_msd_from_matrix_and_center(
lagged_location_trajectory[0],
lagged_rotation_trajectory[0],
lagged_location_trajectory[l],
lagged_rotation_trajectory[l]))
average_rotational_msd[l] += current_rot_msd
if (k % print_increment) == 0 and k > 0:
print('At step %s of %s' % (k, n_steps))
print('For this run, time status is:')
elapsed = time.time() - start_time
log_time_progress(elapsed, k, n_steps)
average_rotational_msd = (average_rotational_msd/
(n_steps/data_interval - trajectory_length -
burn_in/data_interval))
return average_rotational_msd
def fft_msd(x, y, end):
  ''' Calculate scalar MSD between x and y using FFT.
  We want D(tau) = sum( (x(t+tau) - x(t))*(y(t+tau) - y(t)) )
  This is computed as
  D(tau) = sum(x(t)y(t)) + sum(x(t+tau)y(t+tau)) - sum(x(t)y(t+tau))
  - sum(y(t)x(t+tau))
  where the last two sums are performed using an FFT.
We expect that x and y are the same length.
WARNING: THIS IS NOT CURRENTLY USED OR TESTED THOROUGHLY'''
if len(x) != len(y):
raise Exception('Length of X and Y are not the same, aborting MSD '
'FFT calculation.')
xy_sum_tau = np.cumsum(x[::-1]*y[::-1])[::-1]/np.arange(len(x), 0, -1)
xy_sum_t = np.cumsum(x*y)[::-1]/np.arange(len(x), 0, -1)
x_fft = np.fft.fft(x)
y_fft = np.fft.fft(y)
x_fft_xy = np.zeros(len(x))
x_fft_yx = np.zeros(len(x))
x_fft_xy[1:] = (x_fft[1:])*(y_fft[:0:-1])
x_fft_xy[0] = x_fft[0]*y_fft[0]
x_fft_yx[1:] = (y_fft[1:])*(x_fft[:0:-1])
x_fft_yx[0] = x_fft[0]*y_fft[0]
x_ifft_xy = np.fft.ifft(x_fft_xy)/np.arange(len(x), 0, -1)
x_ifft_yx = np.fft.ifft(x_fft_yx)/np.arange(len(x), 0, -1)
return (xy_sum_tau + xy_sum_t - x_ifft_yx - x_ifft_xy)[:end]
def write_trajectory_to_txt(file_name, trajectory, params, location=True):
'''
Write parameters and data to a text file. Parameters first, then the trajectory
one step at a time.
'''
# First check that the directory exists. If not, create it.
dir_name = os.path.dirname(file_name)
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
# Write data to file, parameters first then trajectory.
with open(file_name, 'w') as f:
f.write('Parameters:\n')
for key, value in list(params.items()):
f.writelines(['%s: %s \n' % (key, value)])
f.write('Trajectory data:\n')
if location:
f.write('Location, Orientation:\n')
for k in range(len(trajectory[0])):
x = trajectory[0][k]
theta = trajectory[1][k]
f.write('%s, %s, %s, %s, %s, %s, %s \n' % (
x[0], x[1], x[2], theta[0], theta[1], theta[2], theta[3]))
else:
f.write('Orientation:\n')
for k in range(len(trajectory[0])):
theta = trajectory[0][k]
f.write('%s, %s, %s, %s \n' % (
theta[0], theta[1], theta[2], theta[3]))
def read_trajectory_from_txt(file_name, location=True):
'''
Read a trajectory and parameters from a text file.
'''
params = {}
locations = []
orientations = []
with open(file_name, 'r') as f:
# First line should be "Parameters:"
line = f.readline()
line = f.readline()
while line != 'Trajectory data:\n':
items = line.split(':')
if items[1].strip()[0] == '[':
last_token = items[1].strip()[-1]
list_items = items[1].strip()[1:].split(' ')
params[items[0]] = list_items
while last_token != ']':
line = f.readline()
list_items = line.strip().split(' ')
last_token = list_items[-1].strip()[-1]
if last_token == ']':
list_items[-1] = list_items[-1].strip()[:-1]
params[items[0]] += list_items
else:
params[items[0]] = items[1]
line = f.readline()
# Read next line after 'Trajectory data' 'Location, Orientation'
line = f.readline()
line = f.readline()
if location:
while line != '':
loc = line.split(',')
locations.append([float(x) for x in loc[0:3]])
orientations.append([float(x) for x in loc[3:7]])
line = f.readline()
else:
# These two lines are '\n', and 'Orientation'
line = f.readline()
line = f.readline()
while line != '':
quaternion_entries = line.split(',')
orientations.append(Quaternion([float(x) for x in quaternion_entries]))
line = f.readline()
return params, locations, orientations
def transfer_mobility(mobility_1, point_1, point_2):
'''
Calculate mobility at point 2 based on mobility
at point_1 of the body. This calculates the entire
force and torque mobility.
args:
mobility_1: mobility matrix (force, torque) ->
(velocity, angular velocity) evaluated at point_1.
point_1: 3 dimensional point where mobility_1 is evaluated.
point_2: 3 dimensional point where we want to know the mobility
returns:
mobility_2: The mobility matrix evaluated at point_2.
This uses formula (10) and (11) from:
"Bernal, De La Torre - Transport Properties and Hydrodynamic Centers
of Rigid Macromolecules with Arbitrary Shapes"
'''
r = np.array(point_1) - np.array(point_2)
mobility_2 = np.zeros([6, 6])
# Rotational mobility is the same.
mobility_2[3:6, 3:6] = mobility_1[3:6, 3:6]
mobility_2[3:6, 0:3] = mobility_1[3:6, 0:3]
mobility_2[3:6, 0:3] += tensor_cross_vector(mobility_1[3:6, 3:6], r)
mobility_2[0:3, 3:6] = mobility_2[3:6, 0:3].T
# Start with point 1 translation.
mobility_2[0:3, 0:3] = mobility_1[0:3, 0:3]
# Add coupling block transpose cross r.
mobility_2[0:3, 0:3] += tensor_cross_vector(mobility_1[0:3, 3:6 ], r)
# Subtract r cross coupling block.
mobility_2[0:3, 0:3] -= vector_cross_tensor(r, mobility_1[3:6, 0:3])
# Subtract r cross D_r cross r
mobility_2[0:3, 0:3] -= vector_cross_tensor(
r, tensor_cross_vector(mobility_1[3:6, 3:6], r))
return mobility_2
def tensor_cross_vector(T, v):
'''
Tensor cross vector from De La Torre paper.
Assume T is 3x3 and v is length 3
'''
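  # Equivalent to crossing each row of T with v: result[k, :] = np.cross(T[k, :], v).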
result = np.zeros([3, 3])
for k in range(3):
for l in range(3):
result[k, l] = (T[k, (l+1) % 3]*v[(l - 1) % 3] -
T[k, (l-1) % 3]*v[(l + 1) % 3])
return result
def vector_cross_tensor(v, T):
'''
  Vector cross tensor from De La Torre paper.
Assume T is 3x3 and v is length 3
'''
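  # Equivalent to crossing v with each column of T: result[:, l] = np.cross(v, T[:, l]).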
result = np.zeros([3, 3])
for k in range(3):
for l in range(3):
result[k, l] = (T[(k-1) % 3, l]*v[(k + 1) % 3] -
T[(k+1) % 3, l]*v[(k - 1) % 3])
return result
@static_var('timers', {})
def timer(name, print_one = False, print_all = False, clean_all = False):
'''
  Timer to profile the code. It measures the time elapsed between successive
  calls and prints the total time elapsed after successive calls.
'''
if name not in timer.timers:
timer.timers[name] = (0, time.time())
elif timer.timers[name][1] is None:
time_tuple = (timer.timers[name][0], time.time())
timer.timers[name] = time_tuple
else:
time_tuple = (timer.timers[name][0] + (time.time() - timer.timers[name][1]), None)
timer.timers[name] = time_tuple
if print_one is True:
print(name, ' = ', timer.timers[name][0])
if print_all is True:
print('\n')
col_width = max(len(key) for key in timer.timers)
for key in sorted(timer.timers):
print("".join(key.ljust(col_width)), ' = ', timer.timers[key][0])
if clean_all:
timer.timers = {}
return
def gmres(A, b, x0=None, tol=1e-05, restart=None, maxiter=None, xtype=None, M=None, callback=None, restrt=None, PC_side='right'):
'''
Wrapper for scipy gmres to use right or left preconditioner.
Solve the linear system A*x = b, using right or left preconditioning.
Inputs and outputs as in scipy gmres plus PC_side ('right' or 'left').
Right Preconditioner (default):
First solve A*P^{-1} * y = b for y
then solve P*x = y, for x.
  Left Preconditioner:
Solve P^{-1}*A*x = P^{-1}*b
Use Generalized Minimal Residual to solve A x = b.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
Matrix that defines the linear system.
b : {array, matrix}
Right hand side of the linear system. It can be a matrix.
Returns
-------
x : {array, matrix}
The solution of the linear system.
info : int
Provides convergence information:
* 0 : success
* >0 : convergence to tolerance not achieved, number of iterations
* <0 : illegal input or breakdown
Other parameters
----------------
PC_side: {'right', 'left'}
Use right or left Preconditioner. Right preconditioner (default) uses
the real residual to determine convergence. Left preconditioner uses
a preconditioned residual (M*r^n = M*(b - A*x^n)) to determine convergence.
x0 : {array, matrix}
Initial guess for the linear system (zero by default).
tol : float
Tolerance. The solver finishes when the relative or the absolute residual
norm are below this tolerance.
restart : int, optional
Number of iterations between restarts.
Default is 20.
maxiter : int, optional
Maximum number of iterations.
xtype : {'f','d','F','D'}
This parameter is DEPRECATED --- avoid using it.
The type of the result. If None, then it will be determined from
A.dtype.char and b. If A does not have a typecode method then it
will compute A.matvec(x0) to get a typecode. To save the extra
computation when A does not have a typecode attribute use xtype=0
for the same type as b or use xtype='f','d','F',or 'D'.
This parameter has been superseded by LinearOperator.
M : {sparse matrix, dense matrix, LinearOperator}
Inverse of the preconditioner of A. By default M is None.
callback : function
User-supplied function to call after each iteration. It is called
as callback(rk), where rk is the current residual vector.
restrt : int, optional
DEPRECATED - use `restart` instead.
See Also
--------
LinearOperator
Notes
-----
A preconditioner, P, is chosen such that P is close to A but easy to solve
for. The preconditioner parameter required by this routine is
``M = P^-1``. The inverse should preferably not be calculated
explicitly. Rather, use the following template to produce M::
# Construct a linear operator that computes P^-1 * x.
import scipy.sparse.linalg as spla
M_x = lambda x: spla.spsolve(P, x)
M = spla.LinearOperator((n, n), M_x)
'''
# If left preconditioner (or no Preconditioner) just call scipy gmres
if PC_side == 'left' or M is None:
return scspla.gmres(A, b, M=M, x0=x0, tol=tol, atol=0, maxiter=maxiter, restart=restart, callback=callback)
# Create LinearOperator for A and P^{-1}
A_LO = scspla.aslinearoperator(A)
M_LO = scspla.aslinearoperator(M)
# Define new LinearOperator A*P^{-1}
def APinv(x,A,M):
return A.matvec(M.matvec(x))
APinv_partial = partial(APinv, A=A_LO, M=M_LO)
APinv_partial_LO = scspla.LinearOperator((b.size, b.size), matvec = APinv_partial, dtype='float64')
# Solve system A*P^{-1} * y = b
(y, info) = scspla.gmres(APinv_partial_LO, b, x0=None, tol=tol, atol=0, maxiter=maxiter, restart=restart, callback=callback)
# Solve system P*x = y
x = M_LO.matvec(y)
# Return solution and info
return x, info
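# Hedged usage sketch (illustration only, not part of the original source):
# solve a small SPD system with the wrapper above, using a diagonal (Jacobi)
# approximation of A^{-1} as a right preconditioner. Assumes numpy and
# scipy.sparse.linalg are available as np and scspla, as elsewhere in this
# module.
def _example_right_preconditioned_gmres():
  A = np.array([[4.0, 1.0], [1.0, 3.0]])
  b = np.array([1.0, 2.0])
  # M plays the role of P^{-1}; here simply the inverse of the diagonal of A.
  M = scspla.LinearOperator((2, 2), matvec=lambda x: x / np.diag(A))
  x, info = gmres(A, b, M=M, tol=1e-12, PC_side='right')
  return x, info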
|
gpl-3.0
|
DSLituiev/scikit-learn
|
examples/gaussian_process/plot_gpc_xor.py
|
104
|
2132
|
"""
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class-boundaries are linear and coincide with the coordinate axes. In general,
stationary kernels often obtain better results.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
for i, kernel in enumerate(kernels):
clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
Z = Z.reshape(xx.shape)
plt.subplot(1, 2, i + 1)
image = plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
    contours = plt.contour(xx, yy, Z, levels=[0.5], linewidths=2,
                           linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.colorbar(image)
plt.title("%s\n Log-Marginal-Likelihood:%.3f"
% (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
ocefpaf/descriptive_oceanography
|
lecture-09/geostrophy/mdt_geostrophic_velocity.py
|
1
|
6958
|
# -*- coding: utf-8 -*-
#
# mdt_geostrophic_velocity.py
#
# purpose: Compute geostrophic currents using MDT
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail
# web: http://ocefpaf.github.io/
# created: 26-Sep-2013
# modified: Thu 26 Sep 2013 12:03:24 PM BRT
#
# obs: Need `mdt_cnes_cls2009_global_v1.1.nc`.
#
import iris
import numpy as np
import seawater as sw
import iris.plot as iplt
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import cartopy.feature as cfeature
from scipy.spatial import KDTree
from brewer2mpl import brewer2mpl
from oceans.ff_tools.ocfis import uv2spdir, spdir2uv
from oceans.ff_tools import wrap_lon360, wrap_lon180
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Colormap.
cmap = brewer2mpl.get_map('RdYlGn', 'diverging', 11, reverse=True).mpl_colormap
def geostrophic_current(ix, lat):
g = sw.g(lat.mean())
f = sw.f(lat.mean())
v = ix * g / f
return v
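# Hedged worked example (illustration only, not part of the original script):
# a 0.1 m rise in dynamic topography over 100 km at ~30 degrees latitude gives
# a geostrophic speed of roughly v = slope * g / f ~ 0.13 m/s.
def _example_geostrophic_speed():
    lat = np.array([30.0, 30.0])
    slope = 0.1 / 100e3  # m of height change per m of distance
    return geostrophic_current(slope, lat)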
def fix_axis(lims, p=0.1):
"""Ajusta eixos + ou - p dos dados par exibir melhor os limites."""
min = lims.min() * (1 - p) if lims.min() > 0 else lims.min() * (1 + p)
max = lims.max() * (1 + p) if lims.max() > 0 else lims.max() * (1 - p)
return min, max
def plot_mdt(mdt, projection=ccrs.PlateCarree(), figsize=(12, 10)):
"""Plota 'Mean Dynamic Topography' no mapa global."""
fig = plt.figure(figsize=figsize)
ax = plt.axes(projection=projection)
ax.add_feature(cfeature.LAND, facecolor='0.75')
cs = iplt.pcolormesh(mdt, cmap=cmap)
ax.coastlines()
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=1.5,
color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
cbar = fig.colorbar(cs, extend='both', orientation='vertical', shrink=0.6)
cbar.ax.set_title('[m]')
return fig, ax
def get_position(fig, ax):
"""Escolhe dois pontos para fazer o cálculo."""
points = np.array(fig.ginput(2))
lon, lat = points[:, 0], points[:, 1]
kw = dict(marker='o', markerfacecolor='k', markeredgecolor='w',
linestyle='none', alpha=0.65, markersize=5)
ax.plot(lon, lat, transform=ccrs.Geodetic(), **kw)
ax.set_title('')
plt.draw()
return lon, lat
def get_nearest(xi, yi, cube):
"""Encontra os dados mais próximos aos pontos escolhidos."""
x, y = cube.dim_coords
X, Y = np.meshgrid(x.points, y.points)
xi = wrap_lon360(xi)
tree = KDTree(zip(X.ravel(), Y.ravel()))
dist, indices = tree.query(np.array([xi, yi]).T)
indices = np.unravel_index(indices, X.shape)
lon = X[indices]
lat = Y[indices]
maskx = np.logical_and(x.points >= min(lon), x.points <= max(lon))
masky = np.logical_and(y.points >= min(lat), y.points <= max(lat))
maxnp = len(np.nonzero(maskx)[0]) + len(np.nonzero(masky)[0])
lons = np.linspace(lon[0], lon[1], maxnp)
lats = np.linspace(lat[0], lat[1], maxnp)
# Find all x, y, data in that line using the same KDTree obj.
dist, indices = tree.query(np.array([lons, lats]).T)
indices = np.unique(indices)
indices = np.unravel_index(indices, X.shape)
lons, lats = X[indices], Y[indices]
elvs = cube.data.T[indices]
dist, angle = sw.dist(lats, lons, 'km')
dist *= 1e3
dist = np.r_[0, dist.cumsum()]
return (lons, lats), (elvs, dist, angle)
def mid_point(arr):
return (arr[1:] + arr[:-1]) / 2
cube = iris.load_cube('mdt_cnes_cls2009_global_v1.1.nc',
iris.Constraint('Mean Dynamic Topography'))
# Data clean-up.
data = cube.data.filled(fill_value=np.NaN).copy()
data[data == 9999.0] = np.NaN
data = np.ma.masked_invalid(data)
cube.data = data
if __name__ == '__main__':
fig, ax = plot_mdt(cube, projection=ccrs.PlateCarree(), figsize=(12, 10))
    _ = ax.set_title('Choose two points.')
lon, lat = get_position(fig, ax)
print('Longitude: %s\nLatitude: %s' % (lon, lat))
fig, ax = plot_mdt(cube, projection=ccrs.PlateCarree(), figsize=(8, 6))
kw = dict(marker='o', markerfacecolor='k', markeredgecolor='w',
linestyle='none', alpha=0.65, markersize=5)
_ = ax.plot(lon, lat, transform=ccrs.PlateCarree(), **kw)
(lons, lats), (elvs, dist, angle) = get_nearest(lon, lat, cube)
ix = np.diff(elvs) / np.diff(dist)
v = geostrophic_current(ix, lats.mean())
maxi = ix == ix.max()
dist *= 1e-3
fig, ax = plt.subplots(figsize=(10, 2))
fig.subplots_adjust(bottom=0.25)
ax.plot(dist, elvs)
ax.axis('tight')
ax.set_ylabel('Height [m]')
ax.set_xlabel('Distance [km]')
ax.set_title('Sea Surface Slope')
vmax = v.max() if v.max() > np.abs(v.min()) else v.min()
symbol = r'$\bigotimes$' if vmax > 0 else r'$\bigodot$'
_ = ax.text(dist[maxi], elvs[maxi], symbol, va='center', ha='center')
    arrowprops = dict(arrowstyle="->", alpha=0.65,
connectionstyle="angle3,angleA=0,angleB=-90")
_ = ax.annotate(r'%2.2f m s$^{-1}$' % vmax, xy=(dist[maxi], elvs[maxi]),
xycoords='data', xytext=(-50, 30),
textcoords='offset points',
arrowprops=arrowprops)
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title('Jet profile')
ax.set_xlabel('Distance [m]')
    ax.set_ylabel(r'Velocity [m s$^{-1}$]')
xm = mid_point(dist)
xm *= 1e-3
kw = dict(scale_units='xy', angles='xy', scale=1)
qk = ax.quiver(xm, [0]*len(xm), [0]*len(v), v, **kw)
_ = ax.set_ylim(fix_axis(v))
_ = ax.set_xlim(fix_axis(xm))
rot = 180 - np.abs(angle.mean()) # FIXME!
ang, spd = uv2spdir(0, v, rot=rot)
ui, vi = spdir2uv(spd, ang, deg=False)
dx = dy = 10
lon = wrap_lon360(lon)
xmin, xmax = map(int, [lon[0]-dx, lon[1]+dx])
ymin, ymax = map(int, (lat[0]-dy, lat[1]+dy))
coord_values = {'latitude': lambda cell: ymin <= cell <= ymax,
'longitude': lambda cell: xmin <= cell <= xmax}
cube = iris.load_cube('mdt_cnes_cls2009_global_v1.1.nc',
iris.Constraint(name='Mean Dynamic Topography',
coord_values=coord_values))
kw = dict(marker='o', markeredgecolor='w',
linestyle='none', alpha=0.65, markersize=5)
fig, ax = plot_mdt(cube, projection=ccrs.PlateCarree(), figsize=(10, 10))
ax.plot(lons, lats, transform=ccrs.PlateCarree(),
markerfacecolor='r', **kw)
ax.plot(lon, lat, transform=ccrs.PlateCarree(),
markerfacecolor='k', **kw)
x, y = map(mid_point, (lons, lats))
kw = dict(color='k', units='inches', alpha=0.65)
Q = ax.quiver(x, y, ui, vi, transform=ccrs.PlateCarree(), **kw)
ax.axis([wrap_lon180(xmin), wrap_lon180(xmax), ymin, ymax])
    qk = ax.quiverkey(Q, 0.5, 0.05, 0.5, r'0.5 m s$^{-1}$',
fontproperties={'weight': 'bold'})
|
artistic-2.0
|
mbr0wn/gnuradio
|
gr-filter/examples/synth_filter.py
|
6
|
1806
|
#!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
for fi in freqs:
s = analog.sig_source_c(fs, analog.GR_SIN_WAVE, fi, 1)
sigs.append(s)
taps = filter.firdes.low_pass_2(len(freqs), fs,
fs/float(nchans)/2, 100, 100)
print("Num. Taps = %d (taps per filter = %d)" % (len(taps),
len(taps) / nchans))
filtbank = filter.pfb_synthesizer_ccf(nchans, taps)
head = blocks.head(gr.sizeof_gr_complex, N)
snk = blocks.vector_sink_c()
tb = gr.top_block()
tb.connect(filtbank, head, snk)
for i,si in enumerate(sigs):
tb.connect(si, (filtbank, i))
tb.run()
if 1:
f1 = pyplot.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(snk.data()[1000:])
fftlen = 2048
f2 = pyplot.figure(2)
s2 = f2.add_subplot(1,1,1)
winfunc = numpy.blackman
s2.psd(snk.data()[10000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen / 4,
window = lambda d: d*winfunc(fftlen))
pyplot.show()
if __name__ == "__main__":
main()
|
gpl-3.0
|
aitoralmeida/morelab-coauthor-analyzer
|
NetworkAnalyzer.py
|
1
|
5061
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 05 16:42:59 2013
@author: aitor
"""
import networkx as nx
import matplotlib.pylab as plt
from collections import OrderedDict
import csv
verbose = True
def get_graph(path):
fh = open(path, 'rb')
G = nx.read_edgelist(fh)
fh.close()
    # remove possible self loops
G.remove_edges_from(G.selfloop_edges())
return G
def write_csv_centralities(file_name, data):
with open(file_name, 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
for d in data:
if verbose:
print "%s: %s" % (d, data[d])
writer.writerow([d, data[d]])
def write_csv_groups(file_name, groups):
with open(file_name, 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
for e in groups:
if verbose:
print e
writer.writerow(e)
def write_csv_group(file_name, group):
with open(file_name, 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
writer.writerow(group)
def get_graph_info(G):
node_num = G.number_of_nodes()
edge_num = G.number_of_edges()
nodes = G.nodes()
if verbose:
print "The loaded network has %s nodes and %s edges\r\n" % (node_num, edge_num)
print "The nodes of the network are:"
for n in nodes:
print n
with open('./data/results/networkInfo.csv', 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
writer.writerow(['nodes',node_num])
writer.writerow(['edges',edge_num])
def draw_graph(G):
nx.draw(G)
plt.savefig("./images/simpleNetwork.png")
if verbose:
plt.show()
#**********CENTRALITIES***********
def calculate_degree_centrality(G):
cent_degree = nx.degree_centrality(G)
sorted_cent_degree = OrderedDict(sorted(cent_degree.items(), key=lambda t: t[1], reverse=True))
if verbose:
print "\n\r*** Degree Centrality ***"
write_csv_centralities('./data/results/degreeCent.csv', sorted_cent_degree)
def calculate_betweenness_centrality(G):
cent_betweenness = nx.betweenness_centrality(G)
sorted_cent_betweenness = OrderedDict(sorted(cent_betweenness.items(), key=lambda t: t[1], reverse=True))
if verbose:
print "\n\r*** Betweenness Centrality ***"
write_csv_centralities('./data/results/betweennessCent.csv', sorted_cent_betweenness)
def calculate_closeness_centrality(G):
cent_closeness = nx.closeness_centrality(G)
sorted_cent_closeness = OrderedDict(sorted(cent_closeness.items(), key=lambda t: t[1], reverse=True))
if verbose:
print "\n\r*** Closeness Centrality ***"
write_csv_centralities('./data/results/closenessCent.csv', sorted_cent_closeness)
def calculate_eigenvector_centrality(G):
cent_eigenvector = nx.eigenvector_centrality(G)
sorted_cent_eigenvector = OrderedDict(sorted(cent_eigenvector.items(), key=lambda t: t[1], reverse=True))
if verbose:
print "\n\r*** Eigenvector Centrality ***"
write_csv_centralities('./data/results/eigenvectorCent.csv', sorted_cent_eigenvector)
def calculate_pagerank(G):
page_rank = nx.pagerank(G)
sorted_page_rank = OrderedDict(sorted(page_rank.items(), key=lambda t: t[1], reverse=True))
if verbose:
print "\n\r*** PageRank ***"
write_csv_centralities('./data/results/pagerank.csv', sorted_page_rank)
#**********COMMUNITIES***********
def calculate_cliques(G):
cliques = list(nx.find_cliques(G))
if verbose:
print "\n\r*** Cliques ***"
write_csv_groups('./data/results/cliques.csv', cliques)
def calculate_main_k_core(G):
core_main = nx.k_core(G)
nx.draw(core_main)
plt.savefig("./images/kCoreMain.png")
if verbose:
print "\r\nk-Core: Main"
print core_main.nodes()
plt.show()
write_csv_group('./data/results/mainKCore.csv', core_main.nodes())
def calculate_k_core(G, K):
core_k = nx.k_core(G, k=K)
nx.draw(core_k)
plt.savefig("./images/kCore" + str(K) + ".png")
if verbose:
print "\r\nk-Core: " + str(K)
print core_k.nodes()
plt.show()
write_csv_group('./data/results/kCore' + str(K) + '.csv', core_k.nodes())
def calculate_k_clique(G, K):
communities = nx.k_clique_communities(G, K)
if verbose:
print "k-cliques " + str(K)
write_csv_groups('./data/results/kClique' + str(K) + '.csv', communities)
if __name__ == '__main__':
    print 'Analyzing co-author data from ' + "./data/coauthors.txt"
G = get_graph("./data/coauthors.txt")
get_graph_info(G)
draw_graph(G)
#centralities
calculate_degree_centrality(G)
calculate_betweenness_centrality(G)
calculate_closeness_centrality(G)
    calculate_eigenvector_centrality(G)
calculate_pagerank(G)
#communities
calculate_cliques(G)
calculate_main_k_core(G)
calculate_k_core(G,4)
#calculate_k_clique(G, 2)
|
apache-2.0
|
mikaem/spectralDNS
|
demo/Isotropic.py
|
2
|
11543
|
"""
Homogeneous turbulence. See [1] for initialization and [2] for a section
on forcing the lowest wavenumbers to maintain a constant turbulent
kinetic energy.
[1] R. S. Rogallo, "Numerical experiments in homogeneous turbulence,"
NASA TM 81315 (1981)
[2] A. G. Lamorgese and D. A. Caughey and S. B. Pope, "Direct numerical simulation
of homogeneous turbulence with hyperviscosity", Physics of Fluids, 17, 1, 015106,
2005, (https://doi.org/10.1063/1.1833415)
"""
from __future__ import print_function
import warnings
import numpy as np
from numpy import pi, zeros, sum
from shenfun import Function
from shenfun.fourier import energy_fourier
from spectralDNS import config, get_solver, solve
try:
import matplotlib.pyplot as plt
except ImportError:
warnings.warn("matplotlib not installed")
plt = None
def initialize(solver, context):
c = context
# Create mask with ones where |k| < Kf2 and zeros elsewhere
kf = config.params.Kf2
c.k2_mask = np.where(c.K2 <= kf**2, 1, 0)
np.random.seed(solver.rank)
k = np.sqrt(c.K2)
k = np.where(k == 0, 1, k)
kk = c.K2.copy()
kk = np.where(kk == 0, 1, kk)
k1, k2, k3 = c.K[0], c.K[1], c.K[2]
ksq = np.sqrt(k1**2+k2**2)
ksq = np.where(ksq == 0, 1, ksq)
E0 = np.sqrt(9./11./kf*c.K2/kf**2)*c.k2_mask
E1 = np.sqrt(9./11./kf*(k/kf)**(-5./3.))*(1-c.k2_mask)
Ek = E0 + E1
# theta1, theta2, phi, alpha and beta from [1]
theta1, theta2, phi = np.random.sample(c.U_hat.shape)*2j*np.pi
alpha = np.sqrt(Ek/4./np.pi/kk)*np.exp(1j*theta1)*np.cos(phi)
beta = np.sqrt(Ek/4./np.pi/kk)*np.exp(1j*theta2)*np.sin(phi)
c.U_hat[0] = (alpha*k*k2 + beta*k1*k3)/(k*ksq)
c.U_hat[1] = (beta*k2*k3 - alpha*k*k1)/(k*ksq)
c.U_hat[2] = beta*ksq/k
c.mask = c.T.get_mask_nyquist()
c.T.mask_nyquist(c.U_hat, c.mask)
solver.get_velocity(**c)
U_hat = solver.set_velocity(**c)
K = c.K
# project to zero divergence
U_hat[:] -= (K[0]*U_hat[0]+K[1]*U_hat[1]+K[2]*U_hat[2])*c.K_over_K2
if solver.rank == 0:
c.U_hat[:, 0, 0, 0] = 0.0
# Scale to get correct kinetic energy. Target from [2]
energy = 0.5*energy_fourier(c.U_hat, c.T)
target = config.params.Re_lam*(config.params.nu*config.params.kd)**2/np.sqrt(20./3.)
c.U_hat *= np.sqrt(target/energy)
if 'VV' in config.params.solver:
c.W_hat = solver.cross2(c.W_hat, c.K, c.U_hat)
config.params.t = 0.0
config.params.tstep = 0
c.target_energy = energy_fourier(c.U_hat, c.T)
def L2_norm(comm, u):
r"""Compute the L2-norm of real array a
Computing \int abs(u)**2 dx
"""
N = config.params.N
result = comm.allreduce(np.sum(u**2))
return result/np.prod(N)
def spectrum(solver, context):
c = context
uiui = np.zeros(c.U_hat[0].shape)
uiui[..., 1:-1] = 2*np.sum((c.U_hat[..., 1:-1]*np.conj(c.U_hat[..., 1:-1])).real, axis=0)
uiui[..., 0] = np.sum((c.U_hat[..., 0]*np.conj(c.U_hat[..., 0])).real, axis=0)
uiui[..., -1] = np.sum((c.U_hat[..., -1]*np.conj(c.U_hat[..., -1])).real, axis=0)
uiui *= (4./3.*np.pi)
# Create bins for Ek
Nb = int(np.sqrt(sum((config.params.N/2)**2)/3))
bins = np.array(range(0, Nb))+0.5
z = np.digitize(np.sqrt(context.K2), bins, right=True)
# Sample
Ek = np.zeros(Nb)
ll = np.zeros(Nb)
for i, k in enumerate(bins[1:]):
k0 = bins[i] # lower limit, k is upper
ii = np.where((z > k0) & (z <= k))
ll[i] = len(ii[0])
Ek[i] = (k**3 - k0**3)*np.sum(uiui[ii])
Ek = solver.comm.allreduce(Ek)
ll = solver.comm.allreduce(ll)
for i in range(Nb):
if not ll[i] == 0:
Ek[i] = Ek[i] / ll[i]
E0 = uiui.mean(axis=(1, 2))
E1 = uiui.mean(axis=(0, 2))
E2 = uiui.mean(axis=(0, 1))
## Rij
#for i in range(3):
# c.U[i] = c.FFT.ifftn(c.U_hat[i], c.U[i])
#X = c.FFT.get_local_mesh()
#R = np.sqrt(X[0]**2 + X[1]**2 + X[2]**2)
## Sample
#Rii = np.zeros_like(c.U)
#Rii[0] = c.FFT.ifftn(np.conj(c.U_hat[0])*c.U_hat[0], Rii[0])
#Rii[1] = c.FFT.ifftn(np.conj(c.U_hat[1])*c.U_hat[1], Rii[1])
#Rii[2] = c.FFT.ifftn(np.conj(c.U_hat[2])*c.U_hat[2], Rii[2])
#R11 = np.sum(Rii[:, :, 0, 0] + Rii[:, 0, :, 0] + Rii[:, 0, 0, :], axis=0)/3
#Nr = 20
#rbins = np.linspace(0, 2*np.pi, Nr)
#rz = np.digitize(R, rbins, right=True)
#RR = np.zeros(Nr)
#for i in range(Nr):
# ii = np.where(rz == i)
# RR[i] = np.sum(Rii[0][ii] + Rii[1][ii] + Rii[2][ii]) / len(ii[0])
#Rxx = np.zeros((3, config.params.N[0]))
#for i in range(config.params.N[0]):
# Rxx[0, i] = (c.U[0] * np.roll(c.U[0], -i, axis=0)).mean()
# Rxx[1, i] = (c.U[0] * np.roll(c.U[0], -i, axis=1)).mean()
# Rxx[2, i] = (c.U[0] * np.roll(c.U[0], -i, axis=2)).mean()
return Ek, bins, E0, E1, E2
k = []
w = []
kold = zeros(1)
im1 = None
energy_new = None
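# Hedged sketch (illustration only, not part of the original solver): the
# forcing in update() below rescales only the modes with |k| <= Kf2 by
# alpha = sqrt((target - E_upper) / E_lower), which keeps the total kinetic
# energy at its target value. A minimal NumPy-only version of that
# bookkeeping (ignoring the Hermitian-symmetry weighting of energy_fourier):
def _example_low_wavenumber_forcing(u_hat, k2, kf2, target_energy):
    mask = np.where(k2 <= kf2**2, 1, 0)
    energy_lower = np.sum(np.abs(u_hat * mask)**2)
    energy_upper = np.sum(np.abs(u_hat)**2) - energy_lower
    alpha = np.sqrt((target_energy - energy_upper) / energy_lower)
    # scale the forced (low-wavenumber) modes, leave the rest untouched
    return u_hat * (alpha * mask + (1 - mask))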
def update(context):
global k, w, im1, energy_new
c = context
params = config.params
solver = config.solver
curl_hat = Function(c.VT, buffer=c.work[(c.U_hat, 2, True)])
if solver.rank == 0:
c.U_hat[:, 0, 0, 0] = 0
if params.solver == 'VV':
c.U_hat = solver.cross2(c.U_hat, c.K_over_K2, c.W_hat)
energy_new = energy_fourier(c.U_hat, c.T)
energy_lower = energy_fourier(c.U_hat*c.k2_mask, c.T)
energy_upper = energy_new - energy_lower
    alpha2 = (c.target_energy - energy_upper) / energy_lower
alpha = np.sqrt(alpha2)
#du = c.U_hat*c.k2_mask*(alpha)
#dus = energy_fourier(du*c.U_hat, c.T)
energy_old = energy_new
#c.dU[:] = alpha*c.k2_mask*c.U_hat
c.U_hat *= (alpha*c.k2_mask + (1-c.k2_mask))
energy_new = energy_fourier(c.U_hat, c.T)
assert np.sqrt((energy_new-c.target_energy)**2) < 1e-7, np.sqrt((energy_new-c.target_energy)**2)
if params.solver == 'VV':
c.W_hat = solver.cross2(c.W_hat, c.K, c.U_hat)
if (params.tstep % params.compute_energy == 0 or
params.tstep % params.plot_step == 0 and params.plot_step > 0):
solver.get_velocity(**c)
solver.get_curl(**c)
if 'NS' in params.solver:
solver.get_pressure(**c)
K = c.K
if plt is not None:
if params.tstep % params.plot_step == 0 and solver.rank == 0 and params.plot_step > 0:
#div_u = solver.get_divergence(**c)
if not plt.fignum_exists(1):
plt.figure(1)
#im1 = plt.contourf(c.X[1][:,:,0], c.X[0][:,:,0], div_u[:,:,10], 100)
im1 = plt.contourf(c.X[1][..., 0], c.X[0][..., 0], c.U[0, ..., 10], 100)
plt.colorbar(im1)
plt.draw()
else:
im1.ax.clear()
#im1.ax.contourf(c.X[1][:,:,0], c.X[0][:,:,0], div_u[:,:,10], 100)
im1.ax.contourf(c.X[1][..., 0], c.X[0][..., 0], c.U[0, ..., 10], 100)
im1.autoscale()
plt.pause(1e-6)
if params.tstep % params.compute_spectrum == 0:
Ek, _, _, _, _ = spectrum(solver, context)
f = h5py.File(context.spectrumname, driver='mpio', comm=solver.comm)
f['Turbulence/Ek'].create_dataset(str(params.tstep), data=Ek)
f.close()
if params.tstep % params.compute_energy == 0:
dx, L = params.dx, params.L
#ww = solver.comm.reduce(sum(curl*curl)/np.prod(params.N)/2)
duidxj = np.zeros(((3, 3)+c.U[0].shape), dtype=c.float)
for i in range(3):
for j in range(3):
duidxj[i, j] = c.T.backward(1j*K[j]*c.U_hat[i], duidxj[i, j])
ww2 = L2_norm(solver.comm, duidxj)*params.nu
#ww2 = solver.comm.reduce(sum(duidxj*duidxj))
ddU = np.zeros(((3,)+c.U[0].shape), dtype=c.float)
dU = solver.ComputeRHS(c.dU, c.U_hat, solver, **c)
for i in range(3):
ddU[i] = c.T.backward(dU[i], ddU[i])
ww3 = solver.comm.allreduce(sum(ddU*c.U))/np.prod(params.N)
##if solver.rank == 0:
##print('W ', params.nu*ww, params.nu*ww2, ww3, ww-ww2)
curl_hat = solver.cross2(curl_hat, K, c.U_hat)
dissipation = energy_fourier(curl_hat, c.T)
div_u = solver.get_divergence(**c)
#du = 1j*(c.K[0]*c.U_hat[0]+c.K[1]*c.U_hat[1]+c.K[2]*c.U_hat[2])
div_u = L2_norm(solver.comm, div_u)
#div_u2 = energy_fourier(solver.comm, 1j*(K[0]*c.U_hat[0]+K[1]*c.U_hat[1]+K[2]*c.U_hat[2]))
kk = 0.5*energy_new
eps = dissipation*params.nu
Re_lam = np.sqrt(20*kk**2/(3*params.nu*eps))
Re_lam2 = kk*np.sqrt(20./3.)/(params.nu*params.kd)**2
kold[0] = energy_new
e0, e1 = energy_new, L2_norm(solver.comm, c.U)
ww4 = (energy_new-energy_old)/2/params.dt
if solver.rank == 0:
k.append(energy_new)
w.append(dissipation)
print('%2.4f %2.6e %2.6e %2.6e %2.6e %2.6e %2.6e %2.6e %2.6e'%(params.t, e0, e1, eps, ww2, ww3, ww4, Re_lam, Re_lam2))
#if params.tstep % params.compute_energy == 1:
#if 'NS' in params.solver:
#kk2 = comm.reduce(sum(U.astype(float64)*U.astype(float64))*dx[0]*dx[1]*dx[2]/L[0]/L[1]/L[2]/2)
#if rank == 0:
#print 0.5*(kk2-kold[0])/params.dt
def init_from_file(filename, solver, context):
f = h5py.File(filename, driver="mpio", comm=solver.comm)
assert "0" in f["U/3D"]
U_hat = context.U_hat
s = context.T.local_slice(True)
U_hat[:] = f["U/3D/0"][:, s[0], s[1], s[2]]
if solver.rank == 0:
U_hat[:, 0, 0, 0] = 0.0
if 'VV' in config.params.solver:
context.W_hat = solver.cross2(context.W_hat, context.K, context.U_hat)
context.target_energy = energy_fourier(U_hat, context.T)
f.close()
if __name__ == "__main__":
import h5py
config.update(
{'nu': 0.005428, # Viscosity (not used, see below)
'dt': 0.002, # Time step
'T': 5, # End time
'L': [2.*pi, 2.*pi, 2.*pi],
'checkpoint': 100,
'write_result': 1e8,
'dealias': '3/2-rule',
}, "triplyperiodic"
)
config.triplyperiodic.add_argument("--N", default=[60, 60, 60], nargs=3,
help="Mesh size. Trumps M.")
config.triplyperiodic.add_argument("--compute_energy", type=int, default=100)
config.triplyperiodic.add_argument("--compute_spectrum", type=int, default=1000)
config.triplyperiodic.add_argument("--plot_step", type=int, default=1000)
config.triplyperiodic.add_argument("--Kf2", type=int, default=3)
config.triplyperiodic.add_argument("--kd", type=float, default=50.)
config.triplyperiodic.add_argument("--Re_lam", type=float, default=84.)
sol = get_solver(update=update, mesh="triplyperiodic")
config.params.nu = (1./config.params.kd**(4./3.))
context = sol.get_context()
initialize(sol, context)
#init_from_file("NS_isotropic_60_60_60_c.h5", sol, context)
context.hdf5file.filename = "NS_isotropic_{}_{}_{}".format(*config.params.N)
Ek, bins, E0, E1, E2 = spectrum(sol, context)
context.spectrumname = context.hdf5file.filename+".h5"
f = h5py.File(context.spectrumname, mode='w', driver='mpio', comm=sol.comm)
f.create_group("Turbulence")
f["Turbulence"].create_group("Ek")
bins = np.array(bins)
f["Turbulence"].create_dataset("bins", data=bins)
f.close()
solve(sol, context)
from mpi4py_fft import generate_xdmf
if sol.rank == 0:
generate_xdmf(context.hdf5file.filename+"_w.h5")
|
gpl-3.0
|
cogmission/nupic.research
|
projects/sequence_prediction/continuous_sequence/data/processSantaFeDataset.py
|
13
|
2104
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import pandas as pd
import csv
def convertDatToCSV(inputFileName, outputFileName, Nrpts=1, maxLength=None):
df = pd.read_table(inputFileName, header=None, names=['value'])
if maxLength is None:
maxLength = len(df)
outputFile = open(outputFileName,"w")
csvWriter = csv.writer(outputFile)
if Nrpts==1:
csvWriter.writerow(['step', 'data'])
csvWriter.writerow(['float', 'float'])
csvWriter.writerow(['', ''])
for i in xrange(maxLength):
csvWriter.writerow([i,df['value'][i]])
else:
csvWriter.writerow(['step', 'data', 'reset'])
csvWriter.writerow(['float', 'float', 'int'])
csvWriter.writerow(['', '', 'R'])
for _ in xrange(Nrpts):
for i in xrange(maxLength):
if i==0:
reset = 1
else:
reset = 0
csvWriter.writerow([i, df['value'][i], reset])
outputFile.close()
inputFileName = 'SantaFe_A_cont.dat'
outputFileName = 'SantaFe_A_cont.csv'
convertDatToCSV(inputFileName, outputFileName, maxLength=100)
inputFileName = 'SantaFe_A.dat'
outputFileName = 'SantaFe_A.csv'
convertDatToCSV(inputFileName, outputFileName, Nrpts=1)
|
agpl-3.0
|
hoechenberger/pylibnidaqmx
|
nidaqmx/wxagg_plot.py
|
16
|
4515
|
import os
import sys
import time
import traceback
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
import wx
from matplotlib.figure import Figure
class PlotFigure(wx.Frame):
def OnKeyPressed (self, event):
key = event.key
if key=='q':
self.OnClose(event)
def __init__(self, func, timer_period):
wx.Frame.__init__(self, None, -1, "Plot Figure")
self.fig = Figure((12,9), 75)
self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
self.canvas.mpl_connect('key_press_event', self.OnKeyPressed)
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
self.func = func
self.plot = None
self.timer_period = timer_period
self.timer = wx.Timer(self)
self.is_stopped = False
if os.name=='nt':
# On Windows, default frame size behaviour is incorrect
# you don't need this under Linux
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
            self.toolbar.SetSize(wx.Size(fw, th))
# Create a figure manager to manage things
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(self.canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
# Best to allow the toolbar to resize!
sizer.Add(self.toolbar, 0, wx.GROW)
self.SetSizer(sizer)
self.Fit()
self.Bind(wx.EVT_TIMER, self.OnTimerWrap, self.timer)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.timer.Start(timer_period)
def GetToolBar(self):
# You will need to override GetToolBar if you are using an
# unmanaged toolbar in your frame
return self.toolbar
def OnClose(self, event):
self.is_stopped = True
print 'Closing PlotFigure, please wait.'
self.timer.Stop()
self.Destroy()
def OnTimerWrap (self, evt):
if self.is_stopped:
print 'Ignoring timer callback'
return
t = time.time()
try:
self.OnTimer (evt)
except KeyboardInterrupt:
self.OnClose(evt)
duration = 1000*(time.time () - t)
if duration > self.timer_period:
print 'Changing timer_period from %s to %s msec' % (self.timer_period, 1.2*duration)
self.timer_period = 1.2*duration
self.timer.Stop()
self.timer.Start (self.timer_period)
def OnTimer(self, evt):
try:
xdata, ydata_list, legend = self.func()
except RuntimeError:
traceback.print_exc(file=sys.stderr)
self.OnClose(evt)
return
if len (ydata_list.shape)==1:
ydata_list = ydata_list.reshape((1, ydata_list.size))
if self.plot is None:
self.axes = self.fig.add_axes([0.1,0.1,0.8,0.8])
l = []
for ydata in ydata_list:
l.extend ([xdata, ydata])
self.plot = self.axes.plot(*l)
self.axes.set_xlabel('Seconds')
self.axes.set_ylabel('Volts')
self.axes.set_title('nof samples=%s' % (len(xdata)))
self.axes.legend (legend)
else:
self.axes.set_xlim(xmin = xdata[0], xmax=xdata[-1])
ymin, ymax = 1e9,-1e9
for line, data in zip (self.plot, ydata_list):
line.set_xdata(xdata)
line.set_ydata(data)
ymin, ymax = min (data.min (), ymin), max (data.max (), ymax)
dy = (ymax-ymin)/20
self.axes.set_ylim(ymin=ymin-dy, ymax=ymax+dy)
self.canvas.draw()
def onEraseBackground(self, evt):
# this is supposed to prevent redraw flicker on some X servers...
pass
def animated_plot(func, timer_period):
app = wx.PySimpleApp(clearSigInt=False)
frame = PlotFigure(func, timer_period)
frame.Show()
app.MainLoop()
if __name__ == '__main__':
from numpy import *
import time
start_time = time.time ()
def func():
x = arange (100, dtype=float)/100*pi
d = sin (x+(time.time ()-start_time))
return x, d, ['sin (x+time)']
try:
animated_plot (func, 1)
except Exception, msg:
print 'Got exception: %s' % ( msg)
else:
print 'Exited normally'
|
bsd-3-clause
|
signed/intellij-community
|
python/helpers/pydev/pydev_ipython/inputhook.py
|
11
|
19160
|
# coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_QT5 = 'qt5'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
"""Ignore CTRL+C (not implemented)."""
pass
def allow_CTRL_C():
"""Take CTRL+C into account (not implemented)."""
pass
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
    This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self._return_control_callback = None
self._apps = {}
self._reset()
self.pyplot_imported = False
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._current_gui = None
def set_return_control_callback(self, return_control_callback):
self._return_control_callback = return_control_callback
def get_return_control_callback(self):
return self._return_control_callback
def return_control(self):
return self._return_control_callback()
def get_inputhook(self):
return self._callback
def set_inputhook(self, callback):
"""Set inputhook to callback."""
# We don't (in the context of PyDev console) actually set PyOS_InputHook, but rather
# while waiting for input on xmlrpc we run this code
self._callback = callback
def clear_inputhook(self, app=None):
"""Clear input hook.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
self._reset()
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
            If None, clear all app references. If one of ('wx', 'qt4'), clear
            the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif gui in self._apps:
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the ``PyOS_InputHook`` for wxPython, which allows
        wxPython to integrate with terminal-based applications like
        IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
from distutils.version import LooseVersion as V
wx_version = V(wx.__version__).version # @UndefinedVariable
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__) # @UndefinedVariable
from pydev_ipython.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
if app is None:
app = wx.GetApp() # @UndefinedVariable
if app is None:
app = wx.App(redirect=False, clearSigInt=False) # @UndefinedVariable
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_WX in self._apps:
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the PyOS_InputHook for PyQt4, which allows
        PyQt4 to integrate with terminal-based applications like
        IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
            from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from pydev_ipython.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_QT4 in self._apps:
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_qt5(self, app=None):
from pydev_ipython.inputhookqt5 import create_inputhook_qt5
app, inputhook_qt5 = create_inputhook_qt5(self, app)
self.set_inputhook(inputhook_qt5)
self._current_gui = GUI_QT5
app._in_event_loop = True
self._apps[GUI_QT5] = app
return app
def disable_qt5(self):
if GUI_QT5 in self._apps:
self._apps[GUI_QT5]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for PyGTK, which allows
        PyGTK to integrate with terminal-based applications like
        IPython.
"""
from pydev_ipython.inputhookgtk import create_inputhook_gtk
self.set_inputhook(create_inputhook_gtk(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
try:
import Tkinter as _TK
except:
# Python 3
import tkinter as _TK # @UnresolvedImport
app = _TK.Tk()
app.withdraw()
self._apps[GUI_TK] = app
from pydev_ipython.inputhooktk import create_inputhook_tk
self.set_inputhook(create_inputhook_tk(app))
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_glut(self, app=None):
""" Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for GLUT, which allows GLUT to
        integrate with terminal-based applications like IPython. Due to GLUT
        limitations, it is currently not possible to start the event loop
        without first creating a window. You should therefore not create another
        window, but instead use the one already created. See 'gui-glut.py' in the
        docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
import OpenGL.GLUT as glut # @UnresolvedImport
from pydev_ipython.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self._apps:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut_display_mode)
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glut.glutCreateWindow(sys.argv[0])
glut.glutReshapeWindow(1, 1)
glut.glutHideWindow()
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
else:
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
self.set_inputhook(inputhook_glut)
self._current_gui = GUI_GLUT
self._apps[GUI_GLUT] = True
def disable_glut(self):
"""Disable event loop integration with glut.
        This sets PyOS_InputHook to NULL, sets the display function to a
        dummy one, and sets the timer to a dummy timer that will be triggered
        very far in the future.
"""
import OpenGL.GLUT as glut # @UnresolvedImport
from glut_support import glutMainLoopEvent # @UnresolvedImport
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
self.clear_inputhook()
def enable_pyglet(self, app=None):
"""Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the ``PyOS_InputHook`` for pyglet, which allows
        pyglet to integrate with terminal-based applications like
        IPython.
"""
from pydev_ipython.inputhookpyglet import inputhook_pyglet
self.set_inputhook(inputhook_pyglet)
self._current_gui = GUI_PYGLET
return app
def disable_pyglet(self):
"""Disable event loop integration with pyglet.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for Gtk3, which allows
        Gtk3 to integrate with terminal-based applications like
        IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk3(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_mac(self, app=None):
""" Enable event loop integration with MacOSX.
        We call pyplot.pause, which updates and displays the active
        figure during the pause. It is not MacOSX-specific, but it lets us
        avoid input hooks in the native MacOSX backend.
        We also should not import pyplot until the user does, because the
        backend can only be chosen before pyplot is imported for the first
        time.
"""
def inputhook_mac(app=None):
if self.pyplot_imported:
pyplot = sys.modules['matplotlib.pyplot']
try:
pyplot.pause(0.01)
except:
pass
else:
if 'matplotlib.pyplot' in sys.modules:
self.pyplot_imported = True
self.set_inputhook(inputhook_mac)
self._current_gui = GUI_OSX
def disable_mac(self):
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_qt5 = inputhook_manager.enable_qt5
disable_qt5 = inputhook_manager.disable_qt5
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
if get_return_control_callback() is None:
raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")
guis = {GUI_NONE: clear_inputhook,
GUI_OSX: enable_mac,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt4, # qt3 not supported
GUI_QT4: enable_qt4,
GUI_QT5: enable_qt5,
GUI_GLUT: enable_glut,
GUI_PYGLET: enable_pyglet,
GUI_GTK3: enable_gtk3,
}
try:
gui_hook = guis[gui]
except KeyError:
if gui is None or gui == '':
gui_hook = clear_inputhook
else:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
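# Hedged usage sketch (illustration only, not part of the original module):
# a return-control callback must be registered before any GUI hook can be
# enabled; passing GUI_NONE (or None/'') simply clears the current hook.
def _example_enable_gui_usage():
    set_return_control_callback(lambda: True)
    enable_gui(GUI_NONE)
    return current_gui()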
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_QT5",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt4",
"disable_qt4",
"enable_qt5",
"disable_qt5",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
|
apache-2.0
|
hakii27/PythonVersionMaster
|
Results/OneDimDot/MCTDHF/w=05/DensityPlot.py
|
1
|
1339
|
infile = open("DensityCC_w=05_L=10.txt",'r')
infile2 = open("DensityFCI_w=05_N=2_L=6_t=10.txt",'r')
infile3 = open("DensityCCSD_w=05_N=2_L=6_t=10.txt",'r')
densityCC_HF = []
densityFCI = []
densityCC2 = []
infile.next()
infile.next()
infile2.next()
infile2.next()
infile3.next()
infile3.next()
for line in infile:
tmp = line.split(",")
tmp2 = tmp[0].split("(")
d = float(tmp2[1])
densityCC_HF.append(d)
for line in infile2:
tmp = line.split(",")
tmp2 = tmp[0].split("(")
d = float(tmp2[1])
print d
densityFCI.append(d)
for line in infile3:
tmp = line.split(",")
tmp2 = tmp[0].split("(")
d = float(tmp2[1])
print d
densityCC2.append(d)
from numpy import *
Lx = 10
densityCC_HF = array(densityCC_HF)
densityFCI = array(densityFCI)
densityCC2 = array(densityCC2)
#densityCC_HF = array(densityCC_HF)
x = linspace(-Lx,Lx,len(densityFCI))
dx = x[1]-x[0]
print sum(densityFCI)
print sum(densityCC_HF)
print sum(densityCC2)
import matplotlib.pyplot as plt
plt.figure(1)
plt.title("Onebody Density for w=0.5 FCI vs. CCSD")
plt.plot(x,densityCC2/dx,'-ob',x,densityFCI/dx,'-r') #,t_vec,EsinPert,'-r')
plt.legend(["CCSD","FCI"])
plt.xlabel("x",fontsize=16)
plt.ylabel("$p(x,x)$",fontsize=16)
plt.figure(2)
plt.title("Difference")
plt.semilogy(x,abs(densityCC2-densityFCI)/dx,'o')
plt.show()
|
lgpl-3.0
|
awanke/bokeh
|
bokeh/compat/bokeh_exporter.py
|
38
|
1508
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from matplotlib.collections import LineCollection, PolyCollection
from .mplexporter.exporter import Exporter
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class BokehExporter(Exporter):
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
if isinstance(collection, LineCollection):
self.renderer.make_line_collection(collection)
elif isinstance(collection, PolyCollection):
self.renderer.make_poly_collection(collection)
else:
super(BokehExporter, self).draw_collection(ax, collection, force_pathtrans, force_offsettrans)
def draw_patch(self, ax, patch, force_trans=None):
super(BokehExporter, self).draw_patch(ax, patch, force_trans)
|
bsd-3-clause
|
basilfx/RIOT
|
tests/pkg_utensor/generate_digit.py
|
19
|
1149
|
#!/usr/bin/env python3
"""Generate a binary file from a sample image of the MNIST dataset.
Pixel of the sample are stored as float32, images have size 28x28.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
_, (mnist_test, _) = tf.keras.datasets.mnist.load_data()
data = mnist_test[args.index]
output_path = os.path.join(SCRIPT_DIR, args.output)
np.ndarray.tofile(data.astype('float32'), output_path)
if args.no_plot is False:
plt.gray()
plt.imshow(data.reshape(28, 28))
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--index", type=int, default=0,
help="Image index in MNIST test dataset")
parser.add_argument("-o", "--output", type=str, default='digit',
help="Output filename")
parser.add_argument("--no-plot", default=False, action='store_true',
help="Disable image display in matplotlib")
main(parser.parse_args())
|
lgpl-2.1
|
burgerdev/hostload
|
scripts/demo_mg.py
|
1
|
4983
|
# -*- coding: utf-8 -*-
import warnings
import tempfile
from datetime import datetime
from collections import OrderedDict
try:
from matplotlib import pyplot as plt
except ImportError:
_plot_available = False
else:
_plot_available = True
from pylearn2.models import mlp
from tsdl.data.integrationdatasets import OpMackeyGlass
# workflow
from tsdl.workflow import Workflow
# features
from tsdl.features import OpRecent
from tsdl.targets import OpExponentiallySegmentedPattern
from tsdl.split import OpTrainTestSplit
from tsdl.report import OpRegressionReport
# classifiers
from tsdl.classifiers import OpSVMTrain
from tsdl.classifiers import OpSVMPredict
from tsdl.classifiers import OpMLPTrain
from tsdl.classifiers import OpMLPPredict
from tsdl.classifiers.mlp_init import LeastSquaresWeightInitializer
from tsdl.classifiers.mlp_init import PCAWeightInitializer
from tsdl.classifiers.mlp_init import StandardWeightInitializer
# caches
from tsdl.data import OpPickleCache
from tsdl.data import OpHDF5Cache
# train extensions
from tsdl.tools.extensions import ProgressMonitor
# options available for initialization
_train_choice = OrderedDict()
_train_choice["random"] = {"class": StandardWeightInitializer}
_train_choice["pca"] = tuple([{"class": init}
for init in (PCAWeightInitializer,
PCAWeightInitializer,
LeastSquaresWeightInitializer)])
_train_choice["grid"] = None
_train_choice["svm"] = None
def _get_conf(args):
if args.mode == "svm":
train = {"class": OpSVMTrain}
predict = {"class": OpSVMPredict}
else:
if args.mode not in _train_choice or _train_choice[args.mode] is None:
raise NotImplementedError(
"mode '{}' not implemented yet".format(args.mode))
train = {"class": OpMLPTrain,
"weight_initializer": _train_choice[args.mode],
"layer_sizes": (100, 10),
"layer_classes": (mlp.Sigmoid,),
"max_epochs": args.epochs,
"terminate_early": False,
"extensions": ({"class": ProgressMonitor,
"channel": "train_objective"},)}
predict = {"class": OpMLPPredict}
config = {"class": Workflow,
"source": {"class": OpMackeyGlass,
"shape": (10000,)},
"features": {"class": OpRecent, "window_size": 8},
"target": {"class": OpExponentiallySegmentedPattern,
"baseline_size": 32,
"num_segments": 1},
"split": {"class": OpTrainTestSplit},
"classifierCache": {"class": OpPickleCache},
"train": train,
"predict": predict,
"predictionCache": {"class": OpHDF5Cache},
"report": {"class": OpRegressionReport,
"levels": 50}}
return config
def main(args):
conf = _get_conf(args)
tempdir = args.workingdir
if tempdir is None:
prefix = "{}_{:%Y-%m-%d_%H-%M}".format(args.mode, datetime.now())
tempdir = tempfile.mkdtemp(prefix=prefix)
workflow = Workflow.build(conf, workingdir=tempdir)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
workflow.run()
if args.plot:
assert _plot_available, "matplotlib is needed for option --plot"
ground_truth = workflow.Target[...].wait()
prediction = workflow.Prediction[...].wait()
plt.figure()
plt.hold(True)
plt.plot(ground_truth, 'b')
plt.plot(prediction, 'r')
plt.hold(False)
plt.show()
print("output written to dir {}".format(tempdir))
if __name__ == "__main__":
from argparse import ArgumentParser
help_text = """
This script can be used to demonstrate the differences in training a neural
network with either random initialization or a more sophisticated
initialization. For comparison to non-NN approaches, SVM regression is
available.
"""
default_epochs = 100
mode_help = "use this learning tool (options: {})".format(
", ".join(_train_choice.keys()))
parser = ArgumentParser(description=help_text)
parser.add_argument("-d", "--workingdir", action="store", default=None,
help="working directory (default: temp dir)")
parser.add_argument("-m", "--mode", action="store", default="svm",
help=mode_help)
parser.add_argument("-p", "--plot", action="store_true",
help="plot results (default: don't plot)",
default=False)
parser.add_argument("-e", "--epochs", action="store", type=int,
help="max epochs (default: {})".format(default_epochs),
default=default_epochs, metavar="<int>")
args = parser.parse_args()
main(args)
|
mit
|