repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
nbortolotti/tensorflow-code-experiences | experiences/bq_integration/bqtensor.py | 1 | 1787 |
import tensorflow as tf
import pandas as pd
import ConfigParser  # Python 2 module; named configparser in Python 3
tf.enable_eager_execution()  # enable eager execution (TF 1.x API)
config = ConfigParser.ConfigParser()
config.read('config.env')
project_id = config.get('google', 'cloud_id')  # a Google Cloud project id is required
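# Pull the iris training and test tables from BigQuery into pandas DataFrames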
df_train = pd.io.gbq.read_gbq('''SELECT * FROM [socialagilelearning:iris.training]''', project_id=project_id, private_key=config.get('google','service_key'), verbose=False)
df_test = pd.io.gbq.read_gbq('''SELECT * FROM [socialagilelearning:iris.test]''', project_id=project_id, private_key=config.get('google','service_key'), verbose=False)
categories='Plants'
train_plantfeatures, train_categories = df_train, df_train.pop(categories)
test_plantfeatures, test_categories = df_test, df_test.pop(categories)
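# One-hot encode the integer species labels (3 classes)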
y_categorical = tf.contrib.keras.utils.to_categorical(train_categories, num_classes=3)
y_categorical_test = tf.contrib.keras.utils.to_categorical(test_categories, num_classes=3)
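# Wrap features and one-hot labels in tf.data pipelines: batched, shuffled, repeating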
dataset = tf.data.Dataset.from_tensor_slices((train_plantfeatures.values, y_categorical))
dataset = dataset.batch(32)
dataset = dataset.shuffle(1000)
dataset = dataset.repeat()
dataset_test = tf.data.Dataset.from_tensor_slices((test_plantfeatures.values, y_categorical_test))
dataset_test = dataset_test.batch(32)
dataset_test = dataset_test.shuffle(1000)
dataset_test = dataset_test.repeat()
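# Small feed-forward classifier: 4 iris features -> 16 hidden units -> 3-way softmax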
model = tf.keras.Sequential([
tf.keras.layers.Dense(16, input_dim=4),
tf.keras.layers.Dense(3, activation=tf.nn.softmax),
])
opt = tf.train.GradientDescentOptimizer(learning_rate=0.001)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])
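# Train with plain SGD, then report loss/accuracy on the held-out test pipeline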
model.fit(dataset, steps_per_epoch=32, epochs=100, verbose=1)
loss, accuracy = model.evaluate(dataset_test, steps=32)
print("loss:%f"% (loss))
print("accuracy: %f"% (accuracy))
| apache-2.0 |
petercable/xray | xray/test/test_dataset.py | 1 | 87064 |
from copy import copy, deepcopy
from textwrap import dedent
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import dask.array as da
except ImportError:
pass
import numpy as np
import pandas as pd
from xray import (align, concat, conventions, backends, Dataset, DataArray,
Variable, Coordinate, auto_combine, open_dataset,
set_options)
from xray.core import indexing, utils
from xray.core.pycompat import iteritems, OrderedDict
from . import (TestCase, unittest, InaccessibleArray, UnexpectedDataAccess,
requires_dask)
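# Build a small in-memory Dataset with three random variables over dims dim1/dim2/dim3,
# a 20-step time index, and an integer 'numbers' coordinate; used as the fixture for
# most of the tests below.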
def create_test_data(seed=None):
rs = np.random.RandomState(seed)
_vars = {'var1': ['dim1', 'dim2'],
'var2': ['dim1', 'dim2'],
'var3': ['dim3', 'dim1']}
_dims = {'dim1': 8, 'dim2': 9, 'dim3': 10}
obj = Dataset()
obj['time'] = ('time', pd.date_range('2000-01-01', periods=20))
obj['dim1'] = ('dim1', np.arange(_dims['dim1'], dtype='int64'))
obj['dim2'] = ('dim2', 0.5 * np.arange(_dims['dim2']))
obj['dim3'] = ('dim3', list('abcdefghij'))
for v, dims in sorted(_vars.items()):
data = rs.normal(size=tuple(_dims[d] for d in dims))
obj[v] = (dims, data, {'foo': 'variable'})
obj.coords['numbers'] = ('dim3', np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3],
dtype='int64'))
return obj
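# An in-memory store whose variables raise UnexpectedDataAccess when their data is
# actually read; used to verify that opening and indexing stay lazy.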
class InaccessibleVariableDataStore(backends.InMemoryDataStore):
def get_variables(self):
def lazy_inaccessible(x):
data = indexing.LazilyIndexedArray(InaccessibleArray(x.values))
return Variable(x.dims, data, x.attrs)
return dict((k, lazy_inaccessible(v)) for
k, v in iteritems(self._variables))
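# Tests for Dataset construction, repr, indexing, alignment, groupby, reductions,
# and conversion to/from pandas objects.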
class TestDataset(TestCase):
def test_repr(self):
data = create_test_data(seed=123)
data.attrs['foo'] = 'bar'
# need to insert str dtype at runtime to handle both Python 2 & 3
expected = dedent("""\
<xray.Dataset>
Dimensions: (dim1: 8, dim2: 9, dim3: 10, time: 20)
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
* dim1 (dim1) int64 0 1 2 3 4 5 6 7
* dim2 (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0
* dim3 (dim3) %s 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
numbers (dim3) int64 0 1 2 0 0 1 1 2 2 3
Data variables:
var1 (dim1, dim2) float64 -1.086 0.9973 0.283 -1.506 -0.5786 1.651 ...
var2 (dim1, dim2) float64 1.162 -1.097 -2.123 1.04 -0.4034 -0.126 ...
var3 (dim3, dim1) float64 0.5565 -0.2121 0.4563 1.545 -0.2397 0.1433 ...
Attributes:
foo: bar""") % data['dim3'].dtype
actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
print(actual)
self.assertEqual(expected, actual)
with set_options(display_width=100):
max_len = max(map(len, repr(data).split('\n')))
assert 90 < max_len < 100
expected = dedent("""\
<xray.Dataset>
Dimensions: ()
Coordinates:
*empty*
Data variables:
*empty*""")
actual = '\n'.join(x.rstrip() for x in repr(Dataset()).split('\n'))
print(actual)
self.assertEqual(expected, actual)
# verify that ... doesn't appear for scalar coordinates
data = Dataset({'foo': ('x', np.ones(10))}).mean()
expected = dedent("""\
<xray.Dataset>
Dimensions: ()
Coordinates:
*empty*
Data variables:
foo float64 1.0""")
actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
print(actual)
self.assertEqual(expected, actual)
# verify long attributes are truncated
data = Dataset(attrs={'foo': 'bar' * 1000})
self.assertTrue(len(repr(data)) < 1000)
def test_constructor(self):
x1 = ('x', 2 * np.arange(100))
x2 = ('x', np.arange(1000))
z = (['x', 'y'], np.arange(1000).reshape(100, 10))
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
Dataset({'a': x1, 'b': x2})
with self.assertRaisesRegexp(ValueError, 'must be defined with 1-d'):
Dataset({'a': x1, 'x': z})
with self.assertRaisesRegexp(TypeError, 'must be an array or'):
Dataset({'x': (1, 2, 3, 4, 5, 6, 7)})
with self.assertRaisesRegexp(ValueError, 'already exists as a scalar'):
Dataset({'x': 0, 'y': ('x', [1, 2, 3])})
# verify handling of DataArrays
expected = Dataset({'x': x1, 'z': z})
actual = Dataset({'z': expected['z']})
self.assertDatasetIdentical(expected, actual)
def test_constructor_1d(self):
expected = Dataset({'x': (['x'], 5.0 + np.arange(5))})
actual = Dataset({'x': 5.0 + np.arange(5)})
self.assertDatasetIdentical(expected, actual)
actual = Dataset({'x': [5, 6, 7, 8, 9]})
self.assertDatasetIdentical(expected, actual)
def test_constructor_0d(self):
expected = Dataset({'x': ([], 1)})
for arg in [1, np.array(1), expected['x']]:
actual = Dataset({'x': arg})
self.assertDatasetIdentical(expected, actual)
d = pd.Timestamp('2000-01-01T12')
args = [True, None, 3.4, np.nan, 'hello', u'uni', b'raw',
np.datetime64('2000-01-01T00'), d, d.to_datetime()]
for arg in args:
print(arg)
expected = Dataset({'x': ([], arg)})
actual = Dataset({'x': arg})
self.assertDatasetIdentical(expected, actual)
def test_constructor_auto_align(self):
a = DataArray([1, 2], [('x', [0, 1])])
b = DataArray([3, 4], [('x', [1, 2])])
# verify align uses outer join
expected = Dataset({'a': ('x', [1, 2, np.nan]),
'b': ('x', [np.nan, 3, 4])})
actual = Dataset({'a': a, 'b': b})
self.assertDatasetIdentical(expected, actual)
# regression test for GH346
self.assertIsInstance(actual.variables['x'], Coordinate)
# variable with different dimensions
c = ('y', [3, 4])
expected2 = expected.merge({'c': c})
actual = Dataset({'a': a, 'b': b, 'c': c})
self.assertDatasetIdentical(expected2, actual)
# variable that is only aligned against the aligned variables
d = ('x', [3, 2, 1])
expected3 = expected.merge({'d': d})
actual = Dataset({'a': a, 'b': b, 'd': d})
self.assertDatasetIdentical(expected3, actual)
e = ('x', [0, 0])
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
Dataset({'a': a, 'b': b, 'e': e})
def test_constructor_compat(self):
data = OrderedDict([('x', DataArray(0, coords={'y': 1})),
('y', ('z', [1, 1, 1]))])
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
Dataset(data, compat='equals')
expected = Dataset({'x': 0}, {'y': ('z', [1, 1, 1])})
actual = Dataset(data)
self.assertDatasetIdentical(expected, actual)
actual = Dataset(data, compat='broadcast_equals')
self.assertDatasetIdentical(expected, actual)
data = OrderedDict([('y', ('z', [1, 1, 1])),
('x', DataArray(0, coords={'y': 1}))])
actual = Dataset(data)
self.assertDatasetIdentical(expected, actual)
original = Dataset({'a': (('x', 'y'), np.ones((2, 3)))},
{'c': (('x', 'y'), np.zeros((2, 3)))})
expected = Dataset({'a': ('x', np.ones(2)),
'b': ('y', np.ones(3))},
{'c': (('x', 'y'), np.zeros((2, 3)))})
# use an OrderedDict to ensure test results are reproducible; otherwise
# the order of appearance of x and y matters for the order of
# dimensions in 'c'
actual = Dataset(OrderedDict([('a', original['a'][:, 0].drop('y')),
('b', original['a'][0].drop('x'))]))
self.assertDatasetIdentical(expected, actual)
data = {'x': DataArray(0, coords={'y': 3}), 'y': ('z', [1, 1, 1])}
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
Dataset(data)
data = {'x': DataArray(0, coords={'y': 1}), 'y': [1, 1]}
actual = Dataset(data)
expected = Dataset({'x': 0}, {'y': [1, 1]})
self.assertDatasetIdentical(expected, actual)
def test_constructor_with_coords(self):
with self.assertRaisesRegexp(ValueError, 'redundant variables and co'):
Dataset({'a': ('x', [1])}, {'a': ('x', [1])})
ds = Dataset({}, {'a': ('x', [1])})
self.assertFalse(ds.data_vars)
self.assertItemsEqual(ds.coords.keys(), ['x', 'a'])
def test_properties(self):
ds = create_test_data()
self.assertEqual(ds.dims,
{'dim1': 8, 'dim2': 9, 'dim3': 10, 'time': 20})
self.assertItemsEqual(ds, list(ds.variables))
self.assertItemsEqual(ds.keys(), list(ds.variables))
self.assertNotIn('aasldfjalskdfj', ds.variables)
self.assertIn('dim1', repr(ds.variables))
self.assertEqual(len(ds), 8)
self.assertItemsEqual(ds.data_vars, ['var1', 'var2', 'var3'])
self.assertItemsEqual(ds.data_vars.keys(), ['var1', 'var2', 'var3'])
self.assertIn('var1', ds.data_vars)
self.assertNotIn('dim1', ds.data_vars)
self.assertNotIn('numbers', ds.data_vars)
self.assertEqual(len(ds.data_vars), 3)
self.assertItemsEqual(ds.indexes, ['dim1', 'dim2', 'dim3', 'time'])
self.assertEqual(len(ds.indexes), 4)
self.assertIn('dim1', repr(ds.indexes))
self.assertItemsEqual(ds.coords,
['time', 'dim1', 'dim2', 'dim3', 'numbers'])
self.assertIn('dim1', ds.coords)
self.assertIn('numbers', ds.coords)
self.assertNotIn('var1', ds.coords)
self.assertEqual(len(ds.coords), 5)
self.assertEqual(Dataset({'x': np.int64(1),
'y': np.float32([1, 2])}).nbytes, 16)
def test_attr_access(self):
        ds = Dataset({'tmin': ('x', [42], {'units': 'Celsius'})},
attrs={'title': 'My test data'})
self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
self.assertDataArrayIdentical(ds.tmin.x, ds.x)
self.assertEqual(ds.title, ds.attrs['title'])
self.assertEqual(ds.tmin.units, ds['tmin'].attrs['units'])
self.assertLessEqual(set(['tmin', 'title']), set(dir(ds)))
self.assertIn('units', set(dir(ds.tmin)))
# should defer to variable of same name
ds.attrs['tmin'] = -999
self.assertEqual(ds.attrs['tmin'], -999)
self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
def test_variable(self):
a = Dataset()
d = np.random.random((10, 3))
a['foo'] = (('time', 'x',), d)
self.assertTrue('foo' in a.variables)
self.assertTrue('foo' in a)
a['bar'] = (('time', 'x',), d)
# order of creation is preserved
self.assertEqual(list(a), ['foo', 'time', 'x', 'bar'])
self.assertTrue(all([a['foo'][i].values == d[i]
for i in np.ndindex(*d.shape)]))
# try to add variable with dim (10,3) with data that's (3,10)
with self.assertRaises(ValueError):
a['qux'] = (('time', 'x'), d.T)
def test_modify_inplace(self):
a = Dataset()
vec = np.random.random((10,))
attributes = {'foo': 'bar'}
a['x'] = ('x', vec, attributes)
self.assertTrue('x' in a.coords)
self.assertIsInstance(a.coords['x'].to_index(),
pd.Index)
self.assertVariableIdentical(a.coords['x'], a.variables['x'])
b = Dataset()
b['x'] = ('x', vec, attributes)
self.assertVariableIdentical(a['x'], b['x'])
self.assertEqual(a.dims, b.dims)
# this should work
a['x'] = ('x', vec[:5])
a['z'] = ('x', np.arange(5))
with self.assertRaises(ValueError):
# now it shouldn't, since there is a conflicting length
a['x'] = ('x', vec[:4])
arr = np.random.random((10, 1,))
scal = np.array(0)
with self.assertRaises(ValueError):
a['y'] = ('y', arr)
with self.assertRaises(ValueError):
a['y'] = ('y', scal)
self.assertTrue('y' not in a.dims)
def test_coords_properties(self):
# use an OrderedDict for coordinates to ensure order across python
# versions
# use int64 for repr consistency on windows
data = Dataset(OrderedDict([('x', ('x', np.array([-1, -2], 'int64'))),
('y', ('y', np.array([0, 1, 2], 'int64'))),
('foo', (['x', 'y'],
np.random.randn(2, 3)))]),
OrderedDict([('a', ('x', np.array([4, 5], 'int64'))),
('b', np.int64(-10))]))
self.assertEqual(4, len(data.coords))
self.assertItemsEqual(['x', 'y', 'a', 'b'], list(data.coords))
self.assertVariableIdentical(data.coords['x'], data['x'].variable)
self.assertVariableIdentical(data.coords['y'], data['y'].variable)
self.assertIn('x', data.coords)
self.assertIn('a', data.coords)
self.assertNotIn(0, data.coords)
self.assertNotIn('foo', data.coords)
with self.assertRaises(KeyError):
data.coords['foo']
with self.assertRaises(KeyError):
data.coords[0]
expected = dedent("""\
Coordinates:
* x (x) int64 -1 -2
* y (y) int64 0 1 2
a (x) int64 4 5
b int64 -10""")
actual = repr(data.coords)
self.assertEqual(expected, actual)
self.assertEqual({'x': 2, 'y': 3}, data.coords.dims)
def test_coords_modify(self):
data = Dataset({'x': ('x', [-1, -2]),
'y': ('y', [0, 1, 2]),
'foo': (['x', 'y'], np.random.randn(2, 3))},
{'a': ('x', [4, 5]), 'b': -10})
actual = data.copy(deep=True)
actual.coords['x'] = ('x', ['a', 'b'])
self.assertArrayEqual(actual['x'], ['a', 'b'])
actual = data.copy(deep=True)
actual.coords['z'] = ('z', ['a', 'b'])
self.assertArrayEqual(actual['z'], ['a', 'b'])
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
data.coords['x'] = ('x', [-1])
actual = data.copy()
del actual.coords['b']
expected = data.reset_coords('b', drop=True)
self.assertDatasetIdentical(expected, actual)
with self.assertRaises(KeyError):
del data.coords['not_found']
with self.assertRaises(KeyError):
del data.coords['foo']
actual = data.copy(deep=True)
actual.coords.update({'c': 11})
expected = data.merge({'c': 11}).set_coords('c')
self.assertDatasetIdentical(expected, actual)
def test_coords_set(self):
one_coord = Dataset({'x': ('x', [0]),
'yy': ('x', [1]),
'zzz': ('x', [2])})
two_coords = Dataset({'zzz': ('x', [2])},
{'x': ('x', [0]),
'yy': ('x', [1])})
all_coords = Dataset(coords={'x': ('x', [0]),
'yy': ('x', [1]),
'zzz': ('x', [2])})
actual = one_coord.set_coords('x')
self.assertDatasetIdentical(one_coord, actual)
actual = one_coord.set_coords(['x'])
self.assertDatasetIdentical(one_coord, actual)
actual = one_coord.set_coords('yy')
self.assertDatasetIdentical(two_coords, actual)
actual = one_coord.set_coords(['yy', 'zzz'])
self.assertDatasetIdentical(all_coords, actual)
actual = one_coord.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = two_coords.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords(['yy', 'zzz'])
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords('zzz')
self.assertDatasetIdentical(two_coords, actual)
with self.assertRaisesRegexp(ValueError, 'cannot remove index'):
one_coord.reset_coords('x')
actual = all_coords.reset_coords('zzz', drop=True)
expected = all_coords.drop('zzz')
self.assertDatasetIdentical(expected, actual)
expected = two_coords.drop('zzz')
self.assertDatasetIdentical(expected, actual)
def test_coords_to_dataset(self):
orig = Dataset({'foo': ('y', [-1, 0, 1])}, {'x': 10, 'y': [2, 3, 4]})
expected = Dataset(coords={'x': 10, 'y': [2, 3, 4]})
actual = orig.coords.to_dataset()
self.assertDatasetIdentical(expected, actual)
def test_coords_merge(self):
orig_coords = Dataset(coords={'a': ('x', [1, 2])}).coords
other_coords = Dataset(coords={'b': ('x', ['a', 'b'])}).coords
expected = Dataset(coords={'a': ('x', [1, 2]),
'b': ('x', ['a', 'b'])})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'x': ('x', ['a'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'x': ('x', ['a', 'b'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'x': ('x', ['a', 'b', 'c'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'a': ('x', [8, 9])}).coords
expected = Dataset(coords={'x': range(2)})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'x': np.nan}).coords
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
def test_coords_merge_mismatched_shape(self):
orig_coords = Dataset(coords={'a': ('x', [1, 1])}).coords
other_coords = Dataset(coords={'a': 1}).coords
expected = orig_coords.to_dataset()
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'a': ('y', [1])}).coords
expected = Dataset(coords={'a': (['x', 'y'], [[1], [1]])})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected.T, actual)
orig_coords = Dataset(coords={'a': ('x', [np.nan])}).coords
other_coords = Dataset(coords={'a': np.nan}).coords
expected = orig_coords.to_dataset()
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
def test_equals_and_identical(self):
data = create_test_data(seed=42)
self.assertTrue(data.equals(data))
self.assertTrue(data.identical(data))
data2 = create_test_data(seed=42)
data2.attrs['foobar'] = 'baz'
self.assertTrue(data.equals(data2))
self.assertFalse(data.identical(data2))
del data2['time']
self.assertFalse(data.equals(data2))
data = create_test_data(seed=42).rename({'var1': None})
self.assertTrue(data.equals(data))
self.assertTrue(data.identical(data))
data2 = data.reset_coords()
self.assertFalse(data2.equals(data))
self.assertFalse(data2.identical(data))
def test_equals_failures(self):
data = create_test_data()
self.assertFalse(data.equals('foo'))
self.assertFalse(data.identical(123))
self.assertFalse(data.broadcast_equals({1: 2}))
def test_broadcast_equals(self):
data1 = Dataset(coords={'x': 0})
data2 = Dataset(coords={'x': [0]})
self.assertTrue(data1.broadcast_equals(data2))
self.assertFalse(data1.equals(data2))
self.assertFalse(data1.identical(data2))
def test_attrs(self):
data = create_test_data(seed=42)
data.attrs = {'foobar': 'baz'}
        self.assertEqual(data.attrs['foobar'], 'baz')
self.assertIsInstance(data.attrs, OrderedDict)
@requires_dask
def test_chunk(self):
data = create_test_data()
for v in data.variables.values():
self.assertIsInstance(v.data, np.ndarray)
self.assertEqual(data.chunks, {})
reblocked = data.chunk()
for v in reblocked.variables.values():
self.assertIsInstance(v.data, da.Array)
expected_chunks = dict((d, (s,)) for d, s in data.dims.items())
self.assertEqual(reblocked.chunks, expected_chunks)
reblocked = data.chunk({'time': 5, 'dim1': 5, 'dim2': 5, 'dim3': 5})
expected_chunks = {'time': (5,) * 4, 'dim1': (5, 3),
'dim2': (5, 4), 'dim3': (5, 5)}
self.assertEqual(reblocked.chunks, expected_chunks)
reblocked = data.chunk(expected_chunks)
self.assertEqual(reblocked.chunks, expected_chunks)
# reblock on already blocked data
reblocked = reblocked.chunk(expected_chunks)
self.assertEqual(reblocked.chunks, expected_chunks)
self.assertDatasetIdentical(reblocked, data)
with self.assertRaisesRegexp(ValueError, 'some chunks'):
data.chunk({'foo': 10})
@requires_dask
def test_dask_is_lazy(self):
store = InaccessibleVariableDataStore()
create_test_data().dump_to_store(store)
ds = open_dataset(store).chunk()
with self.assertRaises(UnexpectedDataAccess):
ds.load()
with self.assertRaises(UnexpectedDataAccess):
ds['var1'].values
# these should not raise UnexpectedDataAccess:
ds.var1.data
ds.isel(time=10)
ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
ds.transpose()
ds.mean()
ds.fillna(0)
ds.rename({'dim1': 'foobar'})
ds.set_coords('var1')
ds.drop('var1')
def test_isel(self):
data = create_test_data()
slicers = {'dim1': slice(None, None, 2), 'dim2': slice(0, 2)}
ret = data.isel(**slicers)
# Verify that only the specified dimension was altered
self.assertItemsEqual(data.dims, ret.dims)
for d in data.dims:
if d in slicers:
self.assertEqual(ret.dims[d],
np.arange(data.dims[d])[slicers[d]].size)
else:
self.assertEqual(data.dims[d], ret.dims[d])
# Verify that the data is what we expect
for v in data:
self.assertEqual(data[v].dims, ret[v].dims)
self.assertEqual(data[v].attrs, ret[v].attrs)
slice_list = [slice(None)] * data[v].values.ndim
for d, s in iteritems(slicers):
if d in data[v].dims:
inds = np.nonzero(np.array(data[v].dims) == d)[0]
for ind in inds:
slice_list[ind] = s
expected = data[v].values[slice_list]
actual = ret[v].values
np.testing.assert_array_equal(expected, actual)
with self.assertRaises(ValueError):
data.isel(not_a_dim=slice(0, 2))
ret = data.isel(dim1=0)
self.assertEqual({'time': 20, 'dim2': 9, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
ret = data.isel(time=slice(2), dim1=0, dim2=slice(5))
self.assertEqual({'time': 2, 'dim2': 5, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
ret = data.isel(time=0, dim1=0, dim2=slice(5))
self.assertItemsEqual({'dim2': 5, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes,
list(ret.indexes) + ['dim1', 'time'])
def test_sel(self):
data = create_test_data()
int_slicers = {'dim1': slice(None, None, 2),
'dim2': slice(2),
'dim3': slice(3)}
loc_slicers = {'dim1': slice(None, None, 2),
'dim2': slice(0, 0.5),
'dim3': slice('a', 'c')}
self.assertDatasetEqual(data.isel(**int_slicers),
data.sel(**loc_slicers))
data['time'] = ('time', pd.date_range('2000-01-01', periods=20))
self.assertDatasetEqual(data.isel(time=0),
data.sel(time='2000-01-01'))
self.assertDatasetEqual(data.isel(time=slice(10)),
data.sel(time=slice('2000-01-01',
'2000-01-10')))
self.assertDatasetEqual(data, data.sel(time=slice('1999', '2005')))
times = pd.date_range('2000-01-01', periods=3)
self.assertDatasetEqual(data.isel(time=slice(3)),
data.sel(time=times))
self.assertDatasetEqual(data.isel(time=slice(3)),
data.sel(time=(data['time.dayofyear'] <= 3)))
td = pd.to_timedelta(np.arange(3), unit='days')
data = Dataset({'x': ('td', np.arange(3)), 'td': td})
self.assertDatasetEqual(data, data.sel(td=td))
self.assertDatasetEqual(data, data.sel(td=slice('3 days')))
self.assertDatasetEqual(data.isel(td=0), data.sel(td='0 days'))
self.assertDatasetEqual(data.isel(td=0), data.sel(td='0h'))
self.assertDatasetEqual(data.isel(td=slice(1, 3)),
data.sel(td=slice('1 days', '2 days')))
def test_isel_points(self):
data = create_test_data()
pdim1 = [1, 2, 3]
pdim2 = [4, 5, 1]
pdim3 = [1, 2, 3]
actual = data.isel_points(dim1=pdim1, dim2=pdim2, dim3=pdim3,
dim='test_coord')
assert 'test_coord' in actual.coords
assert actual.coords['test_coord'].shape == (len(pdim1), )
actual = data.isel_points(dim1=pdim1, dim2=pdim2)
assert 'points' in actual.coords
np.testing.assert_array_equal(pdim1, actual['dim1'])
# test that the order of the indexers doesn't matter
self.assertDatasetIdentical(data.isel_points(dim1=pdim1, dim2=pdim2),
data.isel_points(dim2=pdim2, dim1=pdim1))
# make sure we're raising errors in the right places
with self.assertRaisesRegexp(ValueError,
'All indexers must be the same length'):
data.isel_points(dim1=[1, 2], dim2=[1, 2, 3])
with self.assertRaisesRegexp(ValueError,
'dimension bad_key does not exist'):
data.isel_points(bad_key=[1, 2])
with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
data.isel_points(dim1=[1.5, 2.2])
with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
data.isel_points(dim1=[1, 2, 3], dim2=slice(3))
with self.assertRaisesRegexp(ValueError,
'Indexers must be 1 dimensional'):
data.isel_points(dim1=1, dim2=2)
with self.assertRaisesRegexp(ValueError,
'Existing dimension names are not valid'):
data.isel_points(dim1=[1, 2], dim2=[1, 2], dim='dim2')
# test to be sure we keep around variables that were not indexed
ds = Dataset({'x': [1, 2, 3, 4], 'y': 0})
actual = ds.isel_points(x=[0, 1, 2])
self.assertDataArrayIdentical(ds['y'], actual['y'])
# tests using index or DataArray as a dim
stations = Dataset()
stations['station'] = ('station', ['A', 'B', 'C'])
stations['dim1s'] = ('station', [1, 2, 3])
stations['dim2s'] = ('station', [4, 5, 1])
actual = data.isel_points(dim1=stations['dim1s'],
dim2=stations['dim2s'],
dim=stations['station'])
assert 'station' in actual.coords
assert 'station' in actual.dims
self.assertDataArrayIdentical(actual['station'].drop(['dim1', 'dim2']),
stations['station'])
# make sure we get the default points coordinate when a list is passed
actual = data.isel_points(dim1=stations['dim1s'],
dim2=stations['dim2s'],
dim=['A', 'B', 'C'])
assert 'points' in actual.coords
# can pass a numpy array
data.isel_points(dim1=stations['dim1s'],
dim2=stations['dim2s'],
dim=np.array([4, 5, 6]))
def test_sel_points(self):
data = create_test_data()
pdim1 = [1, 2, 3]
pdim2 = [4, 5, 1]
pdim3 = [1, 2, 3]
expected = data.isel_points(dim1=pdim1, dim2=pdim2, dim3=pdim3,
dim='test_coord')
actual = data.sel_points(dim1=data.dim1[pdim1], dim2=data.dim2[pdim2],
dim3=data.dim3[pdim3], dim='test_coord')
self.assertDatasetIdentical(expected, actual)
data = Dataset({'foo': (('x', 'y'), np.arange(9).reshape(3, 3))})
expected = Dataset({'foo': ('points', [0, 4, 8])},
{'x': ('points', range(3)),
'y': ('points', range(3))})
actual = data.sel_points(x=[0.1, 1.1, 2.5], y=[0, 1.2, 2.0],
method='pad')
self.assertDatasetIdentical(expected, actual)
def test_sel_method(self):
data = create_test_data()
if pd.__version__ >= '0.16':
expected = data.sel(dim1=1)
actual = data.sel(dim1=0.95, method='nearest')
self.assertDatasetIdentical(expected, actual)
expected = data.sel(dim2=[1.5])
actual = data.sel(dim2=[1.45], method='backfill')
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(NotImplementedError, 'slice objects'):
data.sel(dim2=slice(1, 3), method='ffill')
with self.assertRaisesRegexp(TypeError, '``method``'):
# this should not pass silently
data.sel(data)
def test_loc(self):
data = create_test_data()
expected = data.sel(dim3='a')
actual = data.loc[dict(dim3='a')]
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(TypeError, 'can only lookup dict'):
data.loc['a']
with self.assertRaises(TypeError):
data.loc[dict(dim3='a')] = 0
def test_reindex_like(self):
data = create_test_data()
data['letters'] = ('dim3', 10 * ['a'])
expected = data.isel(dim1=slice(10), time=slice(13))
actual = data.reindex_like(expected)
self.assertDatasetIdentical(actual, expected)
expected = data.copy(deep=True)
expected['dim3'] = ('dim3', list('cdefghijkl'))
expected['var3'][:-2] = expected['var3'][2:]
expected['var3'][-2:] = np.nan
expected['letters'] = expected['letters'].astype(object)
expected['letters'][-2:] = np.nan
expected['numbers'] = expected['numbers'].astype(float)
expected['numbers'][:-2] = expected['numbers'][2:].values
expected['numbers'][-2:] = np.nan
actual = data.reindex_like(expected)
self.assertDatasetIdentical(actual, expected)
def test_reindex(self):
data = create_test_data()
self.assertDatasetIdentical(data, data.reindex())
expected = data.isel(dim1=slice(10))
actual = data.reindex(dim1=data['dim1'][:10])
self.assertDatasetIdentical(actual, expected)
actual = data.reindex(dim1=data['dim1'][:10].values)
self.assertDatasetIdentical(actual, expected)
actual = data.reindex(dim1=data['dim1'][:10].to_index())
self.assertDatasetIdentical(actual, expected)
# test dict-like argument
actual = data.reindex({'dim1': data['dim1'][:10]})
self.assertDatasetIdentical(actual, expected)
with self.assertRaisesRegexp(ValueError, 'cannot specify both'):
data.reindex({'x': 0}, x=0)
with self.assertRaisesRegexp(ValueError, 'dictionary'):
data.reindex('foo')
# out of order
expected = data.sel(dim1=data['dim1'][:10:-1])
actual = data.reindex(dim1=data['dim1'][:10:-1])
self.assertDatasetIdentical(actual, expected)
# regression test for #279
expected = Dataset({'x': ('time', np.random.randn(5))})
time2 = DataArray(np.arange(5), dims="time2")
actual = expected.reindex(time=time2)
self.assertDatasetIdentical(actual, expected)
# another regression test
ds = Dataset({'foo': (['x', 'y'], np.zeros((3, 4)))})
expected = Dataset({'foo': (['x', 'y'], np.zeros((3, 2))),
'x': [0, 1, 3]})
expected['foo'][-1] = np.nan
actual = ds.reindex(x=[0, 1, 3], y=[0, 1])
self.assertDatasetIdentical(expected, actual)
def test_reindex_method(self):
ds = Dataset({'x': ('y', [10, 20])})
y = [-0.5, 0.5, 1.5]
actual = ds.reindex(y=y, method='backfill')
expected = Dataset({'x': ('y', [10, 20, np.nan]), 'y': y})
self.assertDatasetIdentical(expected, actual)
actual = ds.reindex(y=y, method='pad')
expected = Dataset({'x': ('y', [np.nan, 10, 20]), 'y': y})
self.assertDatasetIdentical(expected, actual)
alt = Dataset({'y': y})
actual = ds.reindex_like(alt, method='pad')
self.assertDatasetIdentical(expected, actual)
def test_align(self):
left = create_test_data()
right = left.copy(deep=True)
right['dim3'] = ('dim3', list('cdefghijkl'))
right['var3'][:-2] = right['var3'][2:]
right['var3'][-2:] = np.random.randn(*right['var3'][-2:].shape)
right['numbers'][:-2] = right['numbers'][2:]
right['numbers'][-2:] = -10
intersection = list('cdefghij')
union = list('abcdefghijkl')
left2, right2 = align(left, right, join='inner')
self.assertArrayEqual(left2['dim3'], intersection)
self.assertDatasetIdentical(left2, right2)
left2, right2 = align(left, right, join='outer')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertArrayEqual(left2['dim3'], union)
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(left2['var3'][-2:]).all())
self.assertTrue(np.isnan(right2['var3'][:2]).all())
left2, right2 = align(left, right, join='left')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertVariableEqual(left2['dim3'], left['dim3'])
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(right2['var3'][:2]).all())
left2, right2 = align(left, right, join='right')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertVariableEqual(left2['dim3'], right['dim3'])
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(left2['var3'][-2:]).all())
with self.assertRaisesRegexp(ValueError, 'invalid value for join'):
align(left, right, join='foobar')
with self.assertRaises(TypeError):
align(left, right, foo='bar')
def test_variable_indexing(self):
data = create_test_data()
v = data['var1']
d1 = data['dim1']
d2 = data['dim2']
self.assertVariableEqual(v, v[d1.values])
self.assertVariableEqual(v, v[d1])
self.assertVariableEqual(v[:3], v[d1 < 3])
self.assertVariableEqual(v[:, 3:], v[:, d2 >= 1.5])
self.assertVariableEqual(v[:3, 3:], v[d1 < 3, d2 >= 1.5])
self.assertVariableEqual(v[:3, :2], v[range(3), range(2)])
self.assertVariableEqual(v[:3, :2], v.loc[d1[:3], d2[:2]])
def test_drop_variables(self):
data = create_test_data()
self.assertDatasetIdentical(data, data.drop([]))
expected = Dataset(dict((k, data[k]) for k in data if k != 'time'))
actual = data.drop('time')
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['time'])
self.assertDatasetIdentical(expected, actual)
expected = Dataset(dict((k, data[k]) for
k in ['dim2', 'dim3', 'time', 'numbers']))
actual = data.drop('dim1')
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'cannot be found'):
data.drop('not_found_here')
def test_drop_index_labels(self):
data = Dataset({'A': (['x', 'y'], np.random.randn(2, 3)),
'x': ['a', 'b']})
actual = data.drop(1, 'y')
expected = data.isel(y=[0, 2])
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['a'], 'x')
expected = data.isel(x=[1])
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['a', 'b'], 'x')
expected = data.isel(x=slice(0, 0))
self.assertDatasetIdentical(expected, actual)
with self.assertRaises(ValueError):
# not contained in axis
data.drop(['c'], dim='x')
def test_copy(self):
data = create_test_data()
for copied in [data.copy(deep=False), copy(data)]:
self.assertDatasetIdentical(data, copied)
for k in data:
v0 = data.variables[k]
v1 = copied.variables[k]
self.assertIs(v0, v1)
copied['foo'] = ('z', np.arange(5))
self.assertNotIn('foo', data)
for copied in [data.copy(deep=True), deepcopy(data)]:
self.assertDatasetIdentical(data, copied)
for k in data:
v0 = data.variables[k]
v1 = copied.variables[k]
self.assertIsNot(v0, v1)
def test_rename(self):
data = create_test_data()
newnames = {'var1': 'renamed_var1', 'dim2': 'renamed_dim2'}
renamed = data.rename(newnames)
variables = OrderedDict(data.variables)
for k, v in iteritems(newnames):
variables[v] = variables.pop(k)
for k, v in iteritems(variables):
dims = list(v.dims)
for name, newname in iteritems(newnames):
if name in dims:
dims[dims.index(name)] = newname
self.assertVariableEqual(Variable(dims, v.values, v.attrs),
renamed[k])
self.assertEqual(v.encoding, renamed[k].encoding)
self.assertEqual(type(v), type(renamed.variables[k]))
self.assertTrue('var1' not in renamed)
self.assertTrue('dim2' not in renamed)
with self.assertRaisesRegexp(ValueError, "cannot rename 'not_a_var'"):
data.rename({'not_a_var': 'nada'})
# verify that we can rename a variable without accessing the data
var1 = data['var1']
data['var1'] = (var1.dims, InaccessibleArray(var1.values))
renamed = data.rename(newnames)
with self.assertRaises(UnexpectedDataAccess):
renamed['renamed_var1'].values
def test_rename_inplace(self):
times = pd.date_range('2000-01-01', periods=3)
data = Dataset({'z': ('x', [2, 3, 4]), 't': ('t', times)})
copied = data.copy()
renamed = data.rename({'x': 'y'})
data.rename({'x': 'y'}, inplace=True)
self.assertDatasetIdentical(data, renamed)
self.assertFalse(data.equals(copied))
        self.assertEqual(data.dims, {'y': 3, 't': 3})
# check virtual variables
self.assertArrayEqual(data['t.dayofyear'], [1, 2, 3])
def test_swap_dims(self):
original = Dataset({'x': [1, 2, 3], 'y': ('x', list('abc')), 'z': 42})
expected = Dataset({'z': 42}, {'x': ('y', [1, 2, 3]), 'y': list('abc')})
actual = original.swap_dims({'x': 'y'})
self.assertDatasetIdentical(expected, actual)
self.assertIsInstance(actual.variables['y'], Coordinate)
self.assertIsInstance(actual.variables['x'], Variable)
roundtripped = actual.swap_dims({'y': 'x'})
self.assertDatasetIdentical(original.set_coords('y'), roundtripped)
actual = original.copy()
actual.swap_dims({'x': 'y'}, inplace=True)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'cannot swap'):
original.swap_dims({'y': 'x'})
with self.assertRaisesRegexp(ValueError, 'replacement dimension'):
original.swap_dims({'x': 'z'})
def test_update(self):
data = create_test_data(seed=0)
expected = data.copy()
var2 = Variable('dim1', np.arange(8))
actual = data.update({'var2': var2})
expected['var2'] = var2
self.assertDatasetIdentical(expected, actual)
actual = data.copy()
actual_result = actual.update(data, inplace=True)
self.assertIs(actual_result, actual)
self.assertDatasetIdentical(expected, actual)
actual = data.update(data, inplace=False)
expected = data
self.assertIsNot(actual, expected)
self.assertDatasetIdentical(expected, actual)
other = Dataset(attrs={'new': 'attr'})
actual = data.copy()
actual.update(other)
self.assertDatasetIdentical(expected, actual)
def test_update_auto_align(self):
ds = Dataset({'x': ('t', [3, 4])})
expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan, 5])})
actual = ds.copy()
other = {'y': ('t', [5]), 't': [1]}
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
actual.update(other)
actual.update(Dataset(other))
self.assertDatasetIdentical(expected, actual)
actual = ds.copy()
other = Dataset({'y': ('t', [5]), 't': [100]})
actual.update(other)
expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan] * 2)})
self.assertDatasetIdentical(expected, actual)
def test_merge(self):
data = create_test_data()
ds1 = data[['var1']]
ds2 = data[['var3']]
expected = data[['var1', 'var3']]
actual = ds1.merge(ds2)
self.assertDatasetIdentical(expected, actual)
actual = ds2.merge(ds1)
self.assertDatasetIdentical(expected, actual)
actual = data.merge(data)
self.assertDatasetIdentical(data, actual)
actual = data.reset_coords(drop=True).merge(data)
self.assertDatasetIdentical(data, actual)
actual = data.merge(data.reset_coords(drop=True))
self.assertDatasetIdentical(data, actual)
with self.assertRaises(ValueError):
ds1.merge(ds2.rename({'var3': 'var1'}))
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data.reset_coords().merge(data)
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data.merge(data.reset_coords())
def test_merge_broadcast_equals(self):
ds1 = Dataset({'x': 0})
ds2 = Dataset({'x': ('y', [0, 0])})
actual = ds1.merge(ds2)
self.assertDatasetIdentical(ds2, actual)
actual = ds2.merge(ds1)
self.assertDatasetIdentical(ds2, actual)
actual = ds1.copy()
actual.update(ds2)
self.assertDatasetIdentical(ds2, actual)
ds1 = Dataset({'x': np.nan})
ds2 = Dataset({'x': ('y', [np.nan, np.nan])})
actual = ds1.merge(ds2)
self.assertDatasetIdentical(ds2, actual)
def test_merge_compat(self):
ds1 = Dataset({'x': 0})
ds2 = Dataset({'x': 1})
for compat in ['broadcast_equals', 'equals', 'identical']:
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat=compat)
ds2 = Dataset({'x': [0, 0]})
for compat in ['equals', 'identical']:
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat=compat)
ds2 = Dataset({'x': ((), 0, {'foo': 'bar'})})
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat='identical')
with self.assertRaisesRegexp(ValueError, 'compat=\S+ invalid'):
ds1.merge(ds2, compat='foobar')
def test_merge_auto_align(self):
ds1 = Dataset({'a': ('x', [1, 2])})
ds2 = Dataset({'b': ('x', [3, 4]), 'x': [1, 2]})
expected = Dataset({'a': ('x', [1, 2, np.nan]),
'b': ('x', [np.nan, 3, 4])})
self.assertDatasetIdentical(expected, ds1.merge(ds2))
self.assertDatasetIdentical(expected, ds2.merge(ds1))
expected = expected.isel(x=slice(2))
self.assertDatasetIdentical(expected, ds1.merge(ds2, join='left'))
self.assertDatasetIdentical(expected, ds2.merge(ds1, join='right'))
expected = expected.isel(x=slice(1, 2))
self.assertDatasetIdentical(expected, ds1.merge(ds2, join='inner'))
self.assertDatasetIdentical(expected, ds2.merge(ds1, join='inner'))
def test_getitem(self):
data = create_test_data()
self.assertIsInstance(data['var1'], DataArray)
self.assertVariableEqual(data['var1'], data.variables['var1'])
with self.assertRaises(KeyError):
data['notfound']
with self.assertRaises(KeyError):
data[['var1', 'notfound']]
actual = data[['var1', 'var2']]
expected = Dataset({'var1': data['var1'], 'var2': data['var2']})
self.assertDatasetEqual(expected, actual)
actual = data['numbers']
expected = DataArray(data['numbers'].variable, [data['dim3']],
name='numbers')
self.assertDataArrayIdentical(expected, actual)
actual = data[dict(dim1=0)]
expected = data.isel(dim1=0)
self.assertDatasetIdentical(expected, actual)
def test_getitem_hashable(self):
data = create_test_data()
data[(3, 4)] = data['var1'] + 1
expected = data['var1'] + 1
expected.name = (3, 4)
self.assertDataArrayIdentical(expected, data[(3, 4)])
with self.assertRaisesRegexp(KeyError, "('var1', 'var2')"):
data[('var1', 'var2')]
def test_virtual_variables(self):
# access virtual variables
data = create_test_data()
expected = DataArray(1 + np.arange(20), coords=[data['time']],
dims='time', name='dayofyear')
self.assertDataArrayIdentical(expected, data['time.dayofyear'])
self.assertArrayEqual(data['time.month'].values,
data.variables['time'].to_index().month)
self.assertArrayEqual(data['time.season'].values, 'DJF')
# test virtual variable math
self.assertArrayEqual(data['time.dayofyear'] + 1, 2 + np.arange(20))
self.assertArrayEqual(np.sin(data['time.dayofyear']),
np.sin(1 + np.arange(20)))
# ensure they become coordinates
expected = Dataset({}, {'dayofyear': data['time.dayofyear']})
actual = data[['time.dayofyear']]
self.assertDatasetEqual(expected, actual)
# non-coordinate variables
ds = Dataset({'t': ('x', pd.date_range('2000-01-01', periods=3))})
self.assertTrue((ds['t.year'] == 2000).all())
def test_time_season(self):
ds = Dataset({'t': pd.date_range('2000-01-01', periods=12, freq='M')})
expected = ['DJF'] * 2 + ['MAM'] * 3 + ['JJA'] * 3 + ['SON'] * 3 + ['DJF']
self.assertArrayEqual(expected, ds['t.season'])
def test_slice_virtual_variable(self):
data = create_test_data()
self.assertVariableEqual(data['time.dayofyear'][:10],
Variable(['time'], 1 + np.arange(10)))
self.assertVariableEqual(data['time.dayofyear'][0], Variable([], 1))
def test_setitem(self):
# assign a variable
var = Variable(['dim1'], np.random.randn(8))
data1 = create_test_data()
data1['A'] = var
data2 = data1.copy()
data2['A'] = var
self.assertDatasetIdentical(data1, data2)
# assign a dataset array
dv = 2 * data2['A']
data1['B'] = dv.variable
data2['B'] = dv
self.assertDatasetIdentical(data1, data2)
# can't assign an ND array without dimensions
with self.assertRaisesRegexp(ValueError,
'dimensions .* must have the same len'):
data2['C'] = var.values.reshape(2, 4)
# but can assign a 1D array
data1['C'] = var.values
data2['C'] = ('C', var.values)
self.assertDatasetIdentical(data1, data2)
# can assign a scalar
data1['scalar'] = 0
data2['scalar'] = ([], 0)
self.assertDatasetIdentical(data1, data2)
# can't use the same dimension name as a scalar var
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data1['newvar'] = ('scalar', [3, 4, 5])
# can't resize a used dimension
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
data1['dim1'] = data1['dim1'][:5]
# override an existing value
data1['A'] = 3 * data2['A']
self.assertVariableEqual(data1['A'], 3 * data2['A'])
with self.assertRaises(NotImplementedError):
data1[{'x': 0}] = 0
def test_setitem_auto_align(self):
ds = Dataset()
ds['x'] = ('y', range(3))
ds['y'] = 1 + np.arange(3)
expected = Dataset({'x': ('y', range(3)), 'y': 1 + np.arange(3)})
self.assertDatasetIdentical(ds, expected)
ds['y'] = DataArray(range(3), dims='y')
expected = Dataset({'x': ('y', range(3))})
self.assertDatasetIdentical(ds, expected)
ds['x'] = DataArray([1, 2], dims='y')
expected = Dataset({'x': ('y', [1, 2, np.nan])})
self.assertDatasetIdentical(ds, expected)
ds['x'] = 42
expected = Dataset({'x': 42, 'y': range(3)})
self.assertDatasetIdentical(ds, expected)
ds['x'] = DataArray([4, 5, 6, 7], dims='y')
expected = Dataset({'x': ('y', [4, 5, 6])})
self.assertDatasetIdentical(ds, expected)
def test_assign(self):
ds = Dataset()
actual = ds.assign(x = [0, 1, 2], y = 2)
expected = Dataset({'x': [0, 1, 2], 'y': 2})
self.assertDatasetIdentical(actual, expected)
self.assertEqual(list(actual), ['x', 'y'])
self.assertDatasetIdentical(ds, Dataset())
actual = actual.assign(y = lambda ds: ds.x ** 2)
expected = Dataset({'y': ('x', [0, 1, 4])})
self.assertDatasetIdentical(actual, expected)
actual = actual.assign_coords(z = 2)
expected = Dataset({'y': ('x', [0, 1, 4])}, {'z': 2})
self.assertDatasetIdentical(actual, expected)
ds = Dataset({'a': ('x', range(3))}, {'b': ('x', ['A'] * 2 + ['B'])})
actual = ds.groupby('b').assign(c = lambda ds: 2 * ds.a)
expected = ds.merge({'c': ('x', [0, 2, 4])})
self.assertDatasetIdentical(actual, expected)
actual = ds.groupby('b').assign(c = lambda ds: ds.a.sum())
expected = ds.merge({'c': ('x', [1, 1, 2])})
self.assertDatasetIdentical(actual, expected)
actual = ds.groupby('b').assign_coords(c = lambda ds: ds.a.sum())
expected = expected.set_coords('c')
self.assertDatasetIdentical(actual, expected)
def test_delitem(self):
data = create_test_data()
all_items = set(data)
self.assertItemsEqual(data, all_items)
del data['var1']
self.assertItemsEqual(data, all_items - set(['var1']))
del data['dim1']
self.assertItemsEqual(data, set(['time', 'dim2', 'dim3', 'numbers']))
self.assertNotIn('dim1', data.dims)
self.assertNotIn('dim1', data.coords)
def test_squeeze(self):
data = Dataset({'foo': (['x', 'y', 'z'], [[[1], [2]]])})
for args in [[], [['x']], [['x', 'z']]]:
def get_args(v):
return [set(args[0]) & set(v.dims)] if args else []
expected = Dataset(dict((k, v.squeeze(*get_args(v)))
for k, v in iteritems(data.variables)))
expected.set_coords(data.coords, inplace=True)
self.assertDatasetIdentical(expected, data.squeeze(*args))
# invalid squeeze
with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
data.squeeze('y')
def test_groupby(self):
data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))},
{'x': ('x', list('abc')),
'c': ('x', [0, 1, 0])})
groupby = data.groupby('x')
self.assertEqual(len(groupby), 3)
expected_groups = {'a': 0, 'b': 1, 'c': 2}
self.assertEqual(groupby.groups, expected_groups)
expected_items = [('a', data.isel(x=0)),
('b', data.isel(x=1)),
('c', data.isel(x=2))]
for actual, expected in zip(groupby, expected_items):
self.assertEqual(actual[0], expected[0])
self.assertDatasetEqual(actual[1], expected[1])
identity = lambda x: x
for k in ['x', 'c', 'y']:
actual = data.groupby(k, squeeze=False).apply(identity)
self.assertDatasetEqual(data, actual)
def test_groupby_returns_new_type(self):
data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))})
actual = data.groupby('x').apply(lambda ds: ds['z'])
expected = data['z']
self.assertDataArrayIdentical(expected, actual)
actual = data['z'].groupby('x').apply(lambda x: x.to_dataset())
expected = data
self.assertDatasetIdentical(expected, actual)
def test_groupby_iter(self):
data = create_test_data()
for n, (t, sub) in enumerate(list(data.groupby('dim1'))[:3]):
self.assertEqual(data['dim1'][n], t)
self.assertVariableEqual(data['var1'][n], sub['var1'])
self.assertVariableEqual(data['var2'][n], sub['var2'])
self.assertVariableEqual(data['var3'][:, n], sub['var3'])
def test_groupby_errors(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'must be 1 dimensional'):
data.groupby('var1')
with self.assertRaisesRegexp(ValueError, 'must have a name'):
data.groupby(np.arange(10))
with self.assertRaisesRegexp(ValueError, 'length does not match'):
data.groupby(data['dim1'][:3])
with self.assertRaisesRegexp(ValueError, "must have a 'dims'"):
data.groupby(data.coords['dim1'].to_index())
def test_groupby_reduce(self):
data = Dataset({'xy': (['x', 'y'], np.random.randn(3, 4)),
'xonly': ('x', np.random.randn(3)),
'yonly': ('y', np.random.randn(4)),
'letters': ('y', ['a', 'a', 'b', 'b'])})
expected = data.mean('y')
expected['yonly'] = expected['yonly'].variable.expand_dims({'x': 3})
actual = data.groupby('x').mean()
self.assertDatasetAllClose(expected, actual)
actual = data.groupby('x').mean('y')
self.assertDatasetAllClose(expected, actual)
letters = data['letters']
expected = Dataset({'xy': data['xy'].groupby(letters).mean(),
'xonly': (data['xonly'].mean().variable
.expand_dims({'letters': 2})),
'yonly': data['yonly'].groupby(letters).mean()})
actual = data.groupby('letters').mean()
self.assertDatasetAllClose(expected, actual)
def test_groupby_math(self):
reorder_dims = lambda x: x.transpose('dim1', 'dim2', 'dim3', 'time')
ds = create_test_data()
for squeeze in [True, False]:
grouped = ds.groupby('dim1', squeeze=squeeze)
expected = reorder_dims(ds + ds.coords['dim1'])
actual = grouped + ds.coords['dim1']
self.assertDatasetIdentical(expected, reorder_dims(actual))
actual = ds.coords['dim1'] + grouped
self.assertDatasetIdentical(expected, reorder_dims(actual))
ds2 = 2 * ds
expected = reorder_dims(ds + ds2)
actual = grouped + ds2
self.assertDatasetIdentical(expected, reorder_dims(actual))
actual = ds2 + grouped
self.assertDatasetIdentical(expected, reorder_dims(actual))
grouped = ds.groupby('numbers')
zeros = DataArray([0, 0, 0, 0], [('numbers', range(4))])
expected = ((ds + Variable('dim3', np.zeros(10)))
.transpose('dim3', 'dim1', 'dim2', 'time'))
actual = grouped + zeros
self.assertDatasetEqual(expected, actual)
actual = zeros + grouped
self.assertDatasetEqual(expected, actual)
with self.assertRaisesRegexp(ValueError, 'dimensions .* do not exist'):
grouped + ds
with self.assertRaisesRegexp(ValueError, 'dimensions .* do not exist'):
ds + grouped
with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
grouped + 1
with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
grouped + grouped
with self.assertRaisesRegexp(TypeError, 'in-place operations'):
ds += grouped
ds = Dataset({'x': ('time', np.arange(100)),
'time': pd.date_range('2000-01-01', periods=100)})
with self.assertRaisesRegexp(ValueError, 'no overlapping labels'):
ds + ds.groupby('time.month')
def test_groupby_math_virtual(self):
ds = Dataset({'x': ('t', [1, 2, 3])},
{'t': pd.date_range('20100101', periods=3)})
grouped = ds.groupby('t.day')
actual = grouped - grouped.mean()
expected = Dataset({'x': ('t', [0, 0, 0])},
ds[['t', 't.day']])
self.assertDatasetIdentical(actual, expected)
def test_groupby_nan(self):
# nan should be excluded from groupby
ds = Dataset({'foo': ('x', [1, 2, 3, 4])},
{'bar': ('x', [1, 1, 2, np.nan])})
actual = ds.groupby('bar').mean()
expected = Dataset({'foo': ('bar', [1.5, 3]), 'bar': [1, 2]})
self.assertDatasetIdentical(actual, expected)
def test_resample_and_first(self):
times = pd.date_range('2000-01-01', freq='6H', periods=10)
ds = Dataset({'foo': (['time', 'x', 'y'], np.random.randn(10, 5, 3)),
'bar': ('time', np.random.randn(10), {'meta': 'data'}),
'time': times})
actual = ds.resample('1D', dim='time', how='first')
expected = ds.isel(time=[0, 4, 8])
self.assertDatasetIdentical(expected, actual)
# upsampling
expected_time = pd.date_range('2000-01-01', freq='3H', periods=19)
expected = ds.reindex(time=expected_time)
for how in ['mean', 'sum', 'first', 'last', np.mean]:
actual = ds.resample('3H', 'time', how=how)
self.assertDatasetEqual(expected, actual)
def test_to_array(self):
ds = Dataset(OrderedDict([('a', 1), ('b', ('x', [1, 2, 3]))]),
coords={'c': 42}, attrs={'Conventions': 'None'})
data = [[1, 1, 1], [1, 2, 3]]
coords = {'x': range(3), 'c': 42, 'variable': ['a', 'b']}
dims = ('variable', 'x')
expected = DataArray(data, coords, dims, attrs=ds.attrs)
actual = ds.to_array()
self.assertDataArrayIdentical(expected, actual)
actual = ds.to_array('abc', name='foo')
expected = expected.rename({'variable': 'abc'}).rename('foo')
self.assertDataArrayIdentical(expected, actual)
def test_to_and_from_dataframe(self):
x = np.random.randn(10)
y = np.random.randn(10)
t = list('abcdefghij')
ds = Dataset(OrderedDict([('a', ('t', x)),
('b', ('t', y)),
('t', ('t', t))]))
expected = pd.DataFrame(np.array([x, y]).T, columns=['a', 'b'],
index=pd.Index(t, name='t'))
actual = ds.to_dataframe()
# use the .equals method to check all DataFrame metadata
assert expected.equals(actual), (expected, actual)
# verify coords are included
actual = ds.set_coords('b').to_dataframe()
assert expected.equals(actual), (expected, actual)
# check roundtrip
self.assertDatasetIdentical(ds, Dataset.from_dataframe(actual))
# test a case with a MultiIndex
w = np.random.randn(2, 3)
ds = Dataset({'w': (('x', 'y'), w)})
ds['y'] = ('y', list('abc'))
exp_index = pd.MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1], ['a', 'b', 'c', 'a', 'b', 'c']],
names=['x', 'y'])
expected = pd.DataFrame(w.reshape(-1), columns=['w'], index=exp_index)
actual = ds.to_dataframe()
self.assertTrue(expected.equals(actual))
# check roundtrip
self.assertDatasetIdentical(ds, Dataset.from_dataframe(actual))
# check pathological cases
df = pd.DataFrame([1])
actual = Dataset.from_dataframe(df)
expected = Dataset({0: ('index', [1])})
self.assertDatasetIdentical(expected, actual)
df = pd.DataFrame()
actual = Dataset.from_dataframe(df)
expected = Dataset()
self.assertDatasetIdentical(expected, actual)
# regression test for GH278
# use int64 to ensure consistent results for the pandas .equals method
# on windows (which requires the same dtype)
ds = Dataset({'x': pd.Index(['bar']),
'a': ('y', np.array([1], 'int64'))}).isel(x=0)
# use .loc to ensure consistent results on Python 3
actual = ds.to_dataframe().loc[:, ['a', 'x']]
expected = pd.DataFrame([[1, 'bar']], index=pd.Index([0], name='y'),
columns=['a', 'x'])
assert expected.equals(actual), (expected, actual)
ds = Dataset({'x': np.array([0], 'int64'),
'y': np.array([1], 'int64')})
actual = ds.to_dataframe()
idx = pd.MultiIndex.from_arrays([[0], [1]], names=['x', 'y'])
expected = pd.DataFrame([[]], index=idx)
assert expected.equals(actual), (expected, actual)
# regression test for GH449
df = pd.DataFrame(np.zeros((2, 2)))
df.columns = ['foo', 'foo']
with self.assertRaisesRegexp(ValueError, 'non-unique columns'):
Dataset.from_dataframe(df)
def test_pickle(self):
data = create_test_data()
roundtripped = pickle.loads(pickle.dumps(data))
self.assertDatasetIdentical(data, roundtripped)
# regression test for #167:
self.assertEqual(data.dims, roundtripped.dims)
def test_lazy_load(self):
store = InaccessibleVariableDataStore()
create_test_data().dump_to_store(store)
for decode_cf in [True, False]:
ds = open_dataset(store, decode_cf=decode_cf)
with self.assertRaises(UnexpectedDataAccess):
ds.load()
with self.assertRaises(UnexpectedDataAccess):
ds['var1'].values
# these should not raise UnexpectedDataAccess:
ds.isel(time=10)
ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
def test_dropna(self):
x = np.random.randn(4, 4)
x[::2, 0] = np.nan
y = np.random.randn(4)
y[-1] = np.nan
ds = Dataset({'foo': (('a', 'b'), x), 'bar': (('b', y))})
expected = ds.isel(a=slice(1, None, 2))
actual = ds.dropna('a')
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(1, 3))
actual = ds.dropna('b')
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', subset=['foo', 'bar'])
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(1, None))
actual = ds.dropna('b', subset=['foo'])
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(3))
actual = ds.dropna('b', subset=['bar'])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('a', subset=[])
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('a', subset=['bar'])
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('a', how='all')
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('b', how='all', subset=['bar'])
expected = ds.isel(b=[0, 1, 2])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', thresh=1, subset=['bar'])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', thresh=2)
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('b', thresh=4)
expected = ds.isel(b=[1, 2, 3])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('a', thresh=3)
expected = ds.isel(a=[1, 3])
self.assertDatasetIdentical(actual, ds)
with self.assertRaisesRegexp(ValueError, 'a single dataset dimension'):
ds.dropna('foo')
with self.assertRaisesRegexp(ValueError, 'invalid how'):
ds.dropna('a', how='somehow')
with self.assertRaisesRegexp(TypeError, 'must specify how or thresh'):
ds.dropna('a', how=None)
def test_fillna(self):
ds = Dataset({'a': ('x', [np.nan, 1, np.nan, 3])})
# fill with -1
actual = ds.fillna(-1)
expected = Dataset({'a': ('x', [-1, 1, -1, 3])})
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna({'a': -1})
self.assertDatasetIdentical(expected, actual)
other = Dataset({'a': -1})
actual = ds.fillna(other)
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna({'a': other.a})
self.assertDatasetIdentical(expected, actual)
# fill with range(4)
b = DataArray(range(4), dims='x')
actual = ds.fillna(b)
expected = b.rename('a').to_dataset()
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna(expected)
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna(range(4))
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna(b[:3])
self.assertDatasetIdentical(expected, actual)
# left align variables
ds['b'] = np.nan
actual = ds.fillna({'a': -1, 'c': 'foobar'})
expected = Dataset({'a': ('x', [-1, 1, -1, 3]), 'b': np.nan})
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'no overlapping'):
ds.fillna({'x': 0})
with self.assertRaisesRegexp(ValueError, 'no overlapping'):
ds.fillna(Dataset(coords={'a': 0}))
# groupby
expected = Dataset({'a': ('x', range(4))})
for target in [ds, expected]:
target.coords['b'] = ('x', [0, 0, 1, 1])
actual = ds.groupby('b').fillna(DataArray([0, 2], dims='b'))
self.assertDatasetIdentical(expected, actual)
actual = ds.groupby('b').fillna(Dataset({'a': ('b', [0, 2])}))
self.assertDatasetIdentical(expected, actual)
def test_where(self):
ds = Dataset({'a': ('x', range(5))})
expected = Dataset({'a': ('x', [np.nan, np.nan, 2, 3, 4])})
actual = ds.where(ds > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(ds.a > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(ds.a.values > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(True)
self.assertDatasetIdentical(ds, actual)
expected = ds.copy(deep=True)
expected['a'].values = [np.nan] * 5
actual = ds.where(False)
self.assertDatasetIdentical(expected, actual)
# 2d
ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]])})
expected = Dataset({'a': (('x', 'y'), [[np.nan, 1], [2, 3]])})
actual = ds.where(ds > 0)
self.assertDatasetIdentical(expected, actual)
# groupby
ds = Dataset({'a': ('x', range(5))}, {'c': ('x', [0, 0, 1, 1, 1])})
cond = Dataset({'a': ('c', [True, False])})
expected = ds.copy(deep=True)
expected['a'].values = [0, 1] + [np.nan] * 3
actual = ds.groupby('c').where(cond)
self.assertDatasetIdentical(expected, actual)
def test_reduce(self):
data = create_test_data()
self.assertEqual(len(data.mean().coords), 0)
actual = data.max()
expected = Dataset(dict((k, v.max())
for k, v in iteritems(data.data_vars)))
self.assertDatasetEqual(expected, actual)
self.assertDatasetEqual(data.min(dim=['dim1']),
data.min(dim='dim1'))
for reduct, expected in [('dim2', ['dim1', 'dim3', 'time']),
(['dim2', 'time'], ['dim1', 'dim3']),
(('dim2', 'time'), ['dim1', 'dim3']),
((), ['dim1', 'dim2', 'dim3', 'time'])]:
actual = data.min(dim=reduct).dims
print(reduct, actual, expected)
self.assertItemsEqual(actual, expected)
self.assertDatasetEqual(data.mean(dim=[]), data)
def test_reduce_bad_dim(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'Dataset does not contain'):
ds = data.mean(dim='bad_dim')
def test_reduce_non_numeric(self):
data1 = create_test_data(seed=44)
data2 = create_test_data(seed=44)
add_vars = {'var4': ['dim1', 'dim2']}
for v, dims in sorted(add_vars.items()):
size = tuple(data1.dims[d] for d in dims)
data = np.random.random_integers(0, 100, size=size).astype(np.str_)
data1[v] = (dims, data, {'foo': 'variable'})
self.assertTrue('var4' not in data1.mean())
self.assertDatasetEqual(data1.mean(), data2.mean())
self.assertDatasetEqual(data1.mean(dim='dim1'),
data2.mean(dim='dim1'))
def test_reduce_strings(self):
expected = Dataset({'x': 'a'})
ds = Dataset({'x': ('y', ['a', 'b'])})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 'b'})
actual = ds.max()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 0})
actual = ds.argmin()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 1})
actual = ds.argmax()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': b'a'})
ds = Dataset({'x': ('y', np.array(['a', 'b'], 'S1'))})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': u'a'})
ds = Dataset({'x': ('y', np.array(['a', 'b'], 'U1'))})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
def test_reduce_dtypes(self):
# regression test for GH342
expected = Dataset({'x': 1})
actual = Dataset({'x': True}).sum()
self.assertDatasetIdentical(expected, actual)
# regression test for GH505
expected = Dataset({'x': 3})
actual = Dataset({'x': ('y', np.array([1, 2], 'uint16'))}).sum()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 1 + 1j})
actual = Dataset({'x': ('y', [1, 1j])}).sum()
self.assertDatasetIdentical(expected, actual)
def test_reduce_keep_attrs(self):
data = create_test_data()
_attrs = {'attr1': 'value1', 'attr2': 2929}
attrs = OrderedDict(_attrs)
data.attrs = attrs
# Test dropped attrs
ds = data.mean()
self.assertEqual(ds.attrs, {})
for v in ds.data_vars.values():
self.assertEqual(v.attrs, {})
# Test kept attrs
ds = data.mean(keep_attrs=True)
self.assertEqual(ds.attrs, attrs)
for k, v in ds.data_vars.items():
self.assertEqual(v.attrs, data[k].attrs)
def test_reduce_argmin(self):
# regression test for #205
ds = Dataset({'a': ('x', [0, 1])})
expected = Dataset({'a': ([], 0)})
actual = ds.argmin()
self.assertDatasetIdentical(expected, actual)
actual = ds.argmin('x')
self.assertDatasetIdentical(expected, actual)
def test_reduce_scalars(self):
ds = Dataset({'x': ('a', [2, 2]), 'y': 2, 'z': ('b', [2])})
expected = Dataset({'x': 0, 'y': 0, 'z': 0})
actual = ds.var()
self.assertDatasetIdentical(expected, actual)
def test_reduce_only_one_axis(self):
def mean_only_one_axis(x, axis):
if not isinstance(axis, (int, np.integer)):
raise TypeError('non-integer axis')
return x.mean(axis)
ds = Dataset({'a': (['x', 'y'], [[0, 1, 2, 3, 4]])})
expected = Dataset({'a': ('x', [2])})
actual = ds.reduce(mean_only_one_axis, 'y')
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
ds.reduce(mean_only_one_axis)
with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
ds.reduce(mean_only_one_axis, ['x', 'y'])
def test_count(self):
ds = Dataset({'x': ('a', [np.nan, 1]), 'y': 0, 'z': np.nan})
expected = Dataset({'x': 1, 'y': 1, 'z': 0})
actual = ds.count()
self.assertDatasetIdentical(expected, actual)
def test_apply(self):
data = create_test_data()
data.attrs['foo'] = 'bar'
self.assertDatasetIdentical(data.apply(np.mean), data.mean())
expected = data.mean(keep_attrs=True)
actual = data.apply(lambda x: x.mean(keep_attrs=True), keep_attrs=True)
self.assertDatasetIdentical(expected, actual)
self.assertDatasetIdentical(data.apply(lambda x: x, keep_attrs=True),
data.drop('time'))
def scale(x, multiple=1):
return multiple * x
actual = data.apply(scale, multiple=2)
self.assertDataArrayEqual(actual['var1'], 2 * data['var1'])
self.assertDataArrayIdentical(actual['numbers'], data['numbers'])
actual = data.apply(np.asarray)
expected = data.drop('time') # time is not used on a data var
self.assertDatasetEqual(expected, actual)
def make_example_math_dataset(self):
variables = OrderedDict(
[('bar', ('x', np.arange(100, 400, 100))),
('foo', (('x', 'y'), 1.0 * np.arange(12).reshape(3, 4)))])
coords = {'abc': ('x', ['a', 'b', 'c']),
'y': 10 * np.arange(4)}
ds = Dataset(variables, coords)
ds['foo'][0, 0] = np.nan
return ds
def test_dataset_number_math(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds, +ds)
self.assertDatasetIdentical(ds, ds + 0)
self.assertDatasetIdentical(ds, 0 + ds)
self.assertDatasetIdentical(ds, ds + np.array(0))
self.assertDatasetIdentical(ds, np.array(0) + ds)
actual = ds.copy(deep=True)
actual += 0
self.assertDatasetIdentical(ds, actual)
def test_unary_ops(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds.apply(abs), abs(ds))
self.assertDatasetIdentical(ds.apply(lambda x: x + 4), ds + 4)
for func in [lambda x: x.isnull(),
lambda x: x.round(),
lambda x: x.astype(int)]:
self.assertDatasetIdentical(ds.apply(func), func(ds))
self.assertDatasetIdentical(ds.isnull(), ~ds.notnull())
# don't actually patch these methods in
with self.assertRaises(AttributeError):
ds.item
with self.assertRaises(AttributeError):
ds.searchsorted
def test_dataset_array_math(self):
ds = self.make_example_math_dataset()
expected = ds.apply(lambda x: x - ds['foo'])
self.assertDatasetIdentical(expected, ds - ds['foo'])
self.assertDatasetIdentical(expected, -ds['foo'] + ds)
self.assertDatasetIdentical(expected, ds - ds['foo'].variable)
self.assertDatasetIdentical(expected, -ds['foo'].variable + ds)
actual = ds.copy(deep=True)
actual -= ds['foo']
self.assertDatasetIdentical(expected, actual)
expected = ds.apply(lambda x: x + ds['bar'])
self.assertDatasetIdentical(expected, ds + ds['bar'])
actual = ds.copy(deep=True)
actual += ds['bar']
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'bar': ds['bar'] + np.arange(3)})
self.assertDatasetIdentical(expected, ds[['bar']] + np.arange(3))
self.assertDatasetIdentical(expected, np.arange(3) + ds[['bar']])
def test_dataset_dataset_math(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds, ds + 0 * ds)
self.assertDatasetIdentical(ds, ds + {'foo': 0, 'bar': 0})
expected = ds.apply(lambda x: 2 * x)
self.assertDatasetIdentical(expected, 2 * ds)
self.assertDatasetIdentical(expected, ds + ds)
self.assertDatasetIdentical(expected, ds + ds.data_vars)
self.assertDatasetIdentical(expected, ds + dict(ds.data_vars))
actual = ds.copy(deep=True)
expected_id = id(actual)
actual += ds
self.assertDatasetIdentical(expected, actual)
self.assertEqual(expected_id, id(actual))
self.assertDatasetIdentical(ds == ds, ds.notnull())
subsampled = ds.isel(y=slice(2))
expected = 2 * subsampled
self.assertDatasetIdentical(expected, subsampled + ds)
self.assertDatasetIdentical(expected, ds + subsampled)
def test_dataset_math_auto_align(self):
ds = self.make_example_math_dataset()
subset = ds.isel(x=slice(2), y=[1, 3])
expected = 2 * subset
actual = ds + subset
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'no overlapping labels'):
ds.isel(x=slice(1)) + ds.isel(x=slice(1, None))
actual = ds + ds[['bar']]
expected = (2 * ds[['bar']]).merge(ds.coords)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
ds + Dataset()
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
Dataset() + Dataset()
ds2 = Dataset(coords={'bar': 42})
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
ds + ds2
# maybe unary arithmetic with empty datasets should raise instead?
self.assertDatasetIdentical(Dataset() + 1, Dataset())
for other in [ds.isel(x=slice(2)), ds.bar.isel(x=slice(0))]:
actual = ds.copy(deep=True)
other = ds.isel(x=slice(2))
actual += other
expected = ds + other.reindex_like(ds)
self.assertDatasetIdentical(expected, actual)
def test_dataset_math_errors(self):
ds = self.make_example_math_dataset()
with self.assertRaises(TypeError):
ds['foo'] += ds
with self.assertRaises(TypeError):
ds['foo'].variable += ds
with self.assertRaisesRegexp(ValueError, 'must have the same'):
ds += ds[['bar']]
# verify we can rollback in-place operations if something goes wrong
# nb. inplace datetime64 math actually will work with an integer array
# but not floats thanks to numpy's inconsistent handling
other = DataArray(np.datetime64('2000-01-01T12'), coords={'c': 2})
actual = ds.copy(deep=True)
with self.assertRaises(TypeError):
actual += other
self.assertDatasetIdentical(actual, ds)
def test_dataset_transpose(self):
ds = Dataset({'a': (('x', 'y'), np.random.randn(3, 4)),
'b': (('y', 'x'), np.random.randn(4, 3))})
actual = ds.transpose()
expected = ds.apply(lambda x: x.transpose())
self.assertDatasetIdentical(expected, actual)
actual = ds.T
self.assertDatasetIdentical(expected, actual)
actual = ds.transpose('x', 'y')
expected = ds.apply(lambda x: x.transpose('x', 'y'))
self.assertDatasetIdentical(expected, actual)
ds = create_test_data()
actual = ds.transpose()
for k in ds:
self.assertEqual(actual[k].dims[::-1], ds[k].dims)
new_order = ('dim2', 'dim3', 'dim1', 'time')
actual = ds.transpose(*new_order)
for k in ds:
expected_dims = tuple(d for d in new_order if d in ds[k].dims)
self.assertEqual(actual[k].dims, expected_dims)
with self.assertRaisesRegexp(ValueError, 'arguments to transpose'):
ds.transpose('dim1', 'dim2', 'dim3')
with self.assertRaisesRegexp(ValueError, 'arguments to transpose'):
ds.transpose('dim1', 'dim2', 'dim3', 'time', 'extra_dim')
def test_dataset_diff_n1_simple(self):
ds = Dataset({'foo': ('x', [5, 5, 6, 6])})
actual = ds.diff('x')
expected = Dataset({'foo': ('x', [0, 1, 0])})
expected.coords['x'].values = [1, 2, 3]
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n1_lower(self):
ds = Dataset({'foo': ('x', [5, 5, 6, 6])})
actual = ds.diff('x', label='lower')
expected = Dataset({'foo': ('x', [0, 1, 0])})
expected.coords['x'].values = [0, 1, 2]
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n1(self):
ds = create_test_data(seed=1)
actual = ds.diff('dim2')
expected = dict()
expected['var1'] = DataArray(np.diff(ds['var1'].values, axis=1),
[ds['dim1'].values,
ds['dim2'].values[1:]],
['dim1', 'dim2'])
expected['var2'] = DataArray(np.diff(ds['var2'].values, axis=1),
[ds['dim1'].values,
ds['dim2'].values[1:]],
['dim1', 'dim2'])
expected['var3'] = ds['var3']
expected = Dataset(expected, coords={'time': ds['time'].values})
expected.coords['numbers'] = ('dim3', ds['numbers'].values)
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n2(self):
ds = create_test_data(seed=1)
actual = ds.diff('dim2', n=2)
expected = dict()
expected['var1'] = DataArray(np.diff(ds['var1'].values, axis=1, n=2),
[ds['dim1'].values,
ds['dim2'].values[2:]],
['dim1', 'dim2'])
expected['var2'] = DataArray(np.diff(ds['var2'].values, axis=1, n=2),
[ds['dim1'].values,
ds['dim2'].values[2:]],
['dim1', 'dim2'])
expected['var3'] = ds['var3']
expected = Dataset(expected, coords={'time': ds['time'].values})
expected.coords['numbers'] = ('dim3', ds['numbers'].values)
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_exception_n_neg(self):
ds = create_test_data(seed=1)
with self.assertRaisesRegexp(ValueError, 'must be non-negative'):
ds.diff('dim2', n=-1)
def test_dataset_diff_exception_label_str(self):
ds = create_test_data(seed=1)
with self.assertRaisesRegexp(ValueError, '\'label\' argument has to'):
ds.diff('dim2', label='raise_me')
def test_real_and_imag(self):
attrs = {'foo': 'bar'}
ds = Dataset({'x': ((), 1 + 2j, attrs)}, attrs=attrs)
expected_re = Dataset({'x': ((), 1, attrs)}, attrs=attrs)
self.assertDatasetIdentical(ds.real, expected_re)
expected_im = Dataset({'x': ((), 2, attrs)}, attrs=attrs)
self.assertDatasetIdentical(ds.imag, expected_im)
|
apache-2.0
|
kiddinn/plaso
|
utils/plot_storage.py
|
4
|
2620
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Script to plot storage IO timing usage from profiling data.
This script requires the matplotlib and numpy Python modules.
"""
import argparse
import glob
import os
import sys
import numpy
from matplotlib import pyplot
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Plots storage IO timing from profiling data.'))
argument_parser.add_argument(
'--output', dest='output_file', type=str, help=(
'path of the output file to write the graph to instead of using '
'interactive mode. The output format is deduced from the extension '
'of the filename.'))
argument_parser.add_argument(
'--process', dest='process', type=str, default='', help=(
'comma separated list of names of processes to graph.'))
argument_parser.add_argument(
'profile_path', type=str, help=(
'path to the directory containing the profiling data.'))
options = argument_parser.parse_args()
if not os.path.isdir(options.profile_path):
print('No such directory: {0:s}'.format(options.profile_path))
return False
processes = []
if options.process:
processes = options.process.split(',')
names = [
'time', 'name', 'operation', 'description', 'cpu', 'logical_size', 'size']
glob_expression = os.path.join(options.profile_path, 'storage-*.csv.gz')
for csv_file_name in glob.glob(glob_expression):
process_name = os.path.basename(csv_file_name)
process_name = process_name.replace('storage-', '').replace('.csv.gz', '')
if processes and process_name not in processes:
continue
data = numpy.genfromtxt(
csv_file_name, delimiter='\t', dtype=None, encoding='utf-8',
names=names, skip_header=1)
if data.size > 0:
for name in numpy.unique(data['name']):
data_by_name = numpy.extract(data['name'] == name, data)
data_bytes_per_second = numpy.divide(
data_by_name['logical_size'], data_by_name['cpu'])
label = '-'.join([name, process_name])
pyplot.plot(data_by_name['time'], data_bytes_per_second, label=label)
pyplot.title('Bytes read/write over time')
pyplot.xlabel('Time')
pyplot.xscale('linear')
pyplot.ylabel('Bytes per second')
pyplot.yscale('linear')
pyplot.legend()
if options.output_file:
pyplot.savefig(options.output_file)
else:
pyplot.show()
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
|
apache-2.0
|
anlambert/tulip
|
doc/python/tabulate.py
|
1
|
60926
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2019 Sergey Astanin
# https://bitbucket.org/astanin/python-tabulate
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from platform import python_version_tuple
import re
import math
if python_version_tuple() >= ("3", "3", "0"):
from collections.abc import Iterable
else:
from collections import Iterable
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = long # noqa
_float_type = float
_text_type = unicode # noqa
_binary_type = str
def _is_file(f):
return isinstance(f, file) # noqa
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
basestring = str
import io
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.8.4"
# minimum extra space in headers
MIN_PADDING = 2
# Whether or not to preserve leading/trailing whitespace in data.
PRESERVE_WHITESPACE = False
_DEFAULT_FLOATFMT = "g"
_DEFAULT_MISSINGVAL = ""
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column
# headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
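# Illustrative examples (added, not in the original source):
#   _pipe_segment_with_colons("left", 5)   -> ":----"
#   _pipe_segment_with_colons("right", 5)  -> "----:"
#   _pipe_segment_with_colons("center", 5) -> ":---:"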
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w)
for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _textile_row_with_attrs(cell_values, colwidths, colaligns):
cell_values[0] += ' '
alignment = {"left": "<.", "right": ">.", "center": "=.", "decimal": ">."}
values = (alignment.get(a, '') + v for a, v in zip(colaligns, cell_values))
return '|' + '|'.join(values) + '|'
def _html_begin_table_without_header(colwidths_ignore, colaligns_ignore):
# this table header will be suppressed if there is a header row
return "\n".join(["<table>", "<tbody>"])
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"'}
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(
celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
rowhtml = "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
if celltag == "th": # it's a header row, create a new table header
rowhtml = "\n".join(["<table>",
"<thead>",
rowhtml,
"</thead>",
"<tbody>"])
return rowhtml
def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns,
header=''):
alignment = {"left": '',
"right": '<style="text-align: right;">',
"center": '<style="text-align: center;">',
"decimal": '<style="text-align: right;">'}
values_with_attrs = ["{0}{1} {2} ".format(celltag,
alignment.get(a, ''),
header+c+header)
for c, a in zip(cell_values, colaligns)]
return "".join(values_with_attrs)+"||"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns, escrules=LATEX_ESCAPE_RULES):
def escape_char(c):
return escrules.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
def _rst_escape_first_column(rows, headers):
def escape_empty(val):
if isinstance(val, (_text_type, _binary_type)) and not val.strip():
return ".."
else:
return val
new_headers = list(headers)
new_rows = []
if headers:
new_headers[0] = escape_empty(headers[0])
for row in rows:
new_row = list(row)
if new_row:
new_row[0] = escape_empty(row[0])
new_rows.append(new_row)
return new_rows, new_headers
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("â•’", "â•", "╤", "â••"),
linebelowheader=Line("╞", "â•", "╪", "â•¡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "â•", "â•§", "â•›"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"github":
TableFormat(lineabove=Line("|", "-", "|", "|"),
linebelowheader=Line("|", "-", "|", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"jira":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("||", "||", "||"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"presto":
TableFormat(lineabove=None,
linebelowheader=Line("", "-", "+", ""),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", ""),
datarow=DataRow("", "|", ""),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line(
"{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(
_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"moinmoin":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=partial(_moin_row_with_attrs, "||",
header="'''"),
datarow=partial(_moin_row_with_attrs, "||"),
padding=1, with_header_hide=None),
"youtrack":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|| ", " || ", " || "),
datarow=DataRow("| ", " | ", " |"),
padding=1, with_header_hide=None),
"html":
TableFormat(lineabove=_html_begin_table_without_header,
linebelowheader="",
linebetweenrows=None,
linebelow=Line("</tbody>\n</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=["lineabove"]),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "",
""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_raw":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "",
""),
headerrow=partial(_latex_row, escrules={}),
datarow=partial(_latex_row, escrules={}),
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular,
booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}",
"", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None),
"textile":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("|_. ", "|_.", "|"),
datarow=_textile_row_with_attrs,
padding=1, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
# The table formats for which multiline cells will be folded into subsequent
# table rows. The key is the original format specified at the API. The value is
# the format that will be used to represent the original format.
multiline_formats = {
"plain": "plain",
"simple": "simple",
"grid": "grid",
"fancy_grid": "fancy_grid",
"pipe": "pipe",
"orgtbl": "orgtbl",
"jira": "jira",
"presto": "presto",
"psql": "psql",
"rst": "rst",
}
# TODO: Add multiline support for the remaining table formats:
# - mediawiki: Replace \n with <br>
# - moinmoin: TBD
# - youtrack: TBD
# - html: Replace \n with <br>
# - latex*: Use "makecell" package: In header, replace X\nY with
# \thead{X\\Y} and in data row, replace X\nY with \makecell{X\\Y}
# - tsv: TBD
# - textile: Replace \n with <br/> (must be well-formed XML)
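# Illustrative note (added, not from the original source): with one of the
# multiline-capable formats above, a cell containing an embedded newline is
# rendered on several physical lines of the same logical row, e.g.
#   tabulate([["spam\neggs", 42]], tablefmt="grid")
# keeps "spam" and "eggs" on two lines inside a single bordered grid cell.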
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
# ANSI color codes
_invisible_codes = re.compile(r"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m")
# ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m")
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
""" # noqa
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
>>> _isnumber("123e45678")
False
>>> _isnumber("inf")
True
"""
if not _isconvertible(float, string):
return False
elif isinstance(string, (_text_type, _binary_type)) and (
math.isinf(float(string)) or math.isnan(float(string))):
return string.lower() in ['inf', '-inf', 'nan']
return True
def _isint(string, inttype=int):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return (type(string) is inttype or
(isinstance(string, _binary_type) or
isinstance(string, _text_type))
and _isconvertible(inttype, string))
def _isbool(string):
"""
>>> _isbool(True)
True
>>> _isbool("False")
True
>>> _isbool(1)
False
"""
return (type(string) is _bool_type or
(isinstance(string, (_binary_type, _text_type))
and string in ("True", "False")))
def _type(string, has_invisible=True, numparse=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isbool(string):
return _bool_type
elif _isint(string) and numparse:
return int
elif _isint(string, _long_type) and numparse:
return int
elif _isnumber(string) and numparse:
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
fmt = "{0:>%ds}" % width
return fmt.format(s)
def _padright(width, s):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
""" # noqa
fmt = "{0:<%ds}" % width
return fmt.format(s)
def _padboth(width, s):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
fmt = "{0:^%ds}" % width
return fmt.format(s)
def _padnone(ignore_width, s):
return s
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
# optional wide-character support
if wcwidth is not None and WIDE_CHARS_MODE:
len_fn = wcwidth.wcswidth
else:
len_fn = len
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return len_fn(_strip_invisible(s))
else:
return len_fn(_text_type(s))
def _is_multiline(s):
if isinstance(s, _text_type):
return bool(re.search(_multiline_codes, s))
else: # a bytestring
return bool(re.search(_multiline_codes_bytes, s))
def _multiline_width(multiline_s, line_width_fn=len):
"""Visible width of a potentially multiline content."""
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
"""Return a function to calculate visible cell width."""
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
width_fn = lambda s: _multiline_width(s, line_width_fn) # noqa
else:
width_fn = line_width_fn
return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
if alignment == "right":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
if has_invisible:
decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
else:
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
padfn = _padnone
else:
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padright
return strings, padfn
def _align_column(strings, alignment, minwidth=0,
has_invisible=True, enable_widechars=False,
is_multiline=False):
"""[string] -> [padded_string]"""
strings, padfn = _align_column_choose_padfn(strings, alignment,
has_invisible)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
s_widths = list(map(width_fn, strings))
maxwidth = max(max(s_widths), minwidth)
# TODO: refactor column alignment in single-line and multiline modes
if is_multiline:
if not enable_widechars and not has_invisible:
padded_strings = [
"\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
for ms in strings]
else:
# enable wide-character width corrections
s_lens = [max((len(s) for s in re.split("[\r\n]", ms)))
for ms in strings]
visible_widths = [maxwidth - (w - l)
for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = ["\n".join([padfn(w, s)
for s in (ms.splitlines() or ms)])
for ms, w in zip(strings, visible_widths)]
else: # single-line cell values
if not enable_widechars and not has_invisible:
padded_strings = [padfn(maxwidth, s) for s in strings]
else:
# enable wide-character width corrections
s_lens = list(map(len, strings))
visible_widths = [maxwidth - (w - l)
for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = [padfn(w, s)
for s, w in zip(strings, visible_widths)]
return padded_strings
def _more_generic(type1, type2):
types = {_none_type: 0,
_bool_type: 1,
int: 2,
float: 3,
_binary_type: 4,
_text_type: 5}
invtypes = {5: _text_type,
4: _binary_type,
3: float,
2: int,
1: _bool_type,
0: _none_type}
moregeneric = max(types.get(type1, 5), types.get(type2, 5))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True, numparse=True):
"""The least generic type all column values are convertible to.
>>> _column_type([True, False]) is _bool_type
True
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible, numparse)
for s in strings]
return reduce(_more_generic, types, _bool_type)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
"""Format a value according to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
""" # noqa
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
is_a_colored_number = (has_invisible and
isinstance(val, (_text_type, _binary_type)))
if is_a_colored_number:
raw_val = _strip_invisible(val)
formatted_val = format(float(raw_val), floatfmt)
return val.replace(raw_val, formatted_val)
else:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, is_multiline=False,
width_fn=None):
"Pad string header to width chars given known visible_width of the header."
if is_multiline:
header_lines = re.split(_multiline_codes, header)
padded_lines = [_align_header(h, alignment, width, width_fn(h))
for h in header_lines]
return "\n".join(padded_lines)
# else: not multiline
ninvisible = len(header) - visible_width
width += ninvisible
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v]+list(row) for v, row in zip(index, rows)]
return rows
def _bool(val):
"A wrapper around standard bool() which doesn't throw on NumPy arrays"
try:
return bool(val)
except ValueError: # val is likely to be a numpy array with many elements
return False
def _normalize_tabular_data(tabular_data, headers, showindex="default"):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
If showindex="default", show row indices of the pandas.DataFrame.
If showindex="always", show row indices for all types of data.
If showindex="never", don't show row indices for all types of data.
If showindex is an iterable, show its values as row indices.
"""
try:
bool(headers)
except ValueError: # numpy.ndarray, pandas.core.index.Index, ...
headers = list(headers)
index = None
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
# columns have to be transposed
rows = list(izip_longest(*tabular_data.values()))
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a
# pandas.DataFrame (pandas 0.11.0)
keys = list(tabular_data)
if tabular_data.index.name is not None:
if isinstance(tabular_data.index.name, list):
keys[:0] = tabular_data.index.name
else:
keys[:0] = [tabular_data.index.name]
# values matrix doesn't need to be transposed
vals = tabular_data.values
# for DataFrames add an index per default
index = list(tabular_data.index)
rows = [list(row) for row in vals]
else:
raise ValueError(
"tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type, keys)) # headers should be strings
else: # it's a plain iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and not rows):
# an empty table (issue #81)
headers = []
elif (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
# Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError(
'headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif (headers == "keys"
and hasattr(tabular_data, "description")
and hasattr(tabular_data, "fetchone")
and hasattr(tabular_data, "rowcount")):
# Python Database API cursor object (PEP 0249)
# print tabulate(cursor, headers='keys')
headers = [column[0] for column in tabular_data.description]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
if index is not None:
headers = [index[0]] + list(rows[0])
index = index[1:]
else:
headers = rows[0]
headers = list(map(_text_type, headers)) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type, headers))
rows = list(map(list, rows))
# add or remove an index column
showindex_is_a_str = type(showindex) in [_text_type, _binary_type]
if showindex == "default" and index is not None:
rows = _prepend_row_index(rows, index)
elif isinstance(showindex, Iterable) and not showindex_is_a_str:
rows = _prepend_row_index(rows, list(showindex))
elif showindex == "always" or (_bool(showindex) and
not showindex_is_a_str):
if index is None:
index = list(range(len(rows)))
rows = _prepend_row_index(rows, index)
elif showindex == "never" or (not _bool(showindex) and
not showindex_is_a_str):
pass
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
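# Illustrative sketch (added; values worked out by hand from the code above,
# not a doctest from the original source): a dict of columns with
# headers="keys" is transposed into rows and its keys become the headers, e.g.
#   _normalize_tabular_data({"a": [1, 2], "b": [3, 4]}, headers="keys")
#   returns ([[1, 3], [2, 4]], ['a', 'b'])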
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt=_DEFAULT_FLOATFMT, numalign="decimal", stralign="left",
missingval=_DEFAULT_MISSINGVAL, showindex="default",
disable_numparse=False, colalign=None):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
By default, pandas.DataFrame data have an additional column called
row index. To add a similar column to all other types of data,
use `showindex="always"` or `showindex=True`. To suppress row indices
for all types of data, pass `showindex="never"` or `showindex=False`.
To add a custom row index column, pass `showindex=some_iterable`.
>>> print(tabulate([["F",24],["M",19]], showindex="always"))
- - --
0 F 24
1 M 19
- - --
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point. This can also be
a list or tuple of format strings, one per column.
`None` values are replaced with a `missingval` string (like
`floatfmt`, this can also be a list of values for different
columns):
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', 'latex_raw' and 'latex_booktabs'. Variable `tabulate_formats`
contains the list of currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
"presto" is like tables produce by the Presto CLI:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "presto"))
strings | numbers
-----------+-----------
spam | 41.9999
eggs | 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<thead>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
</thead>
<tbody>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</tbody>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_raw" is similar to "latex", but doesn't escape special characters,
such as backslash and underscore, so LaTeX commands may be embedded into
cells' values:
>>> print(tabulate([["spam$_9$", 41.9999], ["\\\\emph{eggs}", "451.0"]], tablefmt="latex_raw"))
\\begin{tabular}{lr}
\\hline
spam$_9$ & 41.9999 \\\\
\\emph{eggs} & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
\\end{tabular}
Number parsing
--------------
By default, anything which can be parsed as a number is a number.
This ensures numbers represented as strings are aligned properly.
This can lead to weird results for particular strings such as
specific git SHAs, e.g. "42992e1", which will be parsed into the
number 429920 and aligned as such.
To completely disable number parsing (and alignment), use
`disable_numparse=True`. For more fine-grained control, a list of column
indices can be used to disable number parsing only on those columns,
e.g. `disable_numparse=[0, 2]` would disable number parsing only on the
first and third columns.
""" # noqa
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(
tabular_data, headers, showindex=showindex)
# empty values in the first column of RST tables should be
# escaped (issue #82)
# "" should be escaped as "\\ " or ".."
if tablefmt == 'rst':
list_of_lists, headers = _rst_escape_first_column(list_of_lists,
headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\t'.join(['\t'.join(map(_text_type, headers))] +
['\t'.join(map(_text_type, row))
for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
if tablefmt in multiline_formats and _is_multiline(plain_text):
tablefmt = multiline_formats.get(tablefmt, tablefmt)
is_multiline = True
else:
is_multiline = False
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(izip_longest(*list_of_lists))
numparses = _expand_numparse(disable_numparse, len(cols))
coltypes = [_column_type(col, numparse=np) for col, np in
zip(cols, numparses)]
if isinstance(floatfmt, basestring): # old version
# just duplicate the string to use in each column
float_formats = len(cols) * [floatfmt]
else: # if floatfmt is list, tuple etc we have one per column
float_formats = list(floatfmt)
if len(float_formats) < len(cols):
float_formats.extend((len(cols)-len(float_formats)) *
[_DEFAULT_FLOATFMT])
if isinstance(missingval, basestring):
missing_vals = len(cols) * [missingval]
else:
missing_vals = list(missingval)
if len(missing_vals) < len(cols):
missing_vals.extend((len(cols)-len(missing_vals)) *
[_DEFAULT_MISSINGVAL])
cols = [[_format(v, ct, fl_fmt, miss_v, has_invisible) for v in c]
for c, ct, fl_fmt, miss_v in zip(cols, coltypes, float_formats,
missing_vals)]
# align columns
aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
if colalign is not None:
assert isinstance(colalign, Iterable)
for idx, align in enumerate(colalign):
aligns[idx] = align
minwidths = [width_fn(h) + MIN_PADDING
for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars,
is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, max(width_fn(cl)
for cl in c)) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), is_multiline,
width_fn)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [max(width_fn(cl) for cl in c) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns,
is_multiline)
def _expand_numparse(disable_numparse, column_count):
"""
Return a list of bools of length `column_count` which indicates whether
number parsing should be used on each column.
If `disable_numparse` is a list of indices, each of those indices are
False, and everything else is True.
If `disable_numparse` is a bool, then the returned list is all the same.
"""
if isinstance(disable_numparse, Iterable):
numparses = [True] * column_count
for index in disable_numparse:
numparses[index] = False
return numparses
else:
return [not disable_numparse] * column_count
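# Illustrative mapping (comments only, derived from the function above):
#     _expand_numparse(True, 3)     -> [False, False, False]  (parsing disabled everywhere)
#     _expand_numparse(False, 3)    -> [True, True, True]
#     _expand_numparse([0, 2], 4)   -> [False, True, False, True]
# which mirrors how the ``disable_numparse`` argument of ``tabulate`` is
# interpreted per column.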
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths,
colaligns, rowfmt, pad):
colwidths = [w - 2*pad for w in padded_widths]
cells_lines = [c.splitlines() for c in padded_multiline_cells]
nlines = max(map(len, cells_lines)) # number of lines in the row
# vertically pad cells where some lines are missing
cells_lines = [(cl + [' '*w]*(nlines - len(cl)))
for cl, w in zip(cells_lines, colwidths)]
lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
for ln in lines_cells:
padded_ln = _pad_row(ln, pad)
_append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
return lines
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _append_line(lines, colwidths, colaligns, linefmt):
lines.append(_build_line(colwidths, colaligns, linefmt))
return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
if is_multiline:
# do it later, in _append_multiline_row
pad_row = lambda row, _: row # noqa
append_row = partial(_append_multiline_row, pad=pad)
else:
pad_row = _pad_row
append_row = _append_basic_row
padded_headers = pad_row(headers, pad)
padded_rows = [pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.lineabove)
if padded_headers:
append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
if fmt.linebelowheader and "linebelowheader" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
_append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
# the last row without a line below
append_row(lines, padded_rows[-1], padded_widths, colaligns,
fmt.datarow)
else:
for row in padded_rows:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
if fmt.linebelow and "linebelow" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelow)
if headers or rows:
return "\n".join(lines)
else: # a completely empty table
return ""
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data.
See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-o FILE, --output FILE print table to FILE (default: stdout)
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-F FPFMT, --float FPFMT floating point number format (default: g)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_raw,
latex_booktabs, tsv
(default: simple)
""" # noqa
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
opts, args = getopt.getopt(sys.argv[1:],
"h1o:s:F:A:f:",
["help", "header", "output", "sep=",
"float=", "align=", "format="])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
floatfmt = _DEFAULT_FLOATFMT
colalign = None
tablefmt = "simple"
sep = r"\s+"
outfile = "-"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-o", "--output"]:
outfile = value
elif opt in ["-F", "--float"]:
floatfmt = value
elif opt in ["-C", "--colalign"]:
colalign = value.split()
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out,
colalign=colalign)
else:
with open(f) as fobj:
_pprint_file(fobj, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out,
colalign=colalign)
def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, file, colalign):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows if r.strip()]
print(tabulate(table, headers, tablefmt, floatfmt=floatfmt,
colalign=colalign), file=file)
if __name__ == "__main__":
_main()
|
lgpl-3.0
|
simonsgit/bachelor_stuff
|
tests/test1.py
|
1
|
2169
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 12:12:29 2015
@author: stamylew
"""
import factorial
print "__name__ is:", __name__
import numpy as np
from factorial import factorial
import matplotlib as mpl
from matplotlib import pyplot as plt
#for i in range (1,101):
# if i % 3 == 0 and i % 5 == 0:
# print "fizzbuzz"
# elif i % 3 == 0:
# print "fizz"
# elif i % 5 == 0:
# print "buzz"
# else:
# print i
#s = "hello"
#print s[1:]
#print s[2:4]
#print s[0:-1:1]
#print s[::2]
#import math
#from math import pi as circle_number
#print "cos(pi)=%f" % (math.cos(circle_number),)
#a = np.zeros((4,6), dtype=np.uint8)
#print a.ndim
#print a.shape[1]
#b = np.random.random(a.shape)
#c = a + b
#d = a * b
#e = a / (b +1)
#f = np.sqrt(b)
#print b
#a[:] = 1
#a[1,2] = 2
#s = np.sum(b)
#assert s == 7
#print s
#a[:,0] = 42
#a[0,...] = 42
#b = a[:,0:2]
#b = np.zeros((150,200), dtype=np.uint8)
#b[:,100] = 1
#b[75,:] = 1
#print b
#def f(x):
# return np.sin(x)
#x = np.arange(-1.0, 1.0, 0.01)
#plt.xlabel('x')
#plt.ylabel('y')
#plt.title('Graph of sin(x) from -1 to 1')
#plt.plot(x, f(x))
#plt.axis([-1,1,-1,1])
#plt.savefig('graph')
#print factorial(5)
#
#a = np.asarray([[2,5,4,3,1,],[1,2,1,5,7]])
#print a
#print np.where(a == 4)
#print a[np.where(a == 1)]
a = [[4, 0, -1],
[2, 5, 4],
[0, 0, 5]]
print a
print "eigenvectors: ", np.linalg.eig(a)
a = np.array(a)
evs = np.array([[ 0.00000000e+00, 4.47213595e-01, -5.55111512e-16],
[ 1.00000000e+00, -8.94427191e-01, -1.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 5.55111512e-16]])
invevs = np.linalg.inv(evs)
#print invevs
l = [[5, 0, 0],
[0, 4, 0],
[0, 0 ,5]]
#print l
b = np.dot(l, invevs)
#print b
c = np.dot(evs, b)
#print c
print np.dot(a, evs)
#import vigra
#
#vigra.impex.writeHDF5(evs, "data_delme.hdf5", "blubb", compression="lzf")
#
#l = vigra.readHDF5("data_delme.hdf5", "blubb")
#a = 5
#b = a == 3
#print b
#print np.shape(evs) == (3, 2)
#assert(np.shape(evs) == (3, 2)), "the shape schould be (3, 3)"
#print "atest", a[:-1, :]
#
##t = np.array([1,34,5])
#print np.dot(a, evs[0])
#print evs[0] * 5
|
mit
|
btabibian/scikit-learn
|
sklearn/cluster/birch.py
|
11
|
23640
|
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
    expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
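# Illustrative behaviour (comments only): for a 2x3 CSR matrix with rows
# [1, 0, 2] and [0, 3, 0], ``_iterate_sparse_X`` yields the dense arrays
# array([1., 0., 2.]) and array([0., 3., 0.]) one row at a time, without ever
# materialising the whole matrix densely.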
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True, to retrieve
        the final subclusters.
    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        Manipulate ``init_centroids_`` throughout rather than ``centroids_``,
        since ``centroids_`` is just a view of ``init_centroids_``.
    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        Manipulate ``init_sq_norm_`` throughout; similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
        Child node of the subcluster. Once a _CFNode is set as the child
        of this subcluster, it is stored in ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
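        # sq_radius simplifies to SS/n - ||centroid||^2, i.e. the mean squared
        # distance of the merged samples from their centroid, so the check
        # below enforces the radius criterion of the CF tree.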
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
        The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
        subcluster is started. Setting this value very low promotes
        splitting and vice-versa.
    branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
        that node is split into two nodes with the subclusters redistributed
        in each. The parent subcluster of that node is removed and two new
        subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model, default 3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- `sklearn.cluster` Estimator : If a model is provided, the model is
fit treating the subclusters as new samples and the initial data is
mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince us to use Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
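# Illustrative usage sketch (comments only, not part of the library): the tree
# can also be grown incrementally with ``partial_fit`` and the global
# clustering step re-run at the end by calling ``partial_fit()`` with no data:
#
#     brc = Birch(n_clusters=3)
#     for X_batch in batches:        # ``batches`` is a hypothetical iterable of arrays
#         brc.partial_fit(X_batch)
#     brc.partial_fit()              # X=None: only the global clustering step is redone
#     labels = brc.predict(X_new)    # ``X_new`` is hypothetical new data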
|
bsd-3-clause
|
justincassidy/scikit-learn
|
examples/linear_model/plot_lasso_model_selection.py
|
311
|
5431
|
"""
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom; the criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regard
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. It is also able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large number of features. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested-cross validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
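# Illustrative sketch (commented out; not part of the original example): the
# nested cross-validation mentioned in the module docstring would wrap the
# alpha-selecting estimator itself in an outer CV loop, e.g.
#
#     from sklearn import cross_validation
#     outer_scores = cross_validation.cross_val_score(LassoCV(cv=20), X, y, cv=5)
#     print(outer_scores.mean())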
|
bsd-3-clause
|
kagayakidan/scikit-learn
|
examples/model_selection/plot_underfitting_overfitting.py
|
230
|
2649
|
"""
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate **overfitting** / **underfitting** quantitatively by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set: the higher it is, the less likely the model is to generalize correctly
from the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
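    # With scoring="mean_squared_error" the returned scores are negated MSE
    # values (greater is better), hence the sign flip in the title below.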
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
|
bsd-3-clause
|
vybstat/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
57
|
8062
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
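    # 1e-30 and 1e-20 are below float64 precision relative to 1, so the three
    # rows are numerically identical and all kernel PCA eigenvalues are
    # (numerically) zero.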
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
|
bsd-3-clause
|
ronanki/merlin
|
src/keras_lib/data_utils.py
|
1
|
14709
|
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://github.com/CSTR-Edinburgh/merlin
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import os, sys
import regex as re
import time
import random
import numpy as np
from sklearn import preprocessing
from io_funcs.binary_io import BinaryIOCollection
############################
##### Memory variables #####
############################
UTT_BUFFER_SIZE = 10000
FRAME_BUFFER_SIZE = 3000000
def read_data_from_file_list(inp_file_list, out_file_list, inp_dim, out_dim, sequential_training=True):
io_funcs = BinaryIOCollection()
num_of_utt = len(inp_file_list)
file_length_dict = {'framenum2utt':{}, 'utt2framenum':{}}
if sequential_training:
temp_set_x = {}
temp_set_y = {}
else:
temp_set_x = np.empty((FRAME_BUFFER_SIZE, inp_dim))
temp_set_y = np.empty((FRAME_BUFFER_SIZE, out_dim))
### read file by file ###
current_index = 0
for i in xrange(num_of_utt):
inp_file_name = inp_file_list[i]
out_file_name = out_file_list[i]
inp_features, inp_frame_number = io_funcs.load_binary_file_frame(inp_file_name, inp_dim)
out_features, out_frame_number = io_funcs.load_binary_file_frame(out_file_name, out_dim)
base_file_name = os.path.basename(inp_file_name).split(".")[0]
if abs(inp_frame_number-out_frame_number)>5:
print 'the number of frames in input and output features are different: %d vs %d (%s)' %(inp_frame_number, out_frame_number, base_file_name)
sys.exit(0)
else:
frame_number = min(inp_frame_number, out_frame_number)
if sequential_training:
temp_set_x[base_file_name] = inp_features[0:frame_number]
temp_set_y[base_file_name] = out_features[0:frame_number]
else:
temp_set_x[current_index:current_index+frame_number, ] = inp_features[0:frame_number]
temp_set_y[current_index:current_index+frame_number, ] = out_features[0:frame_number]
current_index += frame_number
if frame_number not in file_length_dict['framenum2utt']:
file_length_dict['framenum2utt'][frame_number] = [base_file_name]
else:
file_length_dict['framenum2utt'][frame_number].append(base_file_name)
file_length_dict['utt2framenum'][base_file_name] = frame_number
drawProgressBar(i+1, num_of_utt)
sys.stdout.write("\n")
if not sequential_training:
temp_set_x = temp_set_x[0:current_index, ]
temp_set_y = temp_set_y[0:current_index, ]
return temp_set_x, temp_set_y, file_length_dict
def read_test_data_from_file_list(inp_file_list, inp_dim, sequential_training=True):
io_funcs = BinaryIOCollection()
num_of_utt = len(inp_file_list)
file_length_dict = {'framenum2utt':{}, 'utt2framenum':{}}
if sequential_training:
temp_set_x = {}
else:
temp_set_x = np.empty((FRAME_BUFFER_SIZE, inp_dim))
### read file by file ###
current_index = 0
for i in xrange(num_of_utt):
inp_file_name = inp_file_list[i]
inp_features, frame_number = io_funcs.load_binary_file_frame(inp_file_name, inp_dim)
base_file_name = os.path.basename(inp_file_name).split(".")[0]
if sequential_training:
temp_set_x[base_file_name] = inp_features
else:
temp_set_x[current_index:current_index+frame_number, ] = inp_features[0:frame_number]
current_index += frame_number
if frame_number not in file_length_dict['framenum2utt']:
file_length_dict['framenum2utt'][frame_number] = [base_file_name]
else:
file_length_dict['framenum2utt'][frame_number].append(base_file_name)
file_length_dict['utt2framenum'][base_file_name] = frame_number
drawProgressBar(i+1, num_of_utt)
sys.stdout.write("\n")
if not sequential_training:
temp_set_x = temp_set_x[0:current_index, ]
return temp_set_x, file_length_dict
def transform_data_to_3d_matrix(data, seq_length=200, max_length=0, merge_size=1, shuffle_data = True, shuffle_type = 1, padding="right"):
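    # Pack per-utterance feature matrices into a 3-D array: if ``max_length``
    # is given, each utterance becomes one (max_length, feat_dim) slice padded
    # on the right (or left); otherwise utterances are concatenated
    # (optionally shuffled and merged) and cut into windows of ``seq_length``.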
num_of_utt = len(data)
feat_dim = data[data.keys()[0]].shape[1]
if max_length > 0:
temp_set = np.zeros((num_of_utt, max_length, feat_dim))
### read file by file ###
current_index = 0
for base_file_name, in_features in data.iteritems():
frame_number = min(in_features.shape[0], max_length)
if padding=="right":
temp_set[current_index, 0:frame_number, ] = in_features
else:
temp_set[current_index, -frame_number:, ] = in_features
current_index += 1
else:
temp_set = np.zeros((FRAME_BUFFER_SIZE, feat_dim))
train_idx_list = data.keys()
train_idx_list.sort()
if shuffle_data:
if shuffle_type == 1:
train_idx_list = shuffle_file_list(train_idx_list)
elif shuffle_type == 2:
train_idx_list = shuffle_file_list(train_idx_list, shuffle_type=2, merge_size=merge_size)
### read file by file ###
current_index = 0
for file_number in xrange(num_of_utt):
base_file_name = train_idx_list[file_number]
in_features = data[base_file_name]
frame_number = in_features.shape[0]
temp_set[current_index:current_index+frame_number, ] = in_features
current_index += frame_number
if (file_number+1)%merge_size == 0:
current_index = seq_length * (int(np.ceil(float(current_index)/float(seq_length))))
num_of_samples = int(np.ceil(float(current_index)/float(seq_length)))
temp_set = temp_set[0: num_of_samples*seq_length, ]
temp_set = temp_set.reshape(-1, seq_length, feat_dim)
return temp_set
def read_and_transform_data_from_file_list(in_file_list, dim, seq_length=200, merge_size=1):
io_funcs = BinaryIOCollection()
num_of_utt = len(in_file_list)
temp_set = np.zeros((FRAME_BUFFER_SIZE, dim))
### read file by file ###
current_index = 0
for i in range(num_of_utt):
in_file_name = in_file_list[i]
in_features, frame_number = io_funcs.load_binary_file_frame(in_file_name, dim)
base_file_name = os.path.basename(in_file_name).split(".")[0]
temp_set[current_index:current_index+frame_number, ] = in_features
current_index += frame_number
if (i+1)%merge_size == 0:
current_index = seq_length * (int(np.ceil(float(current_index)/float(seq_length))))
drawProgressBar(i+1, num_of_utt)
sys.stdout.write("\n")
num_of_samples = int(np.ceil(float(current_index)/float(seq_length)))
temp_set = temp_set[0: num_of_samples*seq_length, ]
temp_set = temp_set.reshape(num_of_samples, seq_length)
return temp_set
def merge_data(train_x, train_y, merge_size):
temp_train_x = {}
temp_train_y = {}
train_id_list = train_x.keys()
train_file_number = len(train_id_list)
train_id_list.sort()
inp_dim = train_x[train_id_list[0]].shape[1]
out_dim = train_y[train_id_list[0]].shape[1]
merged_features_x = np.zeros((0, inp_dim))
merged_features_y = np.zeros((0, out_dim))
new_file_count = 0
for file_index in xrange(1, train_file_number+1):
inp_features = train_x[train_id_list[file_index-1]]
out_features = train_y[train_id_list[file_index-1]]
merged_features_x = np.vstack((merged_features_x, inp_features))
merged_features_y = np.vstack((merged_features_y, out_features))
if file_index % merge_size == 0 or file_index==train_file_number:
base_file_name = "new_utterance_%04d" % (new_file_count)
temp_train_x[base_file_name] = merged_features_x
temp_train_y[base_file_name] = merged_features_y
new_file_count += 1
merged_features_x = np.zeros((0, inp_dim))
merged_features_y = np.zeros((0, out_dim))
return temp_train_x, temp_train_y
def shuffle_file_list(train_idx_list, shuffle_type=1, merge_size=5):
### shuffle train id list ###
random.seed(271638)
train_file_number = len(train_idx_list)
if shuffle_type==1: ## shuffle by sentence
random.shuffle(train_idx_list)
return train_idx_list
elif shuffle_type==2: ## shuffle by a group of sentences
id_numbers = range(0, train_file_number, merge_size)
random.shuffle(id_numbers)
new_train_idx_list = []
for i in xrange(len(id_numbers)):
new_train_idx_list += train_idx_list[id_numbers[i]:id_numbers[i]+merge_size]
return new_train_idx_list
def get_stateful_data(train_x, train_y, batch_size):
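    # Reorder frames for stateful RNN training: the data is split into
    # ``batch_size`` contiguous streams and batch i holds the i-th frame of
    # every stream, so position j of consecutive batches walks through
    # stream j in order.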
num_of_batches = int(train_x.shape[0]/batch_size)
train_x = train_x[0: num_of_batches*batch_size, ]
train_y = train_y[0: num_of_batches*batch_size, ]
stateful_seq = np.zeros(num_of_batches*batch_size, dtype="int32")
for i in xrange(num_of_batches):
stateful_seq[i*batch_size:(i+1)*batch_size] = np.array(range(batch_size))*num_of_batches+i
temp_train_x = train_x[stateful_seq]
temp_train_y = train_y[stateful_seq]
return temp_train_x, temp_train_y
def get_stateful_input(test_x, seq_length, batch_size=1):
[n_frames, n_dim] = test_x.shape
num_of_samples = batch_size*seq_length
num_of_batches = int(n_frames/num_of_samples) + 1
new_data_size = num_of_batches*num_of_samples
temp_test_x = np.zeros((new_data_size, n_dim))
temp_test_x[0: n_frames, ] = test_x
temp_test_x = temp_test_x.reshape(-1, seq_length, n_dim)
return temp_test_x
def compute_norm_stats(data, stats_file, method="MVN"):
#### normalize training data ####
io_funcs = BinaryIOCollection()
if method=="MVN":
scaler = preprocessing.StandardScaler().fit(data)
norm_matrix = np.vstack((scaler.mean_, scaler.scale_))
elif method=="MINMAX":
scaler = preprocessing.MinMaxScaler(feature_range=(0.01, 0.99)).fit(data)
norm_matrix = np.vstack((scaler.min_, scaler.scale_))
print norm_matrix.shape
io_funcs.array_to_binary_file(norm_matrix, stats_file)
return scaler
def load_norm_stats(stats_file, dim, method="MVN"):
#### load norm stats ####
io_funcs = BinaryIOCollection()
norm_matrix, frame_number = io_funcs.load_binary_file_frame(stats_file, dim)
assert frame_number==2
if method=="MVN":
scaler = preprocessing.StandardScaler()
scaler.mean_ = norm_matrix[0, :]
scaler.scale_ = norm_matrix[1, :]
elif method=="MINMAX":
scaler = preprocessing.MinMaxScaler(feature_range=(0.01, 0.99))
scaler.min_ = norm_matrix[0, :]
scaler.scale_ = norm_matrix[1, :]
return scaler
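# Illustrative round trip (comments only; file names and variables are
# hypothetical):
#
#     scaler = compute_norm_stats(train_frames, "inp_norm.dat", method="MVN")
#     norm_data(train_x, scaler, sequential_training=True)   # dict normalised in place
#     ...
#     scaler = load_norm_stats("inp_norm.dat", inp_dim, method="MVN")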
def norm_data(data, scaler, sequential_training=True):
    if scaler is None:
        return data
    #### normalize data ####
    if not sequential_training:
        # rebinding the local name alone would have no effect for the caller,
        # so return the normalized array instead
        return scaler.transform(data)
    else:
        # dictionaries of utterances are normalized in place
        for filename, features in data.iteritems():
            data[filename] = scaler.transform(features)
        return data
def denorm_data(data, scaler):
    if scaler is None:
        return data
    #### de-normalize data ####
    # return the de-normalized array (assigning to the local name alone
    # would be a no-op for the caller)
    return scaler.inverse_transform(data)
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
if not os.path.exists(file_dir) and new_dir_switch:
os.makedirs(file_dir)
file_name_list = []
for file_id in file_id_list:
file_name = file_dir + '/' + file_id + file_extension
file_name_list.append(file_name)
return file_name_list
def read_file_list(file_name):
file_lists = []
fid = open(file_name)
for line in fid.readlines():
line = line.strip()
if len(line) < 1:
continue
file_lists.append(line)
fid.close()
return file_lists
def print_status(i, length):
pr = int(float(i)/float(length)*100)
st = int(float(pr)/7)
sys.stdout.write(("\r%d/%d ")%(i,length)+("[ %d"%pr+"% ] <<< ")+('='*st)+(''*(100-st)))
sys.stdout.flush()
def drawProgressBar(indx, length, barLen = 20):
percent = float(indx)/length
sys.stdout.write("\r")
progress = ""
for i in range(barLen):
if i < int(barLen * percent):
progress += "="
else:
progress += " "
sys.stdout.write("[%s] <<< %d/%d (%d%%)" % (progress, indx, length, percent * 100))
sys.stdout.flush()
|
apache-2.0
|
shangwuhencc/scikit-learn
|
examples/applications/plot_out_of_core_classification.py
|
255
|
13919
|
"""
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the feature space remains the same over time, we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
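# Illustrative helper (not called anywhere): the generator above yields plain
# dicts with 'title', 'body' and 'topics', so a handful of documents can be
# inspected like this.
def _peek_at_documents(n=3):
    return list(itertools.islice(stream_reuters_documents(), n))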
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Build the iterator of mini-batches on top of the data_stream of parsed
# Reuters documents.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
|
bsd-3-clause
|
RobertABT/heightmap
|
build/matplotlib/examples/pylab_examples/newscalarformatter_demo.py
|
13
|
3313
|
#!/usr/bin/env python
# Demonstrating the improvements and options of the proposed new ScalarFormatter
from pylab import *
from matplotlib.ticker import OldScalarFormatter
x=frange(0,1,.01)
f=figure(figsize=(6,6))
f.text(0.5,0.975,'The old formatter',horizontalalignment='center',verticalalignment='top')
subplot(221)
plot(x*1e5+1e10,x*1e-10+1e-5)
gca().xaxis.set_major_formatter(OldScalarFormatter())
gca().yaxis.set_major_formatter(OldScalarFormatter())
subplot(222)
plot(x*1e5,x*1e-4)
gca().xaxis.set_major_formatter(OldScalarFormatter())
gca().yaxis.set_major_formatter(OldScalarFormatter())
subplot(223)
plot(-x*1e5-1e10,-x*1e-5-1e-10)
gca().xaxis.set_major_formatter(OldScalarFormatter())
gca().yaxis.set_major_formatter(OldScalarFormatter())
subplot(224)
plot(-x*1e5,-x*1e-4)
gca().xaxis.set_major_formatter(OldScalarFormatter())
gca().yaxis.set_major_formatter(OldScalarFormatter())
x=frange(0,1,.01)
f=figure(figsize=(6,6))
f.text(0.5,0.975,'The new formatter, default settings',horizontalalignment='center',
verticalalignment='top')
subplot(221)
plot(x*1e5+1e10,x*1e-10+1e-5)
gca().xaxis.set_major_formatter(ScalarFormatter())
gca().yaxis.set_major_formatter(ScalarFormatter())
subplot(222)
plot(x*1e5,x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter())
gca().yaxis.set_major_formatter(ScalarFormatter())
subplot(223)
plot(-x*1e5-1e10,-x*1e-5-1e-10)
gca().xaxis.set_major_formatter(ScalarFormatter())
gca().yaxis.set_major_formatter(ScalarFormatter())
subplot(224)
plot(-x*1e5,-x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter())
gca().yaxis.set_major_formatter(ScalarFormatter())
x=frange(0,1,.01)
f=figure(figsize=(6,6))
f.text(0.5,0.975,'The new formatter, no numerical offset',horizontalalignment='center',
verticalalignment='top')
subplot(221)
plot(x*1e5+1e10,x*1e-10+1e-5)
gca().xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
gca().yaxis.set_major_formatter(ScalarFormatter(useOffset=False))
subplot(222)
plot(x*1e5,x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
gca().yaxis.set_major_formatter(ScalarFormatter(useOffset=False))
subplot(223)
plot(-x*1e5-1e10,-x*1e-5-1e-10)
gca().xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
gca().yaxis.set_major_formatter(ScalarFormatter(useOffset=False))
subplot(224)
plot(-x*1e5,-x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
gca().yaxis.set_major_formatter(ScalarFormatter(useOffset=False))
x=frange(0,1,.01)
f=figure(figsize=(6,6))
f.text(0.5,0.975,'The new formatter, with mathtext',horizontalalignment='center',
verticalalignment='top')
subplot(221)
plot(x*1e5+1e10,x*1e-10+1e-5)
gca().xaxis.set_major_formatter(ScalarFormatter(useMathText=True))
gca().yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
subplot(222)
plot(x*1e5,x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter(useMathText=True))
gca().yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
subplot(223)
plot(-x*1e5-1e10,-x*1e-5-1e-10)
gca().xaxis.set_major_formatter(ScalarFormatter(useMathText=True))
gca().yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
subplot(224)
plot(-x*1e5,-x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter(useMathText=True))
gca().yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
show()
|
mit
|
cauchycui/scikit-learn
|
examples/feature_stacker.py
|
246
|
1906
|
"""
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
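# A short follow-up sketch (not part of the original example): once the grid
# search above has been fitted, the selected settings can be read off
# best_params_, and the refitted FeatureUnion inside best_estimator_ can be
# reused to transform data directly.
print(grid_search.best_params_)
print(grid_search.best_score_)
X_best_features = grid_search.best_estimator_.named_steps["features"].transform(X)
print(X_best_features.shape)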
|
bsd-3-clause
|
zihua/scikit-learn
|
sklearn/preprocessing/tests/test_label.py
|
34
|
18227
|
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
# one-class case defaults to negative label
# For dense case:
inp = ["pos", "pos", "pos", "pos"]
lb = LabelBinarizer(sparse_output=False)
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# For sparse case:
lb = LabelBinarizer(sparse_output=True)
got = lb.fit_transform(inp)
assert_true(issparse(got))
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got.toarray())
assert_array_equal(lb.inverse_transform(got.toarray()), inp)
lb = LabelBinarizer(sparse_output=False)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
le.fit(["apple", "orange"])
msg = "bad input shape"
assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
|
bsd-3-clause
|
brdfdr/trading-with-python
|
sandbox/spreadCalculations.py
|
78
|
1496
|
'''
Created on 28 Oct 2011
@author: jev
'''
from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os
symbols = ['SPY','IWM']
y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)
df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)
res = readBiggerScreener('CointPairs.csv')
#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp
#The dollar-neutral ratio is about 1 * SPY - 1.7 * IWM. You will get the spread = zero (or probably very near zero)
#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()
#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')
#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()
quote = yahooFinance.getQuote(symbols)
print quote
s = Spread(symbols,histClose=df, estimateBeta = False)
s.setLast(quote['last'])
s.setShares(Series({'SPY':1,'IWM':-1.7}))
print s
#s.value.plot()
#s.plot()
fig = plt.figure(2)
s.plot()
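# A short sketch of where a dollar-neutral share ratio like the -1.7 above can
# come from (illustration only; it assumes getQuote returned a 'last' value per
# symbol, as used when calling setLast above): match the dollar value of one
# SPY share with short IWM shares.
dollar_neutral_ratio = quote['last']['SPY'] / quote['last']['IWM']
print 'approx. dollar-neutral IWM shares per SPY share:', -dollar_neutral_ratio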
|
bsd-3-clause
|
lbishal/scikit-learn
|
sklearn/metrics/cluster/unsupervised.py
|
230
|
8281
|
""" Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of
    labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of
    labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
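# A tiny worked illustration of the formula documented above (toy data; this
# helper is not used by the library): four 1-D points in two tight clusters.
# For each sample, a is its mean distance within its own cluster and b its mean
# distance to the nearest other cluster, so (b - a) / max(a, b) is close to 1.
def _silhouette_toy_example():
    X = np.array([[0.0], [0.1], [5.0], [5.1]])
    labels = np.array([0, 0, 1, 1])
    return silhouette_samples(X, labels), silhouette_score(X, labels)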
|
bsd-3-clause
|
oztalha/Islamicity
|
islamicity.py
|
1
|
1051
|
# by Talha Oz
# @tozCSS
import json
import urllib
import pandas as pd
from countrycode import countrycode
#Extracted the table using Tabula from Rehman, Scheherazade S., and Hossein Askari.
# “How Islamic Are Islamic Countries?” Global Economy Journal 10, no. 2 (May 21, 2010)
# http://www.degruyter.com/abstract/j/gej.2010.10.2/gej.2010.10.2.1614/gej.2010.10.2.1614.xml
islam = pd.read_csv('Islamicity.csv')
ccodes = countrycode(codes=[ct.strip() for ct in islam.Country.tolist()], origin='country_name', target='iso2c')
# saved it as CSV and uploaded to Google Fusion Tables
# Scraped list from http://en.wikipedia.org/wiki/Member_states_of_the_Organisation_of_Islamic_Cooperation using kimono
results = json.load(urllib.urlopen("https://www.kimonolabs.com/api/bdhe04wo?apikey=VPXUp7hOkUYDIL3wC8GVMHFQcOW3O7HC"))
r = results.get('results').get('collection1')
oic = [ct[u'oic_countries'][u'text'] for ct in r]
oic_codes = countrycode(codes=oic, origin='country_name', target='iso2c')
# saved it as CSV and uploaded this to Google Fusion Tables as well
|
mit
|
vermouthmjl/scikit-learn
|
sklearn/neighbors/tests/test_approximate.py
|
55
|
19053
|
"""
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
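# A compact usage sketch of the estimator exercised by the tests below (toy
# data; defined for illustration only and not collected as a test).
def _lshforest_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    forest = LSHForest(n_estimators=5, random_state=0)
    ignore_warnings(forest.fit)(X)
    distances, indices = forest.kneighbors(X[:1], n_neighbors=3)
    return distances, indices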
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`.
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
    # after insertion, _fit_X holds the original plus the partially fitted samples
    assert_equal(lshf._fit_X.shape[0],
                 n_samples + n_samples_partial_fit)
    # each per-tree array of original indices grows accordingly
    assert_equal(len(lshf.original_indices_[0]),
                 n_samples + n_samples_partial_fit)
    # as does each tree
    assert_equal(len(lshf.trees_[1]),
                 n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
|
bsd-3-clause
|
buguen/pylayers
|
pylayers/antprop/tests/test_reciprocity.py
|
2
|
1761
|
from pylayers.antprop.rays import *
from pylayers.gis.layout import *
from pylayers.antprop.signature import Signatures
import pylayers.signal.bsignal as bs
import pylayers.signal.waveform as wvf
from pylayers.simul.link import *
import matplotlib.pyplot as plt
import time
print "======================="
print " start test_reciprocity.py "
print "======================="
S1 = DLink(L='defstr.ini')
S2 = DLink(L='defstr.ini')
S1.a = array([759, 1114, 1.0])
S1.b = array([767, 1114, 1.5])
S2.a = array([767, 1114, 1.5])
S2.b = array([759, 1114, 1.0])
fGHz = np.arange(2, 11, 0.1)
wav = wvf.Waveform(fcGHz=5, bandGHz=3)
#
# In one direction
#
S1.eval(force=1)
S2.eval(force=1)
######
####### then in the other direction
#######
######print "second ray"
#print '##############'
#print '# reciprocal #'
#print '##############'
#
#r2d2 = r2d.reciprocal()
####### get new reciprocal r3d
#r3d2 = r2d2.to3D(S.L)
#r3d2.locbas(S.L)
#
#r3d2.fillinter(S.L)
#C2=r3d2.eval(fGHz)
######C2.sort()
#sc2=C2.prop2tran()
#sc2.sort()
#chw2 = sc2.apply(wav.sfg)
#rir2 = chw2.rir(Nz=500,ffts=1)
#plt.imshow(rir2,interpolation='nearest',cmap=plt.cm.jet)
#plt.axis('auto')
#plt.figure()
#plt.imshow(np.log10(abs(rir2)),interpolation='nearest',cmap=plt.cm.jet)
#plt.axis('auto')
## sc2=C2.prop2tran()
## chw = sc2.apply(wav.sfg)
## cir = chw.rir(Nz=500,ffts=1)
## plt.imshow(cir.y[:,0,0,:],interpolation='nearest')
## plt.axis('auto')
## cir2 = sc2.applywavB(wav.sfg)
#######
#######print r3d1[2]['sig'][:,:,0]
#######print r3d2[2]['sig'][:,:,1]
#######
#######
#r3d1.check_reciprocity(r3d2)
#C1.check_reciprocity(C2)
## plt.figure()
## plt.plot(cir1.x,cir1.y[0,0,:],'b',cir2.x,cir2.y[0,0,:],'r')
## plt.axis('auto')
## plt.figure()
## plt.plot(cir1.x,cir1.y[0,0,:],'b',cir2.x,cir2.y[0,0,:],'r')
## plt.axis('auto')
|
lgpl-3.0
|
NSLS-II-XPD/ipython_ophyd
|
archived/profile_collection-dev/ipython_config.py
|
2
|
23016
|
# Configuration file for ipython.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = traitlets.Undefined
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = traitlets.Undefined
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = traitlets.Undefined
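# Example of explicitly setting the two options above (the concrete values are
# illustrative assumptions, not defaults; 'autoreload' is a standard IPython
# extension and the exec line is an arbitrary import):
# c.InteractiveShellApp.extensions = ['autoreload']
# c.InteractiveShellApp.exec_lines = ['import numpy as np']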
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.BaseIPythonApplication.copy_config_files = False
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.BaseIPythonApplication.ipython_dir = ''
# Whether to create profile dir if it doesn't exist
# c.BaseIPythonApplication.auto_create = False
# The IPython profile to use.
# c.BaseIPythonApplication.profile = 'default'
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.BaseIPythonApplication.extra_config_file = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.BaseIPythonApplication.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.BaseIPythonApplication.overwrite = False
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
#------------------------------------------------------------------------------
# InteractiveShell configuration
#------------------------------------------------------------------------------
# An enhanced, interactive shell for Python.
# The part of the banner to be printed after the profile
# c.InteractiveShell.banner2 = ''
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.InteractiveShell.ast_node_interactivity = 'last_expr'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.InteractiveShell.logstart = False
# Save multi-line entries as one entry in readline history
# c.InteractiveShell.multiline_history = True
#
# c.InteractiveShell.wildcards_case_sensitive = True
#
# c.InteractiveShell.readline_use = True
#
# c.InteractiveShell.quiet = False
#
# c.InteractiveShell.readline_remove_delims = '-/~'
#
# c.InteractiveShell.separate_out = ''
#
# c.InteractiveShell.history_length = 10000
#
# c.InteractiveShell.ipython_dir = ''
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.InteractiveShell.display_page = False
# Deprecated, will be removed in IPython 5.0, use PromptManager.in_template
# c.InteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable magic commands to be called without the leading %.
# c.InteractiveShell.automagic = True
# Don't call post-execute functions that have failed in the past.
# c.InteractiveShell.disable_failing_post_execute = False
# Deprecated, will be removed in IPython 5.0, use PromptManager.in2_template
# c.InteractiveShell.prompt_in2 = ' .\\D.: '
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.InteractiveShell.ast_transformers = traitlets.Undefined
# The name of the logfile to use.
# c.InteractiveShell.logfile = ''
# Show rewritten input, e.g. for autocall.
# c.InteractiveShell.show_rewritten_input = True
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a cache that is
# too small than actually working.
# c.InteractiveShell.cache_size = 1000
# Autoindent IPython code entered interactively.
# c.InteractiveShell.autoindent = True
#
# c.InteractiveShell.object_info_string_level = 0
# **Deprecated**
#
# Will be removed in IPython 6.0
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.InteractiveShell.deep_reload = False
#
# c.InteractiveShell.readline_parse_and_bind = traitlets.Undefined
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.InteractiveShell.autocall = 0
# Deprecated, will be removed in IPython 5.0, use PromptManager.justify
# c.InteractiveShell.prompts_pad_left = True
#
# c.InteractiveShell.debug = False
# Set the color scheme (NoColor, Linux, or LightBG).
# c.InteractiveShell.colors = 'Linux'
# Deprecated, will be removed in IPython 5.0, use PromptManager.out_template
# c.InteractiveShell.prompt_out = 'Out[\\#]: '
#
# c.InteractiveShell.separate_in = '\n'
# Automatically call the pdb debugger after every exception.
# c.InteractiveShell.pdb = False
#
# c.InteractiveShell.separate_out2 = ''
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.InteractiveShell.logappend = ''
# The part of the banner to be printed before the profile
# c.InteractiveShell.banner1 = 'Python 3.4.4 |Continuum Analytics, Inc.| (default, Jan 11 2016, 13:54:01) \nType "copyright", "credits" or "license" for more information.\n\nIPython 4.2.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.InteractiveShell.color_info = True
#
# c.InteractiveShell.xmode = 'Context'
# The number of saved history entries to be loaded into the readline buffer at
# startup.
# c.InteractiveShell.history_load_length = 1000
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vi'
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# HistoryAccessorBase configuration
#------------------------------------------------------------------------------
# An abstract class for History Accessors
#------------------------------------------------------------------------------
# HistoryAccessor configuration
#------------------------------------------------------------------------------
# Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryAccessor.connection_options = traitlets.Undefined
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryAccessor.enabled = True
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryAccessor.hist_file = ''
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# BaseFormatter configuration
#------------------------------------------------------------------------------
# A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format a given object.
#
# 1. The object is introspected to see if it has a method with the name
#    :attr:`print_method`. If it does, that object is passed to that method
#    for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
#
# c.BaseFormatter.enabled = True
#
# c.BaseFormatter.singleton_printers = traitlets.Undefined
#
# c.BaseFormatter.deferred_printers = traitlets.Undefined
#
# c.BaseFormatter.type_printers = traitlets.Undefined
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.max_width = 79
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.newline = '\n'
#------------------------------------------------------------------------------
# Completer configuration
#------------------------------------------------------------------------------
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
#------------------------------------------------------------------------------
# Magics configuration
#------------------------------------------------------------------------------
# Base class for implementing magic functions.
#
# Shell functions which can be reached as %function_name. All magic functions
# should accept a string, which they can parse for their own needs. This can
# make some functions easier to type, eg `%cd ../` vs. `%cd("../")`
#
# Classes providing magic functions need to subclass this class, and they MUST:
#
# - Use the method decorators `@line_magic` and `@cell_magic` to decorate
# individual methods as magic functions, AND
#
# - Use the class decorator `@magics_class` to ensure that the magic
# methods are properly registered at the instance level upon instance
# initialization.
#
# See :mod:`magic_functions` for examples of actual implementation classes.
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = traitlets.Undefined
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = traitlets.Undefined
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
|
bsd-2-clause
|
anntzer/scipy
|
scipy/spatial/_spherical_voronoi.py
|
5
|
13698
|
"""
Spherical Voronoi Code
.. versionadded:: 0.18.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as SciPy.
#
import warnings
import numpy as np
import scipy
from . import _voronoi
from scipy.spatial import cKDTree
__all__ = ['SphericalVoronoi']
def calculate_solid_angles(R):
"""Calculates the solid angles of plane triangles. Implements the method of
Van Oosterom and Strackee [VanOosterom]_ with some modifications. Assumes
that input points have unit norm."""
# Original method uses a triple product `R1 . (R2 x R3)` for the numerator.
# This is equal to the determinant of the matrix [R1 R2 R3], which can be
# computed with better stability.
numerator = np.linalg.det(R)
denominator = 1 + (np.einsum('ij,ij->i', R[:, 0], R[:, 1]) +
np.einsum('ij,ij->i', R[:, 1], R[:, 2]) +
np.einsum('ij,ij->i', R[:, 2], R[:, 0]))
return np.abs(2 * np.arctan2(numerator, denominator))
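# A minimal sanity sketch of the formula above (illustrative only, not part of
# the public API or test suite): the positive octant spanned by the unit vectors
# ex, ey, ez covers one eighth of the sphere, i.e. a solid angle of pi/2:
#
# >>> R = np.array([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]])
# >>> np.allclose(calculate_solid_angles(R), np.pi / 2)
# True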
class SphericalVoronoi:
""" Voronoi diagrams on the surface of a sphere.
.. versionadded:: 0.18.0
Parameters
----------
points : ndarray of floats, shape (npoints, ndim)
Coordinates of points from which to construct a spherical
Voronoi diagram.
radius : float, optional
Radius of the sphere (Default: 1)
center : ndarray of floats, shape (ndim,)
Center of sphere (Default: origin)
threshold : float
Threshold for detecting duplicate points and
mismatches between points and sphere parameters.
(Default: 1e-06)
Attributes
----------
points : double array of shape (npoints, ndim)
the points in `ndim` dimensions to generate the Voronoi diagram from
radius : double
radius of the sphere
center : double array of shape (ndim,)
center of the sphere
vertices : double array of shape (nvertices, ndim)
Voronoi vertices corresponding to points
regions : list of list of integers of shape (npoints, _ )
the n-th entry is a list consisting of the indices
of the vertices belonging to the n-th point in points
Methods
-------
calculate_areas
Calculates the areas of the Voronoi regions. For 2D point sets, the
regions are circular arcs. The sum of the areas is `2 * pi * radius`.
For 3D point sets, the regions are spherical polygons. The sum of the
areas is `4 * pi * radius**2`.
Raises
------
ValueError
If there are duplicates in `points`.
If the provided `radius` is not consistent with `points`.
Notes
-----
The spherical Voronoi diagram algorithm proceeds as follows. The Convex
Hull of the input points (generators) is calculated, and is equivalent to
their Delaunay triangulation on the surface of the sphere [Caroli]_.
The Convex Hull neighbour information is then used to
order the Voronoi region vertices around each generator. The latter
approach is substantially less sensitive to floating point issues than
angle-based methods of Voronoi region vertex sorting.
Empirical assessment of spherical Voronoi algorithm performance suggests
quadratic time complexity (loglinear is optimal, but algorithms are more
challenging to implement).
References
----------
.. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
points on or close to a sphere. Research Report RR-7004, 2009.
.. [VanOosterom] Van Oosterom and Strackee. The solid angle of a plane
triangle. IEEE Transactions on Biomedical Engineering,
2, 1983, pp 125--126.
See Also
--------
Voronoi : Conventional Voronoi diagrams in N dimensions.
Examples
--------
Do some imports and take some points on a cube:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import SphericalVoronoi, geometric_slerp
>>> from mpl_toolkits.mplot3d import proj3d
>>> # set input data
>>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
Calculate the spherical Voronoi diagram:
>>> radius = 1
>>> center = np.array([0, 0, 0])
>>> sv = SphericalVoronoi(points, radius, center)
Generate plot:
>>> # sort vertices (optional, helpful for plotting)
>>> sv.sort_vertices_of_regions()
>>> t_vals = np.linspace(0, 1, 2000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> # plot the unit sphere for reference (optional)
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
>>> # plot generator points
>>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
>>> # plot Voronoi vertices
>>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
... c='g')
>>> # indicate Voronoi regions (as Euclidean polygons)
>>> for region in sv.regions:
... n = len(region)
... for i in range(n):
... start = sv.vertices[region][i]
... end = sv.vertices[region][(i + 1) % n]
... result = geometric_slerp(start, end, t_vals)
... ax.plot(result[..., 0],
... result[..., 1],
... result[..., 2],
... c='k')
>>> ax.azim = 10
>>> ax.elev = 40
>>> _ = ax.set_xticks([])
>>> _ = ax.set_yticks([])
>>> _ = ax.set_zticks([])
>>> fig.set_size_inches(4, 4)
>>> plt.show()
"""
def __init__(self, points, radius=1, center=None, threshold=1e-06):
if radius is None:
radius = 1.
warnings.warn('`radius` is `None`. '
'This will raise an error in a future version. '
'Please provide a floating point number '
'(i.e. `radius=1`).',
DeprecationWarning)
self.radius = float(radius)
self.points = np.array(points).astype(np.double)
self._dim = self.points.shape[1]
if center is None:
self.center = np.zeros(self._dim)
else:
self.center = np.array(center, dtype=float)
# test degenerate input
self._rank = np.linalg.matrix_rank(self.points - self.points[0],
tol=threshold * self.radius)
if self._rank < self._dim:
raise ValueError("Rank of input points must be at least {0}".format(self._dim))
if cKDTree(self.points).query_pairs(threshold * self.radius):
raise ValueError("Duplicate generators present.")
radii = np.linalg.norm(self.points - self.center, axis=1)
max_discrepancy = np.abs(radii - self.radius).max()
if max_discrepancy >= threshold * self.radius:
raise ValueError("Radius inconsistent with generators.")
self._calc_vertices_regions()
def _calc_vertices_regions(self):
"""
Calculates the Voronoi vertices and regions of the generators stored
in self.points. The vertices will be stored in self.vertices and the
regions in self.regions.
This algorithm was discussed at PyData London 2015 by
Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
"""
# get Convex Hull
conv = scipy.spatial.ConvexHull(self.points)
# get circumcenters of Convex Hull triangles from facet equations
# for 3D input circumcenters will have shape: (2N-4, 3)
self.vertices = self.radius * conv.equations[:, :-1] + self.center
self._simplices = conv.simplices
# calculate regions from triangulation
# for 3D input simplex_indices will have shape: (2N-4,)
simplex_indices = np.arange(len(self._simplices))
# for 3D input tri_indices will have shape: (6N-12,)
tri_indices = np.column_stack([simplex_indices] * self._dim).ravel()
# for 3D input point_indices will have shape: (6N-12,)
point_indices = self._simplices.ravel()
# for 3D input indices will have shape: (6N-12,)
indices = np.argsort(point_indices, kind='mergesort')
# for 3D input flattened_groups will have shape: (6N-12,)
flattened_groups = tri_indices[indices].astype(np.intp)
# intervals will have shape: (N+1,)
intervals = np.cumsum(np.bincount(point_indices + 1))
# split flattened groups to get nested list of unsorted regions
groups = [list(flattened_groups[intervals[i]:intervals[i + 1]])
for i in range(len(intervals) - 1)]
self.regions = groups
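# Tiny worked illustration of the grouping above (the numbers are assumptions,
# chosen only to show the mechanics): with point_indices = [0, 2, 1, 0] and
# tri_indices = [0, 0, 1, 1], the stable argsort gives
# flattened_groups = [0, 1, 1, 0] and intervals = [0, 2, 3, 4], hence
# groups = [[0, 1], [1], [0]]: point 0 touches simplices 0 and 1,
# point 1 touches simplex 1, and point 2 touches simplex 0.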
def sort_vertices_of_regions(self):
"""Sort indices of the vertices to be (counter-)clockwise ordered.
Raises
------
TypeError
If the points are not three-dimensional.
Notes
-----
For each region in regions, it sorts the indices of the Voronoi
vertices such that the resulting points are in a clockwise or
counterclockwise order around the generator point.
This is done as follows: Recall that the n-th region in regions
surrounds the n-th generator in points and that the k-th
Voronoi vertex in vertices is the circumcenter of the k-th triangle
in self._simplices. For each region n, we choose the first triangle
(=Voronoi vertex) in self._simplices and a vertex of that triangle
not equal to the center n. These determine a unique neighbor of that
triangle, which is then chosen as the second triangle. The second
triangle will have a unique vertex not equal to the current vertex or
the center. This determines a unique neighbor of the second triangle,
which is then chosen as the third triangle and so forth. We proceed
through all the triangles (=Voronoi vertices) belonging to the
generator in points and obtain a sorted version of the vertices
of its surrounding region.
"""
if self._dim != 3:
raise TypeError("Only supported for three-dimensional point sets")
_voronoi.sort_vertices_of_regions(self._simplices, self.regions)
def _calculate_areas_3d(self):
self.sort_vertices_of_regions()
sizes = [len(region) for region in self.regions]
csizes = np.cumsum(sizes)
num_regions = csizes[-1]
# We create a set of triangles consisting of one point and two Voronoi
# vertices. The vertices of each triangle are adjacent in the sorted
# regions list.
point_indices = [i for i, size in enumerate(sizes)
for j in range(size)]
nbrs1 = np.array([r for region in self.regions for r in region])
# The calculation of nbrs2 is a vectorized version of:
# np.array([r for region in self.regions for r in np.roll(region, 1)])
nbrs2 = np.roll(nbrs1, 1)
indices = np.roll(csizes, 1)
indices[0] = 0
nbrs2[indices] = nbrs1[csizes - 1]
# Normalize points and vertices.
pnormalized = (self.points - self.center) / self.radius
vnormalized = (self.vertices - self.center) / self.radius
# Create the complete set of triangles and calculate their solid angles
triangles = np.hstack([pnormalized[point_indices],
vnormalized[nbrs1],
vnormalized[nbrs2]
]).reshape((num_regions, 3, 3))
triangle_solid_angles = calculate_solid_angles(triangles)
# Sum the solid angles of the triangles in each region
solid_angles = np.cumsum(triangle_solid_angles)[csizes - 1]
solid_angles[1:] -= solid_angles[:-1]
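# Illustration of the segment-sum trick above (the numbers are assumptions):
# with region sizes [2, 3], csizes = [2, 5]; for per-triangle angles
# [a, b, c, d, e], cumsum(...)[csizes - 1] gives [a+b, a+b+c+d+e] and the
# in-place difference leaves [a+b, c+d+e], i.e. one solid angle per region.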
# Get polygon areas using A = omega * r**2
return solid_angles * self.radius**2
def _calculate_areas_2d(self):
# Find start and end points of arcs
arcs = self.points[self._simplices] - self.center
# Calculate the angle subtended by arcs
cosine = np.einsum('ij,ij->i', arcs[:, 0], arcs[:, 1])
sine = np.abs(np.linalg.det(arcs))
theta = np.arctan2(sine, cosine)
# Get areas using A = r * theta
areas = self.radius * theta
# Correct arcs which go the wrong way (single-hemisphere inputs)
signs = np.sign(np.einsum('ij,ij->i', arcs[:, 0],
self.vertices - self.center))
indices = np.where(signs < 0)
areas[indices] = 2 * np.pi * self.radius - areas[indices]
return areas
def calculate_areas(self):
"""Calculates the areas of the Voronoi regions.
For 2D point sets, the regions are circular arcs. The sum of the areas
is `2 * pi * radius`.
For 3D point sets, the regions are spherical polygons. The sum of the
areas is `4 * pi * radius**2`.
.. versionadded:: 1.5.0
Returns
-------
areas : double array of shape (npoints,)
The areas of the Voronoi regions.
"""
if self._dim == 2:
return self._calculate_areas_2d()
elif self._dim == 3:
return self._calculate_areas_3d()
else:
raise TypeError("Only supported for 2D and 3D point sets")
|
bsd-3-clause
|
nvoron23/scikit-learn
|
examples/semi_supervised/plot_label_propagation_digits.py
|
268
|
2723
|
"""
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semi-supervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. A subset of 330
points is used here, and only 30 of them are labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mark the points without a known label with -1
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
|
bsd-3-clause
|
f3r/scikit-learn
|
examples/linear_model/lasso_dense_vs_sparse_data.py
|
348
|
1862
|
"""
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that, in the sparse case, it runs faster.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
|
bsd-3-clause
|
Circumstellar/ChannelMaps
|
chmaps.py
|
1
|
9863
|
#!/usr/bin/env python
# Reads everything from a config.yaml file located in the same directory.
import argparse
parser = argparse.ArgumentParser(description="Plot channel maps.")
parser.add_argument("--config", default="config.yaml", help="The configuration file specifying defaults.")
parser.add_argument("--measure", action="store_true", help="Just measure the basic properties, like the RMS and number of channels, so that you may add them into the config.yaml file.")
parser.add_argument("--fmt", default="pdf", help="What file format to save the maps in.")
args = parser.parse_args()
import yaml
f = open(args.config)
config = yaml.safe_load(f)
f.close()
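# A minimal sketch of the keys this script reads from config.yaml (the values
# below are illustrative assumptions; only the key names are taken from the
# lookups made elsewhere in this file):
#
#   telescope: ALMA          # selects common.read_ALMA or common.read_SMA
#   data: data.fits
#   model: model.fits
#   resid: resid.fits
#   trim: {blue: 0, red: 0}  # channels to drop from the blue/red ends
#   RMS_region: 10           # corner box size (pixels) used to estimate the RMS
#   RMS: 0.005               # adopted RMS, as printed by --measure
#   vvmax: 0.1               # colour-scale limit, as printed by --measure
#   nrows: 4
#   ncols: 6
#   radius: 2.0              # half-width of each panel [arcsec]
#   mu_RA: 0.0               # offset of the source centre
#   mu_DEC: 0.0
#   vsys: 5.0                # systemic velocity [km/s]
#   crosshairs: {major: 1.0, incl: 45.0, PA: 30.0}   # optional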
# Do the measuring operations here
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import chmaps_common as common
matplotlib.rc("contour", negative_linestyle="dashed")
matplotlib.rc("axes", linewidth=0.5)
matplotlib.rc("xtick.major", size=2)
matplotlib.rc("ytick.major", size=2)
from matplotlib.ticker import FormatStrFormatter as FSF
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import MultipleLocator
import matplotlib.patheffects as path_effects
if config["telescope"] == "ALMA":
readfn = common.read_ALMA
elif config["telescope"] == "SMA":
readfn = common.read_SMA
if args.measure:
# Use the dataset to actually measure quantities like the maximum intensity and RMS
# so that they can be replicated for the other types of measurements and put on the same scale.
vs, data, header = readfn(config["data"])
# Now, think about truncating the channels
low = config["trim"]["blue"]
high = config["trim"]["red"]
if high != 0:
vs = vs[low:-high]
data = data[low:-high]
else:
vs = vs[low:]
data = data[low:]
print("Data shape", data.shape)
print("Average velocity", np.average(vs))
# Before truncation, try to measure an average RMS away from the emission.
region = config["RMS_region"]
print("RMS", np.std(data[:,0:region,0:region]), "Jy/beam")
print("RMS", np.std(data[:,0:region,-region:-1]), "Jy/beam")
print("RMS", np.std(data[:,-region:-1,-region:-1]), "Jy/beam")
print("RMS", np.std(data[:,-region:-1,0:region]), "Jy/beam")
# If the data, model, and residual files are all available, determine the scaling
# factors from every channel of every dataset; otherwise fall back to the data alone.
try:
all_data = []
for kw in ["data", "model", "resid"]:
fname = config[kw]
vs, data, header = readfn(fname)
all_data.append(data)
all_data = np.concatenate(all_data)
except FileNotFoundError as e:
vs, data, header = readfn(config["data"])
all_data = data
vmin = np.min(all_data)
vmax = np.max(all_data)
vvmax = np.max(np.abs(all_data))
print("vvmax {:.4f}, vmin {:.4f}, vmax {:.4f}".format(vvmax, vmin, vmax))
import sys
sys.exit()
ax_size = 1.0 # in
margin = 0.5 # in. sides and bottom
nrows = config["nrows"]
ncols = config["ncols"]
radius = config["radius"]/3600. # [degrees]
rms = config["RMS"]
panel_width = ax_size * ncols # in
panel_height = ax_size * nrows # in
fig_width = panel_width + 2 * margin # in
fig_height = panel_height + 2 * margin # in
dx = ax_size / fig_width
dy = ax_size / fig_height
vvmax = config["vvmax"]
norm = matplotlib.colors.Normalize(-vvmax, vvmax)
cmap = plt.get_cmap("RdBu")
mu_RA = config["mu_RA"]
mu_DEC = config["mu_DEC"]
# If the ellipse is specified, read it and plot it
crosshairs = config.get("crosshairs", None)
if crosshairs:
a = crosshairs["major"]
incl = crosshairs["incl"]
PA = crosshairs["PA"]
b = a * math.cos(incl * np.pi/180.)
slope = math.tan(PA * np.pi/180.)
print("slope", slope)
# Because the crosshairs will be plotted on the channel map, where RA runs positive to the
# left, we will use a separate coordinate for RA plotting.
# if (PA > 0) and (PA < 90):
x_a = np.linspace(-a * math.cos(PA * np.pi/180.), a * math.cos(PA * np.pi/180.))
RA_a = -x_a
DEC_a = x_a * slope
x_b = np.linspace(-b * math.sin(PA * np.pi/180.), b * math.sin(PA * np.pi/180.))
RA_b = -x_b
DEC_b = -x_b / slope
def plot_maps(fits_name, fname):
try:
vs, data, header = readfn(fits_name)
except FileNotFoundError as e:
print("Cannot load {}, continuing.".format(fits_name))
return
if vs[-1] < vs[0]:
print("Reversing velocity to be increasing.")
vs = vs[::-1]
data = data[::-1]
# Now, think about truncating the channels
low = config["trim"]["blue"]
high = config["trim"]["red"]
if high != 0:
vs = vs[low:-high]
data = data[low:-high]
else:
vs = vs[low:]
data = data[low:]
nchan = data.shape[0]
dict = common.get_coords(data, header, radius, mu_RA, mu_DEC)
RA = 3600 * dict["RA"] # [arcsec]
DEC = 3600 * dict["DEC"] # [arcsec]
decl, decr = dict["DEC_slice"]
ral, rar = dict["RA_slice"]
data = dict["data"]
ext = (RA[0], RA[-1], DEC[0], DEC[-1]) # [arcsec]
print("Extent", ext)
# data = data[:, decl:decr, ral:rar]
# Using the systemic velocity, normalize the channel velocities to the interval
# [0, 1], with 0.5 corresponding to the systemic velocity itself. Note that the
# mapped values will generally not reach both 0.0 and 1.0: unless the systemic
# velocity is perfectly centred in the channel range, only one end of the
# interval is attained.
vsys = config["vsys"]
v_cent = vs - vsys
vrange = np.max(np.abs(v_cent))
vel_min = -vrange
vel_max = vrange
vel_fracs = 1 - (v_cent - vel_min)/(2 * vrange)
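# For example (the numbers are assumptions): if v_cent runs from -3 to +3 km/s,
# vel_fracs maps -3 -> 1.0 and +3 -> 0.0, so with the "RdBu" colormap the
# blueshifted channel labels come out blue and the redshifted ones red.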
# Get contour levels specific to this data/model/resid set
vmin = np.min(data)
vmax = np.max(data)
levels = common.get_levels(rms, vmin, vmax)
# print("3 sigma contour levels:", levels)
fig = plt.figure(figsize=(fig_width, fig_height))
for row in range(nrows):
for col in range(ncols):
chan = row * ncols + col
if chan >= nchan:
continue
xmin = (margin + ax_size * col)/fig_width
ymin = 1.0 - ((margin + ax_size * (row + 1))/fig_height)
rect = [xmin, ymin, dx, dy]
ax = fig.add_axes(rect)
# cmap = make_cmap(vel_fracs[chan])
# print("Color", cmap(vel_fracs[chan]))
# Plot image
ax.imshow(data[chan], cmap=cmap, norm=norm, origin="lower", extent=ext)
# Plot contours
ax.contour(data[chan], origin="lower", levels=levels, linewidths=0.2, colors="black", extent=ext)
if crosshairs:
ax.plot(RA_a, DEC_a, lw=0.2, ls=":", color="0.2")
ax.plot(RA_b, DEC_b, lw=0.2, ls="-", color="0.2")
# Annotate the velocity
text = ax.annotate("{:.1f}".format(vs[chan]), (0.1, 0.8), xycoords="axes fraction", size=5, color=cmap(vel_fracs[chan]))
text.set_path_effects([path_effects.Stroke(linewidth=0.2, foreground='black'),
path_effects.Normal()])
if row == 0 and col == 0:
ax.annotate(r"$\textrm{km s}^{-1}$", (0.15, 0.6), xycoords="axes fraction", size=5)
# Plot the beam
if row == (nrows - 1) and col == 0:
common.plot_beam(ax, header, xy=(0.75 * 3600 * radius,-0.75 * 3600 * radius))
# Actually create axis labels
ax.set_xlabel(r"$\Delta \alpha$ [${}^{\prime\prime}$]", fontsize=8)
ax.set_ylabel(r"$\Delta \delta$ [${}^{\prime\prime}$]", fontsize=8)
ax.tick_params(axis='both', which='major', labelsize=8)
#
# ax.xaxis.set_major_formatter(FSF("%.0f"))
# ax.yaxis.set_major_formatter(FSF("%.0f"))
# ax.xaxis.set_major_locator(MultipleLocator(1.))
# ax.yaxis.set_major_locator(MultipleLocator(1.))
else:
# Hide axis label and tick labels
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
fig.savefig(fname)
def plot_spectrum(fits_name, fname):
'''
Sum up all of the flux in each image to make a spatially-integrated line spectrum.
'''
try:
vs, data, header = readfn(fits_name)
except FileNotFoundError as e:
print("Cannot load {}, continuing.".format(fits_name))
return
# We always want to display the data from blueshifted to redshifted. This means we want to
# find out if the velocities are decreasing, and if so, switch them to increasing
if vs[-1] < vs[0]:
print("Reversing velocity to be increasing.")
vs = vs[::-1]
data = data[::-1]
nchan = data.shape[0]
dict = common.get_coords(data, header, radius, mu_RA, mu_DEC)
RA = 3600 * dict["RA"] # [arcsec]
DEC = 3600 * dict["DEC"] # [arcsec]
decl, decr = dict["DEC_slice"]
ral, rar = dict["RA_slice"]
data = dict["data"]
# TODO: divide by the number of pixels per beam (to convert Jy/beam to Jy/pixel)
# before summing, so that the spectrum comes out in Jy.
vsys = config["vsys"]
v_cent = vs - vsys
print(data.shape)
flux = np.sum(data, axis=(1,2))
fig, ax = plt.subplots(nrows=1)
ax.plot(v_cent, flux, ls="steps-mid")
ax.set_xlabel(r"$v$ [km/s]")
ax.set_ylabel("Flux [Jy]")
fig.savefig(fname)
# Go through and plot data, model, and residuals. If the file doesn't exist, the routine will skip.
plot_maps(config["data"], "data." + args.fmt)
plot_maps(config["model"], "model." + args.fmt)
plot_maps(config["resid"], "resid." + args.fmt)
# plot_spectrum(config["data"], "spec_data.pdf")
# plot_spectrum(config["model"], "spec_model.pdf")
# plot_spectrum(config["resid"], "spec_resid.pdf")
|
mit
|
BrechtBa/plottools
|
plottools/cm/hotwater.py
|
1
|
13692
|
from matplotlib.colors import ListedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in viscm
parameters = {'xp': [1.2291944322885904, 15.686577994354849, 30.928398202273232, 36.957902166964374, 39.352619720280053, 37.854922859529893],
'yp': [-17.104696078108354, -28.200058582308117, -14.15860623697381, 0.48569724700903905, 13.595593405835402, 23.29120992542855],
'min_Jp': 9.74051896208,
'max_Jp': 59.6407185629}
cm_data = [[ 0.06334758, 0.04767714, 0.20764926],
[ 0.0668604 , 0.04840216, 0.21191903],
[ 0.07037658, 0.0490898 , 0.21617819],
[ 0.07389749, 0.04973982, 0.22042582],
[ 0.07742434, 0.05035199, 0.22466091],
[ 0.08096457, 0.05091956, 0.22889053],
[ 0.08451233, 0.05144902, 0.23310476],
[ 0.08806829, 0.05194035, 0.23730226],
[ 0.09163313, 0.05239343, 0.24148177],
[ 0.09520958, 0.0528058 , 0.24564459],
[ 0.09879869, 0.05317664, 0.24978991],
[ 0.10239764, 0.05350941, 0.25391239],
[ 0.10600672, 0.05380415, 0.25801055],
[ 0.10962615, 0.05406096, 0.26208286],
[ 0.11325606, 0.05427998, 0.26612774],
[ 0.11690003, 0.05445719, 0.27014746],
[ 0.1205542 , 0.05459734, 0.27413598],
[ 0.12421844, 0.05470086, 0.27809157],
[ 0.12789261, 0.05476814, 0.28201255],
[ 0.13157654, 0.05479966, 0.28589722],
[ 0.13526997, 0.05479595, 0.28974386],
[ 0.13897261, 0.05475763, 0.29355076],
[ 0.14268604, 0.05468282, 0.29731803],
[ 0.14640781, 0.05457496, 0.30104191],
[ 0.15013739, 0.05443505, 0.30472063],
[ 0.15387432, 0.05426399, 0.30835258],
[ 0.15761816, 0.05406275, 0.31193617],
[ 0.16136839, 0.05383237, 0.31546987],
[ 0.1651245 , 0.05357394, 0.31895217],
[ 0.16888596, 0.05328865, 0.32238164],
[ 0.1726522 , 0.05297774, 0.32575692],
[ 0.17642268, 0.05264248, 0.3290767 ],
[ 0.18019683, 0.05228424, 0.33233977],
[ 0.18397441, 0.05190387, 0.3355452 ],
[ 0.18775467, 0.05150312, 0.33869179],
[ 0.19153678, 0.05108387, 0.34177839],
[ 0.19532021, 0.05064763, 0.34480416],
[ 0.19910443, 0.05019595, 0.34776833],
[ 0.20288891, 0.04973039, 0.35067022],
[ 0.20667315, 0.04925253, 0.35350927],
[ 0.21045667, 0.04876397, 0.35628499],
[ 0.21423901, 0.04826632, 0.358997 ],
[ 0.21801972, 0.04776117, 0.36164501],
[ 0.22179838, 0.04725013, 0.36422884],
[ 0.22557459, 0.04673477, 0.36674838],
[ 0.229348 , 0.04621667, 0.36920361],
[ 0.23311825, 0.04569736, 0.3715946 ],
[ 0.23688503, 0.04517836, 0.37392151],
[ 0.24064806, 0.04466115, 0.37618456],
[ 0.24440706, 0.04414717, 0.37838406],
[ 0.24816181, 0.0436378 , 0.38052038],
[ 0.25191208, 0.0431344 , 0.38259396],
[ 0.25565771, 0.04263825, 0.38460528],
[ 0.25939854, 0.04215059, 0.3865549 ],
[ 0.26313442, 0.04167259, 0.38844342],
[ 0.26686525, 0.04120537, 0.39027148],
[ 0.27059094, 0.04074996, 0.39203976],
[ 0.27431143, 0.04030508, 0.39374899],
[ 0.27802666, 0.0398711 , 0.39539992],
[ 0.28173662, 0.03945441, 0.39699332],
[ 0.2854413 , 0.0390555 , 0.39852999],
[ 0.2891407 , 0.03867475, 0.40001076],
[ 0.29283486, 0.03831249, 0.40143645],
[ 0.2965238 , 0.03796897, 0.40280792],
[ 0.30020759, 0.03764436, 0.404126 ],
[ 0.30388629, 0.03733877, 0.40539157],
[ 0.30755997, 0.03705225, 0.40660548],
[ 0.31122873, 0.03678479, 0.40776859],
[ 0.31489267, 0.03653631, 0.40888175],
[ 0.31855185, 0.03630675, 0.40994582],
[ 0.3222064 , 0.03609595, 0.41096166],
[ 0.32585645, 0.03590364, 0.41193007],
[ 0.32950214, 0.03572956, 0.41285189],
[ 0.33314359, 0.03557341, 0.41372793],
[ 0.33678094, 0.03543485, 0.41455897],
[ 0.34041433, 0.03531353, 0.41534579],
[ 0.34404389, 0.03520905, 0.41608917],
[ 0.34766978, 0.03512098, 0.41678985],
[ 0.35129214, 0.0350489 , 0.41744855],
[ 0.35491111, 0.03499232, 0.418066 ],
[ 0.35852685, 0.03495077, 0.41864287],
[ 0.3621395 , 0.03492374, 0.41917986],
[ 0.3657492 , 0.03491073, 0.4196776 ],
[ 0.36935611, 0.03491119, 0.42013675],
[ 0.37296037, 0.0349246 , 0.42055792],
[ 0.37656213, 0.03495041, 0.4209417 ],
[ 0.38016152, 0.03498806, 0.42128867],
[ 0.3837587 , 0.03503698, 0.4215994 ],
[ 0.38735379, 0.03509663, 0.42187443],
[ 0.39094694, 0.03516642, 0.42211427],
[ 0.39453828, 0.0352458 , 0.42231944],
[ 0.39812795, 0.03533418, 0.42249041],
[ 0.40171607, 0.035431 , 0.42262766],
[ 0.40530277, 0.0355357 , 0.42273165],
[ 0.40888809, 0.0356479 , 0.4228029 ],
[ 0.41247224, 0.03576683, 0.42284173],
[ 0.41605535, 0.03589193, 0.42284854],
[ 0.41963751, 0.03602264, 0.42282371],
[ 0.42321886, 0.0361584 , 0.42276761],
[ 0.42679949, 0.03629868, 0.42268061],
[ 0.43037952, 0.03644293, 0.42256303],
[ 0.43395904, 0.03659062, 0.4224152 ],
[ 0.43753815, 0.03674122, 0.42223744],
[ 0.44111695, 0.03689422, 0.42203005],
[ 0.44469553, 0.03704909, 0.42179331],
[ 0.44827399, 0.03720534, 0.4215275 ],
[ 0.4518524 , 0.03736247, 0.42123288],
[ 0.45543084, 0.03752 , 0.4209097 ],
[ 0.45900941, 0.03767745, 0.4205582 ],
[ 0.46258817, 0.03783434, 0.42017862],
[ 0.46616718, 0.03799028, 0.4197712 ],
[ 0.46974651, 0.03814479, 0.41933615],
[ 0.47332626, 0.03829737, 0.41887362],
[ 0.47690648, 0.03844759, 0.41838382],
[ 0.48048724, 0.03859504, 0.41786692],
[ 0.48406859, 0.0387393 , 0.4173231 ],
[ 0.48765059, 0.03887996, 0.41675252],
[ 0.49123329, 0.03901664, 0.41615535],
[ 0.49481674, 0.03914896, 0.41553173],
[ 0.49840098, 0.03927654, 0.41488182],
[ 0.50198607, 0.03939903, 0.41420574],
[ 0.50557203, 0.03951606, 0.41350363],
[ 0.5091589 , 0.03962732, 0.41277563],
[ 0.51274673, 0.03973246, 0.41202183],
[ 0.51633554, 0.03983116, 0.41124237],
[ 0.51992537, 0.0399231 , 0.41043733],
[ 0.52351627, 0.04000795, 0.40960679],
[ 0.52710824, 0.04008545, 0.40875088],
[ 0.53070131, 0.04015532, 0.40786969],
[ 0.53429549, 0.04021731, 0.40696332],
[ 0.53789081, 0.04027117, 0.40603183],
[ 0.54148728, 0.04031665, 0.40507533],
[ 0.54508492, 0.04035352, 0.40409389],
[ 0.54868373, 0.04038156, 0.40308758],
[ 0.55228372, 0.04040058, 0.40205647],
[ 0.5558849 , 0.04041036, 0.40100064],
[ 0.55948729, 0.04041074, 0.39992014],
[ 0.56309087, 0.04040153, 0.39881503],
[ 0.56669565, 0.04038259, 0.39768539],
[ 0.57030163, 0.04035375, 0.39653126],
[ 0.57390881, 0.04031489, 0.39535269],
[ 0.57751722, 0.04026573, 0.39414962],
[ 0.58112686, 0.0402062 , 0.39292215],
[ 0.58473768, 0.0401363 , 0.39167036],
[ 0.58834967, 0.04005594, 0.39039431],
[ 0.59196282, 0.03996504, 0.38909403],
[ 0.59557712, 0.03986355, 0.38776957],
[ 0.59919256, 0.03975142, 0.38642097],
[ 0.60280911, 0.03962862, 0.38504824],
[ 0.60642675, 0.03949512, 0.38365143],
[ 0.61004548, 0.03935093, 0.38223057],
[ 0.61366525, 0.03919605, 0.38078568],
[ 0.61728607, 0.03903051, 0.37931679],
[ 0.62090789, 0.03885434, 0.37782392],
[ 0.62453069, 0.03866759, 0.37630709],
[ 0.62815445, 0.03847033, 0.37476633],
[ 0.63177914, 0.03826264, 0.37320164],
[ 0.63540472, 0.03804462, 0.37161305],
[ 0.63903116, 0.03781638, 0.37000055],
[ 0.64265844, 0.03757805, 0.36836417],
[ 0.64628652, 0.03732977, 0.36670392],
[ 0.64991535, 0.03707171, 0.36501978],
[ 0.65354494, 0.03680393, 0.36331169],
[ 0.65717526, 0.03652658, 0.36157961],
[ 0.66080623, 0.03624002, 0.35982365],
[ 0.66443779, 0.03594451, 0.35804381],
[ 0.66806992, 0.03564027, 0.35624009],
[ 0.67170256, 0.03532758, 0.35441247],
[ 0.67533568, 0.03500673, 0.35256094],
[ 0.67896921, 0.03467803, 0.35068548],
[ 0.68260313, 0.0343418 , 0.34878608],
[ 0.68623737, 0.03399838, 0.3468627 ],
[ 0.68987188, 0.03364815, 0.34491533],
[ 0.69350662, 0.03329149, 0.34294394],
[ 0.69714152, 0.03292882, 0.34094848],
[ 0.70077654, 0.03256056, 0.33892892],
[ 0.70441162, 0.03218717, 0.33688522],
[ 0.7080467 , 0.03180913, 0.33481734],
[ 0.71168172, 0.03142694, 0.3327252 ],
[ 0.71531661, 0.03104113, 0.33060877],
[ 0.71895133, 0.03065224, 0.32846798],
[ 0.72258579, 0.03026085, 0.32630275],
[ 0.72621995, 0.02986757, 0.32411302],
[ 0.72985373, 0.02947302, 0.32189871],
[ 0.73348706, 0.02907787, 0.31965972],
[ 0.73711987, 0.02868279, 0.31739597],
[ 0.7407521 , 0.02828849, 0.31510735],
[ 0.74438366, 0.02789574, 0.31279376],
[ 0.74801449, 0.02750528, 0.31045508],
[ 0.7516445 , 0.02711795, 0.30809118],
[ 0.75527362, 0.02673455, 0.30570194],
[ 0.75890177, 0.02635598, 0.3032872 ],
[ 0.76252886, 0.02598314, 0.30084681],
[ 0.76615481, 0.02561695, 0.29838061],
[ 0.76977964, 0.02525791, 0.29588802],
[ 0.77340318, 0.02490739, 0.29336914],
[ 0.77702532, 0.0245665 , 0.29082383],
[ 0.78064598, 0.02423635, 0.28825188],
[ 0.78426506, 0.02391805, 0.28565305],
[ 0.78788245, 0.02361278, 0.2830271 ],
[ 0.79149806, 0.02332175, 0.28037377],
[ 0.7951118 , 0.02304622, 0.27769277],
[ 0.79872355, 0.02278747, 0.27498381],
[ 0.8023332 , 0.02254687, 0.27224656],
[ 0.80594065, 0.0223258 , 0.26948069],
[ 0.80954578, 0.02212571, 0.26668583],
[ 0.81314848, 0.02194809, 0.2638616 ],
[ 0.81674864, 0.02179449, 0.26100757],
[ 0.82034612, 0.02166651, 0.2581233 ],
[ 0.82394098, 0.02156487, 0.25520748],
[ 0.82753303, 0.02149164, 0.25225992],
[ 0.83112203, 0.02144911, 0.24928052],
[ 0.83470785, 0.02143911, 0.24626869],
[ 0.83829034, 0.02146355, 0.24322381],
[ 0.84186938, 0.02152438, 0.24014521],
[ 0.8454448 , 0.02162365, 0.23703217],
[ 0.84901646, 0.02176345, 0.23388393],
[ 0.85258421, 0.02194596, 0.23069966],
[ 0.85614815, 0.02217192, 0.22747701],
[ 0.85970797, 0.02244453, 0.22421582],
[ 0.86326339, 0.02276678, 0.22091562],
[ 0.86681423, 0.02314115, 0.21757527],
[ 0.87036031, 0.02357023, 0.21419359],
[ 0.87390146, 0.02405668, 0.21076926],
[ 0.87743748, 0.02460327, 0.20730089],
[ 0.8809686 , 0.02521042, 0.20378433],
[ 0.88449422, 0.02588333, 0.20022025],
[ 0.88801412, 0.0266252 , 0.19660697],
[ 0.89152806, 0.02743921, 0.19294251],
[ 0.89503584, 0.02832862, 0.18922472],
[ 0.89853741, 0.02929578, 0.18544999],
[ 0.90203267, 0.03034339, 0.18161467],
[ 0.90552105, 0.03147694, 0.177718 ],
[ 0.9090023 , 0.03270019, 0.17375685],
[ 0.91247616, 0.03401705, 0.1697277 ],
[ 0.91594267, 0.03542982, 0.16562435],
[ 0.91940148, 0.03694304, 0.16144277],
[ 0.92285205, 0.03856246, 0.15717991],
[ 0.92629411, 0.04029256, 0.15283036],
[ 0.9297275 , 0.04208891, 0.14838643],
[ 0.93315213, 0.04394793, 0.14383905],
[ 0.93656726, 0.04587337, 0.13918364],
[ 0.93997257, 0.04786426, 0.1344111 ],
[ 0.94336796, 0.04991831, 0.12950803],
[ 0.94675308, 0.05203472, 0.12446169],
[ 0.95012726, 0.05421413, 0.11926072],
[ 0.95349009, 0.05645569, 0.11388811],
[ 0.95684173, 0.05875624, 0.10831624],
[ 0.96018119, 0.06111767, 0.10252706],
[ 0.96350801, 0.06353929, 0.09649019],
[ 0.96682217, 0.06601885, 0.09016074],
[ 0.97012287, 0.06855717, 0.08349422],
[ 0.97340949, 0.07115419, 0.07642881],
[ 0.97668194, 0.07380809, 0.06886982],
[ 0.97993936, 0.07651969, 0.06070177],
[ 0.98318108, 0.07928898, 0.05174878]]
test_cm = ListedColormap(cm_data, name=__file__)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
|
gpl-2.0
|
Sentient07/scikit-learn
|
examples/classification/plot_classification_probability.py
|
138
|
2871
|
"""
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting, and Gaussian process classification.
The logistic regression is not a multiclass classifier out of the box; as a
result it must rely on a One-Vs-Rest scheme or a multinomial setting to handle
all three classes.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'),
'GPC': GaussianProcessClassifier(kernel)
}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities:
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
|
bsd-3-clause
|
mne-tools/mne-tools.github.io
|
stable/_downloads/e2d6793c13f4efc273574a721815d5b9/20_seeg.py
|
10
|
7388
|
"""
.. _tut_working_with_seeg:
======================
Working with sEEG data
======================
MNE supports working with more than just MEG and EEG data. Here we show some
of the functions that can be used to facilitate working with
stereoelectroencephalography (sEEG) data.
This example shows how to use:
- sEEG data
- channel locations in MNI space
- projection into a volume
Note that our sample sEEG electrodes are already assumed to be in MNI
space. If you want to map positions from your subject MRI space to MNI
fsaverage space, you must apply FreeSurfer's talairach.xfm transform
to your dataset (a rough sketch follows this docstring). You can take a look
at :ref:`tut-freesurfer-mne` for more information.
For an example that involves ECoG data, channel locations in a
subject-specific MRI, or projection into a surface, see
:ref:`tut_working_with_ecog`. In the ECoG example, we show
how to visualize surface grid channels on the brain.
"""
# Authors: Eric Larson <[email protected]>
# Adam Li <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import pandas as pd
import mne
from mne.datasets import fetch_fsaverage
print(__doc__)
# paths to mne datasets - sample sEEG and FreeSurfer's fsaverage subject
# which is in MNI space
misc_path = mne.datasets.misc.data_path()
sample_path = mne.datasets.sample.data_path()
subject = 'fsaverage'
subjects_dir = sample_path + '/subjects'
# use mne-python's fsaverage data
fetch_fsaverage(subjects_dir=subjects_dir, verbose=True) # downloads if needed
###############################################################################
# Let's load some sEEG electrode locations and names, and turn them into
# a :class:`mne.channels.DigMontage` class. First, use pandas to read in the
# ``.tsv`` file.
# In mne-python, the electrode coordinates are required to be in meters
elec_df = pd.read_csv(misc_path + '/seeg/sample_seeg_electrodes.tsv',
sep='\t', header=0, index_col=None)
ch_names = elec_df['name'].tolist()
ch_coords = elec_df[['x', 'y', 'z']].to_numpy(dtype=float)
# the test channel coordinates were in mm, so we convert them to meters
ch_coords = ch_coords / 1000.
# create dictionary of channels and their xyz coordinates (now in MNI space)
ch_pos = dict(zip(ch_names, ch_coords))
# Ideally the nasion/LPA/RPA will also be present from the digitization, here
# we use fiducials estimated from the subject's FreeSurfer MNI transformation:
lpa, nasion, rpa = mne.coreg.get_mni_fiducials(
subject, subjects_dir=subjects_dir)
lpa, nasion, rpa = lpa['r'], nasion['r'], rpa['r']
###############################################################################
# Now we make a :class:`mne.channels.DigMontage` stating that the sEEG
# contacts are in the FreeSurfer surface RAS (i.e., MRI) coordinate system
# for the given subject. Keep in mind that ``fsaverage`` is special in that
# it is already in MNI space.
montage = mne.channels.make_dig_montage(
ch_pos, coord_frame='mri', nasion=nasion, lpa=lpa, rpa=rpa)
print('Created %s channel positions' % len(ch_names))
###############################################################################
# Now we get the :term:`trans` that transforms from our MRI coordinate system
# to the head coordinate frame. This transform will be applied to the
# data when applying the montage so that standard plotting functions like
# :func:`mne.viz.plot_evoked_topomap` will be aligned properly.
trans = mne.channels.compute_native_head_t(montage)
print(trans)
###############################################################################
# Now that we have our montage, we can load in our corresponding
# time-series data and set the montage to the raw data.
# first we'll load in the sample dataset
raw = mne.io.read_raw_edf(misc_path + '/seeg/sample_seeg.edf')
# drop bad channels
raw.info['bads'].extend([ch for ch in raw.ch_names if ch not in ch_names])
raw.load_data()
raw.drop_channels(raw.info['bads'])
raw.crop(0, 2) # just process 2 sec of data for speed
# attach montage
raw.set_montage(montage)
# set channel types to sEEG (instead of EEG) that have actual positions
raw.set_channel_types(
{ch_name: 'seeg' if np.isfinite(ch_pos[ch_name]).all() else 'misc'
for ch_name in raw.ch_names})
###############################################################################
# Let's check to make sure everything is aligned.
fig = mne.viz.plot_alignment(raw.info, trans, 'fsaverage',
subjects_dir=subjects_dir, show_axes=True,
surfaces=["pial", "head"])
###############################################################################
# Next, we'll get the raw data and plot its amplitude over time.
raw.plot()
###############################################################################
# We can visualize this raw data on the ``fsaverage`` brain (in MNI space) as
# a heatmap. This works by first creating an ``Evoked`` data structure
# from the data of interest (in this example, it is just the raw LFP).
# Then one should generate a ``stc`` data structure, which will be able
# to visualize source activity on the brain in various different formats.
# get standard fsaverage volume (5mm grid) source space
fname_src = op.join(subjects_dir, 'fsaverage', 'bem',
'fsaverage-vol-5-src.fif')
vol_src = mne.read_source_spaces(fname_src)
evoked = mne.EvokedArray(raw.get_data(), raw.info).crop(0, 1) # shorter
stc = mne.stc_near_sensors(
evoked, trans, subject, subjects_dir=subjects_dir, src=vol_src,
verbose='error') # ignore missing electrode warnings
stc = abs(stc) # just look at magnitude
clim = dict(kind='value', lims=np.percentile(abs(evoked.data), [10, 50, 75]))
###############################################################################
# Plot 3D source (brain region) visualization:
#
# By default, `stc.plot_3d() <mne.VolSourceEstimate.plot_3d>` will show a time
# course of the source with the largest absolute value across any time point.
# In this example, it is simply the source with the largest raw signal value.
# Its location is marked on the brain by a small blue sphere.
# sphinx_gallery_thumbnail_number = 4
brain = stc.plot_3d(
src=vol_src, subjects_dir=subjects_dir,
view_layout='horizontal', views=['axial', 'coronal', 'sagittal'],
size=(800, 300), show_traces=0.4, clim=clim,
add_data_kwargs=dict(colorbar_kwargs=dict(label_font_size=8)))
# You can save a movie like the one on our documentation website with:
# brain.save_movie(time_dilation=3, interpolation='linear', framerate=10,
#                  time_viewer=True, filename='./mne-test-seeg.mp4')
###############################################################################
# In this tutorial, we used a BEM surface for the ``fsaverage`` subject from
# FreeSurfer.
#
# For additional common analyses of interest, see the following:
#
# - For volumetric plotting options, including limiting to a specific area of
# the volume specified by say an atlas, or plotting different types of
# source visualizations see:
# :ref:`tut-viz-stcs`.
# - For extracting activation within a specific FreeSurfer volume and using
# different FreeSurfer volumes, see: :ref:`tut-freesurfer-mne`.
# - For working with BEM surfaces and using FreeSurfer, or mne to generate
# them, see: :ref:`tut-forward`.
|
bsd-3-clause
|
FCH808/FCH808.github.io
|
Intro to Data Science/Project1_Part1.py
|
2
|
34015
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 19 13:46:12 2014
@author: fch
"""
import pandas as pd
import pandasql
import csv
filename="..\\data\\turnstile_data_master_with_weather.csv"
## Question 2.1
def num_rainy_days(filename):
'''
This function should run a SQL query on a dataframe of
weather data. The SQL query should return one column and
one row - a count of the number of days in the dataframe where
the rain column is equal to 1 (i.e., the number of days it
rained). The dataframe will be titled 'weather_data'. You'll
need to provide the SQL query. You might find SQL's count function
useful for this exercise. You can read more about it here:
https://dev.mysql.com/doc/refman/5.1/en/counting-rows.html
You might also find that interpreting numbers as integers or floats may not
    work initially. In order to get around this issue, it may be useful to cast
these numbers as integers. This can be done by writing cast(column as integer).
So for example, if we wanted to cast the maxtempi column as an integer, we would actually
write something like where cast(maxtempi as integer) = 76, as opposed to simply
where maxtempi = 76.
'''
weather_data = pd.read_csv(filename)
weather_data.rename(columns = lambda x: x.replace(' ', '_').lower(), inplace=True)
## Correct Grader Solution:
# q = """
# SELECT count(*)
# FROM weather_data
# WHERE cast(rain as integer) = 1
# """
##
## Correct grader solution only works on the very small 30x70 dataframe with unique dates.
## Below query works on full turnstile dataset provided to us.
q = """
SELECT COUNT(DISTINCT DATEn) as 'count(*)'
FROM weather_data
WHERE cast(rain as integer) = 1
"""
#Execute your SQL command against the pandas frame
rainy_days = pandasql.sqldf(q, locals())
return rainy_days
#print num_rainy_days(filename)
## Question 2.2
def max_temp_aggregate_by_fog(filename):
'''
This function should run a SQL query on a dataframe of
weather data. The SQL query should return two columns and
two rows - whether it was foggy or not (0 or 1) and the max
maxtempi for that fog value (i.e., the maximum max temperature
for both foggy and non-foggy days). The dataframe will be
titled 'weather_data'. You'll need to provide the SQL query.
You might also find that interpreting numbers as integers or floats may not
work initially. In order to get around this issue, it may be useful to cast
these numbers as integers. This can be done by writing cast(column as integer).
So for example, if we wanted to cast the maxtempi column as an integer, we would actually
write something like where cast(maxtempi as integer) = 76, as opposed to simply
where maxtempi = 76.
'''
weather_data = pd.read_csv(filename)
weather_data.rename(columns = lambda x: x.replace(' ', '_').lower(), inplace=True)
q = """
SELECT fog, max(cast (maxtempi as integer)) as MaxTemp
FROM weather_data
GROUP BY fog;
"""
#Execute your SQL command against the pandas frame
rainy_days = pandasql.sqldf(q, locals())
return rainy_days
# print max_temp_aggregate_by_fog(filename)
def avg_weekend_temperature(filename):
'''
This function should run a SQL query on a dataframe of
weather data. The SQL query should return one column and
one row - the average meantempi on days that are a Saturday
    or Sunday (i.e., the average mean temperature on weekends).
    The dataframe will be titled 'weather_data' and you can access
the date in the dataframe via the 'date' column.
You'll need to provide the SQL query.
You might also find that interpreting numbers as integers or floats may not
    work initially. In order to get around this issue, it may be useful to cast
these numbers as integers. This can be done by writing cast(column as integer).
So for example, if we wanted to cast the maxtempi column as an integer, we would actually
write something like where cast(maxtempi as integer) = 76, as opposed to simply
where maxtempi = 76.
    Also, you can convert dates to the day of
    the week via the 'strftime' keyword in SQL.
For example, cast (strftime('%w', date) as integer) will return 0 if the date
is a Sunday or 6 if the date is a Saturday.
'''
weather_data = pd.read_csv(filename)
weather_data.rename(columns = lambda x: x.replace(' ', '_').lower(), inplace=True)
## Correct Grader Solution:
# q = """
# SELECT AVG(cast (meantempi as integer))
# FROM weather_data
# WHERE cast (strftime('%w', date) as integer) in (0, 6)
##
q = """
SELECT AVG(cast (meantempi as integer))
FROM weather_data
WHERE cast (strftime('%w', DATEn) as integer) in (0, 6)
"""
#Execute your SQL command against the pandas frame
mean_temp_weekends = pandasql.sqldf(q, locals())
return mean_temp_weekends
#print avg_weekend_temperature(filename)
def avg_min_temperature(filename):
'''
This function should run a SQL query on a dataframe of
weather data. More specifically you want to find the average
minimum temperature on rainy days where the minimum temperature
is greater than 55 degrees.
You might also find that interpreting numbers as integers or floats may not
    work initially. In order to get around this issue, it may be useful to cast
these numbers as integers. This can be done by writing cast(column as integer).
So for example, if we wanted to cast the maxtempi column as an integer, we would actually
write something like where cast(maxtempi as integer) = 76, as opposed to simply
where maxtempi = 76.
'''
weather_data = pd.read_csv(filename)
weather_data.rename(columns = lambda x: x.replace(' ', '_').lower(), inplace=True)
q = """
SELECT AVG(cast (mintempi as integer))
FROM weather_data
WHERE mintempi > 55
AND rain = 1
"""
#Execute your SQL command against the pandas frame
mean_temp_weekends = pandasql.sqldf(q, locals())
return mean_temp_weekends
#print avg_min_temperature(filename)
def split_seq(seq, size):
""" Split up seq in pieces of size """
return [seq[i:i+size] for i in range(0, len(seq), size)]
filename=["turnstile_110528.txt"]
def fix_turnstile_data(filenames):
'''
Filenames is a list of MTA Subway turnstile text files. A link to an example
MTA Subway turnstile text file can be seen at the URL below:
http://web.mta.info/developers/data/nyct/turnstile/turnstile_110507.txt
    As you can see, there are numerous data points included in each row of
    an MTA Subway turnstile text file.
You want to write a function that will update each row in the text
file so there is only one entry per row. A few examples below:
A002,R051,02-00-00,05-28-11,00:00:00,REGULAR,003178521,001100739
A002,R051,02-00-00,05-28-11,04:00:00,REGULAR,003178541,001100746
A002,R051,02-00-00,05-28-11,08:00:00,REGULAR,003178559,001100775
Write the updates to a different text file in the format of "updated_" + filename.
For example:
1) if you read in a text file called "turnstile_110521.txt"
2) you should write the updated data to "updated_turnstile_110521.txt"
The order of the fields should be preserved.
You can see a sample of the turnstile text file that's passed into this function
    and the corresponding updated file in the links below:
Sample input file:
https://www.dropbox.com/s/mpin5zv4hgrx244/turnstile_110528.txt
Sample updated file:
https://www.dropbox.com/s/074xbgio4c39b7h/solution_turnstile_110528.txt
'''
for one_file in filenames:
with open(one_file, "r") as read_data:
# Get rid of extra blank line.
# Open outfile with mode 'wb' instead of 'w'.
# The csv.writer writes \r\n into the file directly.
# If you don't open the file in binary mode,
# it will write \r\r\n because on Windows text mode will translate each \n into \r\n.
with open("updated_{}".format(one_file), "wb") as write_data:
reader = csv.reader(read_data)
writer = csv.writer(write_data)
for line in reader:
header = line[:3]
clean_line = [s.strip() for s in line]
rest = [clean_line[i:i+5] for i in range(3, len(clean_line), 5)]
for each_one in rest:
writer.writerow(header + each_one)
write_data.close()
read_data.close()
#fix_turnstile_data(filename)
filenames = ["updated_turnstile_110528.txt", "updated_turnstile_110528.txt"]
output_file = "output.txt"
def create_master_turnstile_file(filenames, output_file):
'''
Write a function that takes the files in the list filenames, which all have the
columns 'C/A, UNIT, SCP, DATEn, TIMEn, DESCn, ENTRIESn, EXITSn', and consolidates
    them into one file located at output_file. There should be ONE row with the column
headers, located at the top of the file.
For example, if file_1 has:
'C/A, UNIT, SCP, DATEn, TIMEn, DESCn, ENTRIESn, EXITSn'
line 1 ...
line 2 ...
and another file, file_2 has:
'C/A, UNIT, SCP, DATEn, TIMEn, DESCn, ENTRIESn, EXITSn'
line 3 ...
line 4 ...
line 5 ...
We need to combine file_1 and file_2 into a master_file like below:
'C/A, UNIT, SCP, DATEn, TIMEn, DESCn, ENTRIESn, EXITSn'
line 1 ...
line 2 ...
line 3 ...
line 4 ...
line 5 ...
'''
with open(output_file, 'w') as master_file:
master_file.write('C/A,UNIT,SCP,DATEn,TIMEn,DESCn,ENTRIESn,EXITSn\n')
for filename in filenames:
with open(filename, 'r') as f:
## discard header
f.readline()
content = f.read()
master_file.write(content)
f.close()
master_file.close()
# create_master_turnstile_file(filenames, output_file)
filename = filename="output.txt"
def filter_by_regular(filename):
'''
This function should read the csv file located at filename into a pandas dataframe,
and filter the dataframe to only rows where the 'DESCn' column has the value 'REGULAR'.
    For example, if the pandas dataframe is as follows:
,C/A,UNIT,SCP,DATEn,TIMEn,DESCn,ENTRIESn,EXITSn
0,A002,R051,02-00-00,05-01-11,00:00:00,REGULAR,3144312,1088151
1,A002,R051,02-00-00,05-01-11,04:00:00,DOOR,3144335,1088159
2,A002,R051,02-00-00,05-01-11,08:00:00,REGULAR,3144353,1088177
3,A002,R051,02-00-00,05-01-11,12:00:00,DOOR,3144424,1088231
The dataframe will look like below after filtering to only rows where DESCn column
has the value 'REGULAR':
0,A002,R051,02-00-00,05-01-11,00:00:00,REGULAR,3144312,1088151
2,A002,R051,02-00-00,05-01-11,08:00:00,REGULAR,3144353,1088177
'''
turnstile_data = pd.read_csv(filename)
#for DESCn, row in turnstile_data.iterrows():
# if row['DESCn'] == 'REGULAR'
turnstile_data = turnstile_data[turnstile_data.DESCn == 'REGULAR']
return turnstile_data
#print filter_by_regular(filename)
#filename = filename="output.txt"
#df = pd.read_csv(filename)
#df = df[(df['SCP'] == '02-00-00') & (df['C/A']=='A002') & (df['UNIT']=='R051')]
def get_hourly_entries(df):
'''
The data in the MTA Subway Turnstile data reports on the cumulative
number of entries and exits per row. Assume that you have a dataframe
called df that contains only the rows for a particular turnstile machine
(i.e., unique SCP, C/A, and UNIT). This function should change
these cumulative entry numbers to a count of entries since the last reading
(i.e., entries since the last row in the dataframe).
More specifically, you want to do two things:
1) Create a new column called ENTRIESn_hourly
2) Assign to the column the difference between ENTRIESn of the current row
and the previous row. If there is any NaN, fill/replace it with 1.
You may find the pandas functions shift() and fillna() to be helpful in this exercise.
Examples of what your dataframe should look like at the end of this exercise:
C/A UNIT SCP DATEn TIMEn DESCn ENTRIESn EXITSn ENTRIESn_hourly
0 A002 R051 02-00-00 05-01-11 00:00:00 REGULAR 3144312 1088151 1
1 A002 R051 02-00-00 05-01-11 04:00:00 REGULAR 3144335 1088159 23
2 A002 R051 02-00-00 05-01-11 08:00:00 REGULAR 3144353 1088177 18
3 A002 R051 02-00-00 05-01-11 12:00:00 REGULAR 3144424 1088231 71
4 A002 R051 02-00-00 05-01-11 16:00:00 REGULAR 3144594 1088275 170
5 A002 R051 02-00-00 05-01-11 20:00:00 REGULAR 3144808 1088317 214
6 A002 R051 02-00-00 05-02-11 00:00:00 REGULAR 3144895 1088328 87
7 A002 R051 02-00-00 05-02-11 04:00:00 REGULAR 3144905 1088331 10
8 A002 R051 02-00-00 05-02-11 08:00:00 REGULAR 3144941 1088420 36
9 A002 R051 02-00-00 05-02-11 12:00:00 REGULAR 3145094 1088753 153
10 A002 R051 02-00-00 05-02-11 16:00:00 REGULAR 3145337 1088823 243
...
...
'''
# print df
df['temp'] = df.ENTRIESn.shift(1)
# df['ENTRIESn_hourly'] = 0
#
df['ENTRIESn_hourly'] = (df['ENTRIESn'] - df['temp'])
# df = df.drop('ENTRIESn',1)
del df['temp']
df['ENTRIESn_hourly'].fillna(1, inplace=True)
return df
#df = get_hourly_entries(df)
#print df.head()
## Question 2.9
def get_hourly_exits(df):
'''
The data in the MTA Subway Turnstile data reports on the cumulative
number of entries and exits per row. Assume that you have a dataframe
called df that contains only the rows for a particular turnstile machine
(i.e., unique SCP, C/A, and UNIT). This function should change
these cumulative exit numbers to a count of exits since the last reading
(i.e., exits since the last row in the dataframe).
More specifically, you want to do two things:
1) Create a new column called EXITSn_hourly
2) Assign to the column the difference between EXITSn of the current row
and the previous row. If there is any NaN, fill/replace it with 0.
You may find the pandas functions shift() and fillna() to be helpful in this exercise.
Example dataframe below:
Unnamed: 0 C/A UNIT SCP DATEn TIMEn DESCn ENTRIESn EXITSn ENTRIESn_hourly EXITSn_hourly
0 0 A002 R051 02-00-00 05-01-11 00:00:00 REGULAR 3144312 1088151 0 0
1 1 A002 R051 02-00-00 05-01-11 04:00:00 REGULAR 3144335 1088159 23 8
2 2 A002 R051 02-00-00 05-01-11 08:00:00 REGULAR 3144353 1088177 18 18
3 3 A002 R051 02-00-00 05-01-11 12:00:00 REGULAR 3144424 1088231 71 54
4 4 A002 R051 02-00-00 05-01-11 16:00:00 REGULAR 3144594 1088275 170 44
5 5 A002 R051 02-00-00 05-01-11 20:00:00 REGULAR 3144808 1088317 214 42
6 6 A002 R051 02-00-00 05-02-11 00:00:00 REGULAR 3144895 1088328 87 11
7 7 A002 R051 02-00-00 05-02-11 04:00:00 REGULAR 3144905 1088331 10 3
8 8 A002 R051 02-00-00 05-02-11 08:00:00 REGULAR 3144941 1088420 36 89
9 9 A002 R051 02-00-00 05-02-11 12:00:00 REGULAR 3145094 1088753 153 333
'''
df['temp'] = df.EXITSn.shift(1)
df['EXITSn_hourly'] = (df['EXITSn'] - df['temp'])
del df['temp']
df['EXITSn_hourly'].fillna(0, inplace=True)
return df
#df = get_hourly_exits(df)
#print df.head()
## Question 2.10
#time = df['TIMEn'][0]
def time_to_hour(time):
'''
Given an input variable time that represents time in the format of:
00:00:00 (hour:minutes:seconds)
Write a function to extract the hour part from the input variable time
and return it as an integer. For example:
1) if hour is 00, your code should return 0
2) if hour is 01, your code should return 1
3) if hour is 21, your code should return 21
Please return hour as an integer.
'''
hour = int(time[:2])
return hour
#print "Time:", time_to_hour(time)
## Question 2.11
import datetime
#date = df['DATEn'][0]
def reformat_subway_dates(date):
'''
The dates in our subway data are formatted in the format month-day-year.
The dates in our weather underground data are formatted year-month-day.
In order to join these two data sets together, we'll want the dates formatted
the same way. Write a function that takes as its input a date in the MTA Subway
data format, and returns a date in the weather underground format.
Hint:
There is a useful function in the datetime library called strptime.
More info can be seen here:
http://docs.python.org/2/library/datetime.html#datetime.datetime.strptime
'''
date_formatted = datetime.datetime.strptime(date, "%m-%d-%y").strftime("%Y-%m-%d")
return date_formatted
#print "Old format: ", date
#print "New format: ", reformat_subway_dates(date)
###################################################################
## Question 3.1
import numpy as np
import matplotlib.pyplot as plt
filename="..\\data\\turnstile_data_master_with_weather.csv"
weather_data = pd.read_csv(filename)
#print weather_data.head()
def entries_histogram(turnstile_weather):
'''
Before we perform any analysis, it might be useful to take a
look at the data we're hoping to analyze. More specifically, let's
examine the hourly entries in our NYC subway data and determine what
distribution the data follows. This data is stored in a dataframe
called turnstile_weather under the ['ENTRIESn_hourly'] column.
Let's plot two histograms on the same axes to show hourly
entries when raining vs. when not raining. Here's an example on how
to plot histograms with pandas and matplotlib:
turnstile_weather['column_to_graph'].hist()
    Your histogram may look similar to the bar graph in the instructor notes below.
You can read a bit about using matplotlib and pandas to plot histograms here:
http://pandas.pydata.org/pandas-docs/stable/visualization.html#histograms
You can see the information contained within the turnstile weather data here:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
'''
plt.figure()
    turnstile_weather[turnstile_weather['rain'] == 0]['ENTRIESn_hourly'].hist(bins=200, label="No rain").set_xlim([0, 6000]) # your code here to plot a histogram for hourly entries when it is not raining
    turnstile_weather[turnstile_weather['rain'] == 1]['ENTRIESn_hourly'].hist(bins=200, label="Rain").set_xlim([0, 6000]) # your code here to plot a histogram for hourly entries when it is raining
plt.legend()
return plt
#print entries_histogram(weather_data)
## Question 3.2
## With a small sample size we could not rely on Welch's t-test, but since we
## have a sufficiently large sample we can use it. The Mann-Whitney U-test
## helps bolster our claims, since it is a non-parametric test suited to
## comparing non-normal distributions.
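## (Editor's sketch, not part of the original answers.) Welch's t-test itself is
## available in scipy as ttest_ind with equal_var=False, e.g.:
# import scipy.stats
# rain_entries = weather_data[weather_data['rain'] == 1]['ENTRIESn_hourly']
# no_rain_entries = weather_data[weather_data['rain'] == 0]['ENTRIESn_hourly']
# t, p = scipy.stats.ttest_ind(rain_entries, no_rain_entries, equal_var=False)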
## Question 3.3
import scipy
import scipy.stats
def mann_whitney_plus_means(turnstile_weather):
'''
This function will consume the turnstile_weather dataframe containing
our final turnstile weather data.
You will want to take the means and run the Mann Whitney U-test on the
ENTRIESn_hourly column in the turnstile_weather dataframe.
This function should return:
1) the mean of entries with rain
2) the mean of entries without rain
3) the Mann-Whitney U-statistic and p-value comparing the number of entries
with rain and the number of entries without rain
You should feel free to use scipy's Mann-Whitney implementation, and you
might also find it useful to use numpy's mean function.
Here are the functions' documentation:
http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mannwhitneyu.html
http://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html
You can look at the final turnstile weather data at the link below:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
'''
    with_rain = turnstile_weather[turnstile_weather['rain'] == 1]['ENTRIESn_hourly'].reset_index(drop=True)
    without_rain = turnstile_weather[turnstile_weather['rain'] == 0]['ENTRIESn_hourly'].reset_index(drop=True)
    U, p = scipy.stats.mannwhitneyu(with_rain, without_rain)
    return with_rain.mean(), without_rain.mean(), U, p  # means returned as (with rain, without rain)
# print mann_whitney_plus_means(weather_data)
## Question 3.4
## I ran the Mann-Whitney U-test and obtained p = 0.019, i.e. only about a 1.9%
## probability that a difference this large is due to chance; the result is
## statistically significant.
## We can reject the null hypothesis that two populations are the same.
## Question 3.5
import pandas
from ggplot import *
"""
In this question, you need to:
1) implement the compute_cost() and gradient_descent() procedures
2) Select features (in the predictions procedure) and make predictions.
"""
def normalize_features(array):
"""
Normalize the features in the data set.
"""
mu = array.mean()
sigma = array.std()
array_normalized = (array - mu)/ sigma
return array_normalized, mu, sigma
def compute_cost(features, values, theta):
"""
Compute the cost function given a set of features / values,
and the values for our thetas.
This can be the same code as the compute_cost function in the lesson #3 exercises,
but feel free to implement your own.
"""
m = len(values)
sum_of_square_errors = np.square(np.dot(features, theta) - values).sum()
cost = sum_of_square_errors / (2*m)
return cost
def gradient_descent(features, values, theta, alpha, num_iterations):
"""
Perform gradient descent given a data set with an arbitrary number of features.
This can be the same gradient descent code as in the lesson #3 exercises,
but feel free to implement your own.
"""
m = len(values)
cost_history = []
for i in range(num_iterations):
cost_history.append(compute_cost(features, values, theta))
theta = theta + (alpha/m)* np.dot((values - np.dot(features, theta)), features)
return theta, pandas.Series(cost_history)
def predictions(dataframe):
'''
The NYC turnstile data is stored in a pandas dataframe called weather_turnstile.
Using the information stored in the dataframe, let's predict the ridership of
the NYC subway using linear regression with gradient descent.
You can download the complete turnstile weather dataframe here:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
Your prediction should have a R^2 value of 0.20 or better.
You need to experiment using various input features contained in the dataframe.
We recommend that you don't use the EXITSn_hourly feature as an input to the
linear model because we cannot use it as a predictor: we cannot use exits
counts as a way to predict entry counts.
Note: Due to the memory and CPU limitation of our Amazon EC2 instance, we will
    give you a random subset (~15%) of the data contained in
    turnstile_data_master_with_weather.csv. You are encouraged to experiment with
    this code on your own computer, locally.
If you'd like to view a plot of your cost history, uncomment the call to
plot_cost_history below. The slowdown from plotting is significant, so if you
are timing out, the first thing to do is to comment out the plot command again.
If you receive a "server has encountered an error" message, that means you are
hitting the 30-second limit that's placed on running your program. Try using a
smaller number for num_iterations if that's the case.
If you are using your own algorithm/models, see if you can optimize your code so
that it runs faster.
'''
# Select Features (try different features!)
features = dataframe[['rain', 'precipi', 'Hour', 'meantempi']]
# Add UNIT to features using dummy variables
dummy_units = pandas.get_dummies(dataframe['UNIT'], prefix='unit')
features = features.join(dummy_units)
# Values
values = dataframe['ENTRIESn_hourly']
m = len(values)
features, mu, sigma = normalize_features(features)
features['ones'] = np.ones(m) # Add a column of 1s (y intercept)
# Convert features and values to numpy arrays
features_array = np.array(features)
values_array = np.array(values)
# Set values for alpha, number of iterations.
alpha = 0.2 # please feel free to change this value
num_iterations = 15 # please feel free to change this value
# Initialize theta, perform gradient descent
theta_gradient_descent = np.zeros(len(features.columns))
theta_gradient_descent, cost_history = gradient_descent(features_array,
values_array,
theta_gradient_descent,
alpha,
num_iterations)
plot = None
# -------------------------------------------------
# Uncomment the next line to see your cost history
# -------------------------------------------------
plot = plot_cost_history(alpha, cost_history)
#
# Please note, there is a possibility that plotting
# this in addition to your calculation will exceed
# the 30 second limit on the compute servers.
predictions = np.dot(features_array, theta_gradient_descent)
return predictions, plot
def plot_cost_history(alpha, cost_history):
"""This function is for viewing the plot of your cost history.
You can run it by uncommenting this
plot_cost_history(alpha, cost_history)
call in predictions.
If you want to run this locally, you should print the return value
from this function.
"""
cost_df = pandas.DataFrame({
'Cost_History': cost_history,
'Iteration': range(len(cost_history))
})
return ggplot(cost_df, aes('Iteration', 'Cost_History')) + \
geom_point() + ggtitle('Cost History for alpha = %.3f' % alpha )
filename="..\\data\\turnstile_data_master_with_weather.csv"
weather_data = pd.read_csv(filename)
#preds, plot1 = predictions(weather_data)
#print preds
#print plot1
## Question 3.6
def plot_residuals(turnstile_weather, predictions):
'''
Using the same methods that we used to plot a histogram of entries
per hour for our data, why don't you make a histogram of the residuals
(that is, the difference between the original hourly entry data and the predicted values).
Based on this residual histogram, do you have any insight into how our model
performed? Reading a bit on this webpage might be useful:
http://www.itl.nist.gov/div898/handbook/pri/section2/pri24.htm
'''
plt.figure()
(turnstile_weather['ENTRIESn_hourly'] - predictions).hist(bins = 100)
return plt
#print plot_residuals(weather_data, preds)
## Question 3.7
import sys
def compute_r_squared(data, predictions):
'''
    In exercise 5, we calculated the R^2 value for you. But why don't you try
    and calculate the R^2 value yourself.
Given a list of original data points, and also a list of predicted data points,
write a function that will compute and return the coefficient of determination (R^2)
for this data. numpy.mean() and numpy.sum() might both be useful here, but
not necessary.
Documentation about numpy.mean() and numpy.sum() below:
http://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html
http://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html
'''
r_squared = 1 - np.sum(np.square(data - predictions)) / np.sum(np.square(data - np.mean(data)))
#r_squared = 1 - np.square(data - predictions).sum() / np.square(data - np.mean(data)).sum()
return r_squared
#print "R^2: ", compute_r_squared(weather_data['ENTRIESn_hourly'], preds)
## Question 3.8
import statsmodels.formula.api as smf
"""
In this optional exercise, you should complete the function called
predictions(turnstile_weather). This function takes in our pandas
turnstile weather dataframe, and returns a set of predicted ridership values,
based on the other information in the dataframe.
In exercise 3.5 we used Gradient Descent in order to compute the coefficients
theta used for the ridership prediction. Here you should attempt to implement
another way of computing the coefficients theta. You may also try using a reference implementation such as:
http://statsmodels.sourceforge.net/devel/generated/statsmodels.regression.linear_model.OLS.html
One of the advantages of the statsmodels implementation is that it gives you
easy access to the values of the coefficients theta. This can help you infer relationships
between variables in the dataset.
You may also experiment with polynomial terms as part of the input variables.
The following links might be useful:
http://en.wikipedia.org/wiki/Ordinary_least_squares
http://en.wikipedia.org/w/index.php?title=Linear_least_squares_(mathematics)
http://en.wikipedia.org/wiki/Polynomial_regression
This is your playground. Go wild!
How does your choice of linear regression compare to linear regression
with gradient descent computed in Exercise 3.5?
You can look at the information contained in the turnstile_weather dataframe below:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
Note: due to the memory and CPU limitation of our amazon EC2 instance, we will
give you a random subset (~10%) of the data contained in turnstile_data_master_with_weather.csv
If you receive a "server has encountered an error" message, that means you are hitting
the 30 second limit that's placed on running your program. See if you can optimize your code so it
runs faster.
"""
def predictions(weather_turnstile):
#print weather_turnstile
#unit_groups = weather_turnstile.groupby(['UNIT'])
#weather_turnstile['popularity'] = unit_groups[['ENTRIESn_hourly']].transform(sum).sort('ENTRIESn_hourly', ascending=False)
## Not used in exercise problem. Hour here is 0-23 and much less predictive than hour in the version 2 of the dataset.
model = smf.ols(formula="ENTRIESn_hourly ~ UNIT + C(Hour)", data=weather_turnstile)
results = model.fit()
predictions = results.predict()
#print predictions
#print results.summary()
return predictions
#pred2 = predictions(weather_data)
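## (Editor's sketch, not part of the original exercise.) statsmodels exposes the
## fitted coefficients and fit statistics directly on the results object, e.g.:
# results = smf.ols(formula="ENTRIESn_hourly ~ UNIT + C(Hour)",
#                   data=weather_data).fit()
# print results.params    # fitted theta for the intercept and each term
# print results.rsquared  # R^2 of the OLS fit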
## Question 4.1/4.2
from pandas import *
def plot_weather_data(turnstile_weather):
'''
plot_weather_data is passed a dataframe called turnstile_weather.
Use turnstile_weather along with ggplot to make a data visualization
focused on the MTA and weather data we used in Project 3.
You should feel free to implement something that we discussed in class
(e.g., scatterplots, line plots, or histograms) or attempt to implement
something more advanced if you'd like.
Here are some suggestions for things to investigate and illustrate:
* Ridership by time-of-day or day-of-week
* How ridership varies by subway station
* Which stations have more exits or entries at different times of day
If you'd like to learn more about ggplot and its capabilities, take
a look at the documentation at:
https://pypi.python.org/pypi/ggplot/
You can check out the link
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
to see all the columns and data points included in the turnstile_weather
dataframe.
However, due to the limitation of our Amazon EC2 server, we will give you only
about 1/3 of the actual data in the turnstile_weather dataframe.
'''
## The server encountered an error. Please try running again. encountered a lot.
## ggplot missing many features. Only basic plots shown for inbrowser exercises.
## Q4.1
plot = ggplot(turnstile_weather, aes('ENTRIESn_hourly', fill='rain')) + geom_bar(binwidth=100) + xlim(low=0, high=5000) + \
xlab("Hourly Entries - Bins of Size 100") + \
ylab("Hourly Entries - Count in each bin") + \
ggtitle("Hourly Entries Histogram - Rain vs. No Rain (Stacked)")
## Q. 4.2
    plot2 = ggplot(turnstile_weather, aes('Hour', 'ENTRIESn_hourly', fill='rain')) + geom_bar() + \
xlab("Hour of the day") + ggtitle("Distribution of ridership throughout the day - Rain (Blue) / No Rain (Red)") + ylab("Entries")
return plot, plot2
#print plot_weather_data(weather_data)
|
mit
|
lesteve/joblib
|
examples/compressors_comparison.py
|
3
|
9085
|
"""
===============================
Improving I/O using compressors
===============================
This example compares the compressors available in Joblib. In the example,
only Zlib, LZMA and LZ4 compression are used, but Joblib also supports the BZ2
and GZip compression methods.
For each compared compression method, this example dumps and reloads a
dataset fetched from an online machine-learning database. This gives three
pieces of information: the size of the compressed data on disk, the time spent
dumping it, and the time spent reloading it from disk.
"""
import os
import os.path
import time
###############################################################################
# Get some data from real-world use cases
# ---------------------------------------
#
# First fetch the benchmark dataset from an online machine-learning database
# and load it in a pandas dataframe.
import pandas as pd
url = ("https://archive.ics.uci.edu/ml/machine-learning-databases/"
"kddcup99-mld/kddcup.data.gz")
names = ("duration, protocol_type, service, flag, src_bytes, "
"dst_bytes, land, wrong_fragment, urgent, hot, "
"num_failed_logins, logged_in, num_compromised, "
"root_shell, su_attempted, num_root, "
"num_file_creations, ").split(', ')
data = pd.read_csv(url, names=names, nrows=1e6)
###############################################################################
# Dump and load the dataset without compression
# ---------------------------------------------
#
# This gives reference values for later comparison.
from joblib import dump, load
pickle_file = './pickle_data.joblib'
###############################################################################
# Start by measuring the time spent for dumping the raw data:
start = time.time()
with open(pickle_file, 'wb') as f:
dump(data, f)
raw_dump_duration = time.time() - start
print("Raw dump duration: %0.3fs" % raw_dump_duration)
###############################################################################
# Then measure the size of the raw dumped data on disk:
raw_file_size = os.stat(pickle_file).st_size / 1e6
print("Raw dump file size: %0.3fMB" % raw_file_size)
###############################################################################
# Finally measure the time spent for loading the raw data:
start = time.time()
with open(pickle_file, 'rb') as f:
load(f)
raw_load_duration = time.time() - start
print("Raw load duration: %0.3fs" % raw_load_duration)
###############################################################################
# Dump and load the dataset using the Zlib compression method
# -----------------------------------------------------------
#
# The compression level uses the default value, 3, which is, in general, a
# good compromise between compression and speed.
###############################################################################
# Start by measuring the time spent for dumping the zlib data:
start = time.time()
with open(pickle_file, 'wb') as f:
dump(data, f, compress='zlib')
zlib_dump_duration = time.time() - start
print("Zlib dump duration: %0.3fs" % zlib_dump_duration)
###############################################################################
# Then measure the size of the zlib dump data on disk:
zlib_file_size = os.stat(pickle_file).st_size / 1e6
print("Zlib file size: %0.3fMB" % zlib_file_size)
###############################################################################
# Finally measure the time spent for loading the compressed dataset:
start = time.time()
with open(pickle_file, 'rb') as f:
load(f)
zlib_load_duration = time.time() - start
print("Zlib load duration: %0.3fs" % zlib_load_duration)
###############################################################################
# .. note:: The compression format is detected automatically by Joblib.
# The compression format is identified by the standard magic number present
# at the beginning of the file. Joblib uses this information to determine
# the compression method used.
# This is the case for all compression methods supported by Joblib.
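###############################################################################
# (Editor's sketch, not part of the original benchmark.) Because the format is
# detected from the file itself, a dump can be reloaded without re-stating how
# it was compressed, and ``dump`` can pick a compressor from a known filename
# extension such as ``.gz``. The extra file below is hypothetical and removed
# right away.
sketch_file = './sketch_data.pkl.gz'
dump(data, sketch_file)  # gzip/zlib compression inferred from the extension
load(sketch_file)        # compression format detected automatically on load
os.remove(sketch_file)   # clean up the sketch file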
###############################################################################
# Dump and load the dataset using the LZMA compression method
# -----------------------------------------------------------
#
# The LZMA compression method has a very good compression ratio but at the
# cost of being very slow.
# In this example, a light compression level, e.g. 3, is used to speed up the
# dump/load cycle a bit.
###############################################################################
# Start by measuring the time spent for dumping the lzma data:
start = time.time()
with open(pickle_file, 'wb') as f:
dump(data, f, compress=('lzma', 3))
lzma_dump_duration = time.time() - start
print("LZMA dump duration: %0.3fs" % lzma_dump_duration)
###############################################################################
# Then measure the size of the lzma dump data on disk:
lzma_file_size = os.stat(pickle_file).st_size / 1e6
print("LZMA file size: %0.3fMB" % lzma_file_size)
###############################################################################
# Finally measure the time spent for loading the lzma data:
start = time.time()
with open(pickle_file, 'rb') as f:
load(f)
lzma_load_duration = time.time() - start
print("LZMA load duration: %0.3fs" % lzma_load_duration)
###############################################################################
# Dump and load the dataset using the LZ4 compression method
# ----------------------------------------------------------
#
# The LZ4 compression method is known to be one of the fastest available
# compression methods, with a compression ratio a bit lower than Zlib. In
# most cases, this method is a good choice.
###############################################################################
# .. note:: In order to use LZ4 compression with Joblib, the
# `lz4 <https://pypi.python.org/pypi/lz4>`_ package must be installed
# on the system.
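###############################################################################
# (Editor's note.) If the package is missing, it can usually be installed with
# ``pip install lz4``.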
###############################################################################
# Start by measuring the time spent for dumping the lz4 data:
start = time.time()
with open(pickle_file, 'wb') as f:
dump(data, f, compress='lz4')
lz4_dump_duration = time.time() - start
print("LZ4 dump duration: %0.3fs" % lz4_dump_duration)
###############################################################################
# Then measure the size of the lz4 dump data on disk:
lz4_file_size = os.stat(pickle_file).st_size / 1e6
print("LZ4 file size: %0.3fMB" % lz4_file_size)
###############################################################################
# Finally measure the time spent for loading the lz4 data:
start = time.time()
with open(pickle_file, 'rb') as f:
load(f)
lz4_load_duration = time.time() - start
print("LZ4 load duration: %0.3fs" % lz4_load_duration)
###############################################################################
# Comparing the results
# ---------------------
import numpy as np
import matplotlib.pyplot as plt
N = 4
load_durations = (raw_load_duration, lz4_load_duration, zlib_load_duration,
lzma_load_duration)
dump_durations = (raw_dump_duration, lz4_dump_duration, zlib_dump_duration,
lzma_dump_duration)
file_sizes = (raw_file_size, lz4_file_size, zlib_file_size, lzma_file_size)
ind = np.arange(N)
width = 0.5
plt.figure(1, figsize=(5, 4))
p1 = plt.bar(ind, dump_durations, width)
p2 = plt.bar(ind, load_durations, width, bottom=dump_durations)
plt.ylabel('Time in seconds')
plt.title('Dump and load durations')
plt.xticks(ind, ('Raw', 'LZ4', 'Zlib', 'LZMA'))
plt.yticks(np.arange(0, lzma_load_duration + lzma_dump_duration))
plt.legend((p1[0], p2[0]), ('Dump duration', 'Load duration'))
###############################################################################
# Compared with other compressors, LZ4 is clearly the fastest, especially for
# dumping compressed data on disk. In this particular case, it can even be
# faster than the raw dump.
# Also note that dump and load durations depend on the I/O speed of the
# underlying storage: for example, with SSD hard drives the LZ4 compression
# will be slightly slower than raw dump/load, whereas with spinning hard disk
# drives (HDD) or remote storage (NFS), LZ4 is faster in general.
#
# LZMA and Zlib, even though they are always slower for dumping data, are
# quite fast when re-loading compressed data from disk.
plt.figure(2, figsize=(5, 4))
plt.bar(ind, file_sizes, width, log=True)
plt.ylabel('File size in MB')
plt.xticks(ind, ('Raw', 'LZ4', 'Zlib', 'LZMA'))
###############################################################################
# Compressed data obviously takes a lot less space on disk than raw data. LZMA
# is the best compression method in terms of compression rate. Zlib also has a
# better compression rate than LZ4.
plt.show()
###############################################################################
# Clear the pickle file
# ---------------------
import os
os.remove(pickle_file)
|
bsd-3-clause
|
FabriceSalvaire/PyValentina
|
Patro/GraphicEngine/Painter/MplPainter.py
|
1
|
4266
|
####################################################################################################
#
# Patro - A Python library to make patterns for fashion design
# Copyright (C) 2017 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
import logging
from matplotlib import pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
from Patro.GeometryEngine.Vector import Vector2D
from Patro.GraphicStyle import StrokeStyle
from .Painter import Painter, Tiler
####################################################################################################
_module_logger = logging.getLogger(__name__)
####################################################################################################
class MplPainter(Painter):
__STROKE_STYLE__ = {
StrokeStyle.NoPen: None,
StrokeStyle.SolidLine: 'solid', # '-'
StrokeStyle.DashLine: '', # Fixme:
StrokeStyle.DotLine: 'dotted', # ':'
StrokeStyle.DashDotLine: 'dashdot', # '--'
StrokeStyle.DashDotDotLine: '', # Fixme:
}
##############################################
def __init__(self, scene, paper):
super().__init__(scene)
self._paper = paper
self._figure = plt.figure(
# figsize=(self._paper.width_in, self._paper.height_in),
# dpi=200,
)
self._axes = self._figure.add_subplot(111)
bounding_box = scene.bounding_box
factor = 10 / 100
x_margin = bounding_box.x.length * factor
y_margin = bounding_box.y.length * factor
margin = max(x_margin, y_margin)
bounding_box = bounding_box.clone().enlarge(margin)
self._axes.set_xlim(bounding_box.x.inf, bounding_box.x.sup)
self._axes.set_ylim(bounding_box.y.inf, bounding_box.y.sup)
self._axes.set_aspect('equal')
self.paint()
##############################################
def show(self):
plt.show()
##############################################
def _add_path(self, item, vertices, codes):
path = Path(vertices, codes)
        path_style = item.path_style
        color = path_style.stroke_color.name
        line_style = self.__STROKE_STYLE__[path_style.stroke_style]
        line_width = path_style.line_width_as_float
patch = patches.PathPatch(path, edgecolor=color, facecolor='none', linewidth=line_width, linestyle=line_style)
self._axes.add_patch(patch)
##############################################
def paint_TextItem(self, item):
position = self.cast_position(item.position)
# Fixme: anchor position
self._axes.text(position.x, position.y, item.text)
##############################################
def paint_CircleItem(self, item):
center = list(self.cast_position(item.position))
circle = plt.Circle(center, .5, color='black')
self._axes.add_artist(circle)
##############################################
def paint_SegmentItem(self, item):
vertices = self.cast_item_coordinates(item)
codes = [Path.MOVETO, Path.LINETO]
self._add_path(item, vertices, codes)
##############################################
def paint_CubicBezierItem(self, item):
vertices = self.cast_item_coordinates(item)
codes = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
self._add_path(item, vertices, codes)
|
gpl-3.0
|
pypot/scikit-learn
|
sklearn/metrics/tests/test_regression.py
|
272
|
6066
|
from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
    # Checking for the condition in which both numerator and denominator are
    # zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
|
bsd-3-clause
|
ElchinValiyev/GameAI
|
Project_3/breakout/fuzzy_agent.py
|
1
|
1804
|
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl
import matplotlib.pyplot as plt
class FuzzyAgent:
def __init__(self):
# Generate universe variables
distance = ctrl.Antecedent(np.arange(-440, 441, 1), 'distance') # paddle.x - ball.x
paddle_speed = ctrl.Consequent(np.arange(-12,13,4), 'speed') # paddle.x +=paddle_speed
# Auto-membership function population is possible with .automf(3, 5, or 7)
# Generate fuzzy membership functions
distance['far right'] = fuzz.trimf(distance.universe, [-440, -250, -110])
distance['close right'] = fuzz.trimf(distance.universe, [-200, -10, 0])
distance['close left'] = fuzz.trimf(distance.universe, [0, 10, 200])
distance['far left'] = fuzz.trimf(distance.universe, [200, 440, 440])
paddle_speed.automf(7)
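        # Note: automf(7) populates seven ordered membership labels on the consequent
        # (from 'dismal' up to 'excellent'), which is what the rules below refer to.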
rule1 = ctrl.Rule(distance['far left'], paddle_speed['dismal'])
rule2 = ctrl.Rule(distance['close left'], paddle_speed['dismal'])
rule3 = ctrl.Rule(distance['close right'], paddle_speed['excellent'])
rule4 = ctrl.Rule(distance['far right'], paddle_speed['excellent'])
# rule5 = ctrl.Rule(distance['above'], paddle_speed['average'])
paddle_ctrl = ctrl.ControlSystem([rule1, rule2, rule3, rule4])
self.agent = ctrl.ControlSystemSimulation(paddle_ctrl)
def compute(self, distance):
# Pass inputs to the ControlSystem using Antecedent labels with Pythonic API
# Note: if you like passing many inputs all at once, use .inputs(dict_of_data)
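        # A sketch of that bulk form (the key must match the Antecedent label
        # 'distance' defined in __init__; shown here for illustration only):
        #   self.agent.inputs({'distance': distance})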
self.agent.input['distance'] = distance
# Crunch the numbers
self.agent.compute()
return self.agent.output['speed']
if __name__ == '__main__':
agent = FuzzyAgent()
    print(agent.compute(0))
|
mit
|
mjescobar/RF_Estimation
|
Clustering/clustering/spaceClustering.py
|
2
|
4653
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# spaceClustering.py
#
# Copyright 2014 Carlos "casep" Sepulveda <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Performs basic clustering based on the size of the RF
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../..','LIB'))
import rfestimationLib as rfe
import argparse # argument parsing
import numpy as np # Numpy
import densityPeaks as dp
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from sklearn import mixture
clustersColours = ['blue', 'red', 'green', 'orange', 'black','yellow', \
'#ff006f','#00e8ff','#fcfa00', '#ff0000', '#820c2c', \
'#ff006f', '#af00ff','#0200ff','#008dff','#00e8ff', \
'#0c820e','#28ea04','#ea8404','#c8628f','#6283ff', \
'#5b6756','#0c8248','k','#820cff','#932c11', \
'#002c11','#829ca7']
def main():
parser = argparse.ArgumentParser(prog='spaceClustering.py',
		description='Performs basic clustering based on the size of the RF',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sourceFolder',
help='Source folder',
type=str, required=True)
parser.add_argument('--outputFolder',
help='Output folder',
type=str, required=True)
parser.add_argument('--percentage',
help='Percentage used to calculate the distance',
type=float, default='2', required=False)
parser.add_argument('--xSize',
help='X size of the stimuli',
type=int, default='31', required=False)
parser.add_argument('--ySize',
help='Y size of the stimuli',
type=int, default='31', required=False)
args = parser.parse_args()
#Source folder of the files with the timestamps
sourceFolder = rfe.fixPath(args.sourceFolder)
if not os.path.exists(sourceFolder):
print ''
print 'Source folder does not exists ' + sourceFolder
print ''
sys.exit()
#Output folder for the graphics
outputFolder = rfe.fixPath(args.outputFolder)
if not os.path.exists(outputFolder):
try:
os.makedirs(outputFolder)
except:
print ''
print 'Unable to create folder ' + outputFolder
print ''
sys.exit()
units = []
dataCluster = np.zeros((1,7))
for unitFile in sorted(os.listdir(sourceFolder)):
if os.path.isdir(sourceFolder+unitFile):
unitName = unitFile.rsplit('_', 1)[0]
fitResult = rfe.loadFitMatrix(sourceFolder,unitFile)
dataCluster = np.vstack((dataCluster,[fitResult[0][2],\
fitResult[0][3],fitResult[0][1],fitResult[0][4],\
fitResult[0][5],fitResult[0][2]*fitResult[0][3]*3,\
(fitResult[0][2]+fitResult[0][3])/2]))
units.append(unitName)
# remove the first row of zeroes
dataCluster = dataCluster[1:,:]
percentage = args.percentage #exploratory, '...for large data sets, the results of the analysis are robust with respect to the choice of d_c'
# Area instead o Radius
#clustersNumber, labels = dp.predict(dataCluster[:,0:2], percentage)
clustersNumber, labels = dp.predict(dataCluster[:,5:7], percentage)
gmix = mixture.GMM(n_components=clustersNumber, covariance_type='spherical')
gmix.fit(dataCluster[:,5:7])
labels = gmix.predict(dataCluster[:,5:7])
for clusterId in range(clustersNumber):
clusterFile = open(outputFolder+'cluster_'+str(clusterId)+'.csv', "w")
for unit in range(labels.size):
if labels[unit] == clusterId:
clusterFile.write(units[unit]+'\n')
		clusterFile.close()
xSize = args.xSize
ySize = args.ySize
# generate graphics of all ellipses
for clusterId in range(clustersNumber):
dataGrilla = np.zeros((1,7))
for unitId in range(dataCluster.shape[0]):
if labels[unitId] == clusterId:
datos=np.zeros((1,7))
datos[0]=dataCluster[unitId,:]
dataGrilla = np.append(dataGrilla,datos, axis=0)
## remove the first row of zeroes
dataGrilla = dataGrilla[1:,:]
rfe.graficaGrilla(dataGrilla, outputFolder+'Grilla_'+str(clusterId)+'.png', 0, clustersColours[clusterId], xSize, ySize)
return 0
if __name__ == '__main__':
main()
|
gpl-2.0
|
rohangoel96/IRCLogParser
|
IRCLogParser/lib/deprecated/scripts/parser-time_series.py
|
2
|
6235
|
#This code generates a time-series graph. Such a graph has users on the y axis and msg transmission time on the x axis. This means that if there exist 4 users - A, B, C, D -
#then if any of these users sends a message at time t, we put a dot in front of that user at time t in the graph.
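#The plotting at the end of this script boils down to the following sketch
#(a rough illustration using the conv_time and Numofmsg lists built below):
#   df = pd.DataFrame({'Time': conv_time, 'Message_Sent': Numofmsg}).set_index('Time')
#   df.plot(style=['o'])  # one dot per message at (time, sender index)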
import os.path
import re
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import pylab
import pygraphviz as pygraphviz
import numpy
import datetime
import time
import pandas as pd
#yofel shadeslayer, yofel pheonixbrd
for iterator in range(2,3):
for fileiterator in range(1,2):
if(fileiterator<10):
sttring="/home/dhruvie/LOP/2013/"+str(iterator)+"/0"
sttring=sttring+str(fileiterator)+"/#kubuntu-devel.txt"
else:
sttring="/home/dhruvie/LOP/2013/"+str(iterator)+"/"
sttring=sttring+str(fileiterator)+"/#kubuntu-devel.txt"
if not os.path.exists(sttring):
continue
with open(sttring) as f:
content = f.readlines() #contents stores all the lines of the file kubunutu-devel
nicks = [] #list of all the nicknames
send_time = [] #list of all the times a user sends a message to another user
conv_time = []
Numofmsg = []
channel= "#kubuntu-devel" #channel name
groups = ['yofel_','phoenix_firebrd', 'shadeslayer'] #first will be assigned UID 50, second 100, third 150 and so on
groupsnum = []
groupsnum.append(50)
for i in range(0, len(groups)-1):
groupsnum.append(50+groupsnum[i])
#code for getting all the nicknames in a list
for i in content:
if(i[0] != '=' and "] <" in i and "> " in i):
m = re.search(r"\<(.*?)\>", i)
if m.group(0) not in nicks:
nicks.append(m.group(0)) #used regex to get the string between <> and appended it to the nicks list
for i in xrange(0,len(nicks)):
nicks[i] = nicks[i][1:-1] #removed <> from the nicknames
for i in xrange(0,len(nicks)):
if(nicks[i][len(nicks[i])-1]=='\\'):
nicks[i]=nicks[i][:-1]
nicks[i]=nicks[i]+'CR'
for j in content:
if(j[0]=='=' and "changed the topic of" not in j):
line1=j[j.find("=")+1:j.find(" is")]
line2=j[j.find("wn as")+1:j.find("\n")]
line1=line1[3:]
line2=line2[5:]
if(line1[len(line1)-1]=='\\'):
line1=line1[:-1]
line1=line1 + 'CR'
if(line2[len(line2)-1]=='\\'):
line2=line2[:-1]
line2=line2 + 'CR'
if line1 not in nicks:
nicks.append(line1)
if line2 not in nicks:
nicks.append(line2)
        #code for forming list of lists for avoiding nickname duplication
x=[[] for i in range(len(nicks))]
for line in content:
if(line[0]=='=' and "changed the topic of" not in line):
line1=line[line.find("=")+1:line.find(" is")]
line2=line[line.find("wn as")+1:line.find("\n")]
line1=line1[3:]
line2=line2[5:]
if(line1[len(line1)-1]=='\\'):
line1=line1[:-1]
line1=line1 + 'CR'
if(line2[len(line2)-1]=='\\'):
line2=line2[:-1]
line2=line2 + 'CR'
for i in range(len(nicks)):
if line1 in x[i]:
x[i].append(line1)
x[i].append(line2)
break
if not x[i]:
x[i].append(line1)
x[i].append(line2)
break
#code for making relation map between clients
for line in content:
flag_comma = 0
if(line[0] != '=' and "] <" in line and "> " in line):
m = re.search(r"\<(.*?)\>", line)
var = m.group(0)[1:-1]
if(var[len(var)-1]=='\\'):
var=var[:-1]
var=var + 'CR'
for d in range(len(nicks)):
if var in x[d]:
pehla = x[d][0]
break
else:
pehla=var
for i in nicks:
data=[e.strip() for e in line.split(':')]
data[1]=data[1][data[1].find(">")+1:len(data[1])]
data[1]=data[1][1:]
if not data[1]:
break
for ik in xrange(0,len(data)):
if(data[ik] and data[ik][len(data[ik])-1]=='\\'):
data[ik]=data[ik][:-1]
data[ik]=data[ik] + 'CR'
for z in data:
if(z==i):
send_time.append(line[1:6])
if(var != i):
for d in range(len(nicks)):
if i in x[d]:
second=x[d][0]
break
else:
second=i
if pehla in groups and second in groups:
conv_time.append(line[1:6]) #We store time and index of sender, so that in our graph we can put a mark on that index at that time.
Numofmsg.append(groupsnum[groups.index(pehla)])
if "," in data[1]:
flag_comma = 1
data1=[e.strip() for e in data[1].split(',')]
for ij in xrange(0,len(data1)):
if(data1[ij] and data1[ij][len(data1[ij])-1]=='\\'):
data1[ij]=data1[ij][:-1]
data1[ij]=data1[ij] + 'CR'
for j in data1:
if(j==i):
send_time.append(line[1:6])
if(var != i):
for d in range(len(nicks)):
if i in x[d]:
second=x[d][0]
break
else:
second=i
if pehla in groups and second in groups:
conv_time.append(line[1:6])
Numofmsg.append(groupsnum[groups.index(pehla)])
if(flag_comma == 0):
search2=line[line.find(">")+1:line.find(", ")]
search2=search2[1:]
if(search2[len(search2)-1]=='\\'):
search2=search2[:-1]
search2=search2 + 'CR'
if(search2==i):
send_time.append(line[1:6])
if(var != i):
for d in range(len(nicks)):
if i in x[d]:
second=x[d][0]
break
else:
second=i
if pehla in groups and second in groups:
conv_time.append(line[1:6])
Numofmsg.append(groupsnum[groups.index(pehla)])
print(conv_time)
print(Numofmsg)
data = {'Time': conv_time,
'Message_Sent': Numofmsg}
df = pd.DataFrame(data, columns = ['Time', 'Message_Sent'])
df.index = df['Time']
del df['Time']
df
print(df)
axes = plt.gca()
axes.set_ylim([0,200])
df.plot(ax=axes ,style=['o','rx'])
plt.savefig('time-series.png')
plt.close()
        #Here we have plotted the graph with msg transmission time as the x axis and users (A(50), B(100), C(150), ...) as the y axis.
        #A user who sends more messages will have a higher density of dots in front of their index.
|
mit
|
BiaDarkia/scikit-learn
|
examples/svm/plot_svm_regression.py
|
37
|
1505
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
# #############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
# #############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
# #############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
# #############################################################################
# Look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
|
bsd-3-clause
|
nkhuyu/blaze
|
blaze/expr/tests/test_table.py
|
12
|
25536
|
from __future__ import absolute_import, division, print_function
import pytest
import pandas as pd
from operator import (add, sub, mul, floordiv, mod, pow, truediv, eq, ne, lt,
gt, le, ge, getitem)
from functools import partial
from datetime import datetime
import datashape
from datashape.predicates import iscollection, isscalar
from blaze import CSV, Table
from blaze.expr import (TableSymbol, projection, Field, selection, Broadcast,
join, cos, by, exp, distinct, Apply,
broadcast, eval_str, merge, common_subexpression, sum,
Label, ReLabel, Head, Sort, any, summary,
Summary, count, symbol, Field, discover,
max, min, label, Symbol, transform
)
from blaze.compatibility import PY3, builtins
from blaze.utils import raises, tmpfile
from datashape import dshape, var, int32, int64, Record, DataShape
from toolz import identity, first
import numpy as np
def test_dshape():
t = TableSymbol('t', '{name: string, amount: int}')
assert t.dshape == dshape('var * {name: string, amount: int}')
def test_length():
t = TableSymbol('t', '10 * {name: string, amount: int}')
s = TableSymbol('s', '{name:string, amount:int}')
assert t.dshape == dshape('10 * {name: string, amount: int}')
assert len(t) == 10
assert len(t.name) == 10
assert len(t[['name']]) == 10
assert len(t.sort('name')) == 10
assert len(t.head(5)) == 5
assert len(t.head(50)) == 10
with pytest.raises(ValueError):
len(s)
def test_tablesymbol_eq():
assert not (TableSymbol('t', '{name: string}')
== TableSymbol('v', '{name: string}'))
def test_table_name():
t = TableSymbol('t', '10 * {people: string, amount: int}')
r = TableSymbol('r', 'int64')
with pytest.raises(AttributeError):
t.name
with pytest.raises(AttributeError):
r.name
def test_shape():
t = TableSymbol('t', '{name: string, amount: int}')
assert t.shape
assert isinstance(t.shape, tuple)
assert len(t.shape) == 1
def test_eq():
assert TableSymbol('t', '{a: string, b: int}').isidentical(
TableSymbol('t', '{a: string, b: int}'))
assert not TableSymbol('t', '{b: string, a: int}').isidentical(
TableSymbol('t', '{a: string, b: int}'))
def test_arithmetic():
t = TableSymbol('t', '{x: int, y: int, z: int}')
x, y, z = t['x'], t['y'], t['z']
exprs = [x + 1, x + y, 1 + y,
x - y, 1 - x, x - 1,
x ** y, x ** 2, 2 ** x,
x * y, x ** 2, 2 ** x,
x / y, x / 2, 2 / x,
x % y, x % 2, 2 % x]
def test_column():
t = TableSymbol('t', '{name: string, amount: int}')
assert t.fields== ['name', 'amount']
assert eval(str(t.name)) == t.name
assert str(t.name) == "t.name"
with pytest.raises(AttributeError):
t.name.balance
with pytest.raises((NotImplementedError, ValueError)):
getitem(t, set('balance'))
def test_symbol_projection_failures():
t = TableSymbol('t', '10 * {name: string, amount: int}')
with pytest.raises(ValueError):
t._project(['name', 'id'])
with pytest.raises(AttributeError):
t.foo
with pytest.raises(TypeError):
t._project(t.dshape)
def test_Projection():
t = TableSymbol('t', '{name: string, amount: int, id: int32}')
p = projection(t, ['amount', 'name'])
assert p.schema == dshape('{amount: int32, name: string}')
print(t['amount'].dshape)
print(dshape('var * int32'))
assert t['amount'].dshape == dshape('var * int32')
assert t['amount']._name == 'amount'
assert eval(str(p)).isidentical(p)
assert p._project(['amount','name']) == p[['amount','name']]
with pytest.raises(ValueError):
p._project('balance')
def test_Projection_retains_shape():
t = TableSymbol('t', '5 * {name: string, amount: int, id: int32}')
assert t[['name', 'amount']].dshape == \
dshape('5 * {name: string, amount: int}')
def test_indexing():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
assert t[['amount', 'id']] == projection(t, ['amount', 'id'])
assert t['amount'].isidentical(Field(t, 'amount'))
def test_relational():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
r = (t['name'] == 'Alice')
assert 'bool' in str(r.dshape)
assert r._name
def test_selection():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
s = selection(t, t['name'] == 'Alice')
f = selection(t, t['id'] > t['amount'])
p = t[t['amount'] > 100]
with pytest.raises(ValueError):
selection(t, p)
assert s.dshape == t.dshape
def test_selection_typecheck():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
assert raises(TypeError, lambda: t[t['amount'] + t['id']])
assert raises(TypeError, lambda: t[t['name']])
def test_selection_by_indexing():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
result = t[t['name'] == 'Alice']
assert t.schema == result.schema
assert 'Alice' in str(result)
def test_selection_by_getattr():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
result = t[t.name == 'Alice']
assert t.schema == result.schema
assert 'Alice' in str(result)
def test_selection_path_check():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
t2 = t[t.name == 'Alice']
t3 = t2[t2.amount > 0]
def test_path_issue():
t = TableSymbol('t', "{topic: string, word: string, result: ?float64}")
t2 = transform(t, sizes=t.result.map(lambda x: (x - MIN)*10/(MAX - MIN),
schema='float64', name='size'))
assert builtins.any(t2.sizes.isidentical(node) for node in t2.children)
def test_getattr_doesnt_override_properties():
t = TableSymbol('t', '{_subs: string, schema: string}')
assert callable(t._subs)
assert isinstance(t.schema, DataShape)
def test_dir_contains_columns():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
result = dir(t)
columns_set = set(t.fields)
assert set(result) & columns_set == columns_set
def test_selection_consistent_children():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
expr = t['name'][t['amount'] < 0]
assert list(expr.fields) == ['name']
def test_str():
import re
t = TableSymbol('t', '{name: string, amount: int, id: int}')
expr = t[t['amount'] < 0]['id'] * 2
assert '<class' not in str(expr)
assert not re.search('0x[0-9a-f]+', str(expr))
assert eval(str(expr)) == expr
assert '*' in repr(expr)
def test_join():
t = TableSymbol('t', '{name: string, amount: int}')
s = TableSymbol('t', '{name: string, id: int}')
r = TableSymbol('r', '{name: string, amount: int}')
q = TableSymbol('q', '{name: int}')
j = join(t, s, 'name', 'name')
assert j.schema == dshape('{name: string, amount: int, id: int}')
assert join(t, s, 'name') == join(t, s, 'name')
assert join(t, s, 'name').on_left == 'name'
assert join(t, s, 'name').on_right == 'name'
assert join(t, r, ('name', 'amount')).on_left == ['name', 'amount']
with pytest.raises(TypeError):
join(t, q, 'name')
with pytest.raises(ValueError):
join(t, s, how='upside_down')
def test_join_different_on_right_left_columns():
t = TableSymbol('t', '{x: int, y: int}')
s = TableSymbol('t', '{a: int, b: int}')
j = join(t, s, 'x', 'a')
assert j.on_left == 'x'
assert j.on_right == 'a'
def test_joined_column_first_in_schema():
t = TableSymbol('t', '{x: int, y: int, z: int}')
s = TableSymbol('s', '{w: int, y: int}')
assert join(t, s).schema == dshape('{y: int, x: int, z: int, w: int}')
def test_outer_join():
t = TableSymbol('t', '{name: string, amount: int}')
s = TableSymbol('t', '{name: string, id: int}')
jleft = join(t, s, 'name', 'name', how='left')
jright = join(t, s, 'name', 'name', how='right')
jinner = join(t, s, 'name', 'name', how='inner')
jouter = join(t, s, 'name', 'name', how='outer')
js = [jleft, jright, jinner, jouter]
assert len(set(js)) == 4 # not equal
assert jinner.schema == dshape('{name: string, amount: int, id: int}')
assert jleft.schema == dshape('{name: string, amount: int, id: ?int}')
assert jright.schema == dshape('{name: string, amount: ?int, id: int}')
assert jouter.schema == dshape('{name: string, amount: ?int, id: ?int}')
# Default behavior
assert join(t, s, 'name', 'name', how='inner') == \
join(t, s, 'name', 'name')
def test_join_default_shared_columns():
t = TableSymbol('t', '{name: string, amount: int}')
s = TableSymbol('t', '{name: string, id: int}')
assert join(t, s) == join(t, s, 'name', 'name')
def test_multi_column_join():
a = TableSymbol('a', '{x: int, y: int, z: int}')
b = TableSymbol('b', '{w: int, x: int, y: int}')
j = join(a, b, ['x', 'y'])
assert set(j.fields) == set('wxyz')
assert j.on_left == j.on_right == ['x', 'y']
assert hash(j)
assert j.fields == ['x', 'y', 'z', 'w']
def test_traverse():
t = TableSymbol('t', '{name: string, amount: int}')
assert t in list(t._traverse())
expr = t.amount.sum()
trav = list(expr._traverse())
assert builtins.any(t.amount.isidentical(x) for x in trav)
def test_unary_ops():
t = TableSymbol('t', '{name: string, amount: int}')
expr = cos(exp(t['amount']))
assert 'cos' in str(expr)
assert '~' in str(~(t.amount > 0))
def test_reduction():
t = TableSymbol('t', '{name: string, amount: int32}')
r = sum(t['amount'])
assert r.dshape in (dshape('int64'),
dshape('{amount: int64}'),
dshape('{amount_sum: int64}'))
assert 'amount' not in str(t.count().dshape)
assert t.count().dshape[0] in (int32, int64)
assert 'int' in str(t.count().dshape)
assert 'int' in str(t.nunique().dshape)
assert 'string' in str(t['name'].max().dshape)
assert 'string' in str(t['name'].min().dshape)
assert 'string' not in str(t.count().dshape)
t = TableSymbol('t', '{name: string, amount: real, id: int}')
assert 'int' in str(t['id'].sum().dshape)
assert 'int' not in str(t['amount'].sum().dshape)
def test_reduction_name():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert (t.amount + t.id).sum()._name
def test_max_min_class():
t = TableSymbol('t', '{name: string, amount: int32}')
assert str(max(t).dshape) == '{name: string, amount: int32}'
assert str(min(t).dshape) == '{name: string, amount: int32}'
@pytest.fixture
def symsum():
t = TableSymbol('t', '{name: string, amount: int32}')
return t, t.amount.sum()
@pytest.fixture
def ds():
return dshape("var * { "
"transaction_key : int64, "
"user_from_key : int64, "
"user_to_key : int64, "
"date : int64, "
"value : float64 "
"}")
def test_discover_dshape_symbol(ds):
t_ds = TableSymbol('t', dshape=ds)
assert t_ds.fields is not None
t_sch = TableSymbol('t', dshape=ds.subshape[0])
assert t_sch.fields is not None
assert t_ds.isidentical(t_sch)
class TestScalarArithmetic(object):
ops = {'+': add, '-': sub, '*': mul, '/': truediv, '//': floordiv, '%': mod,
'**': pow, '==': eq, '!=': ne, '<': lt, '>': gt, '<=': le, '>=': ge}
def test_scalar_arith(self, symsum):
def runner(f):
result = f(r, 1)
assert eval('r %s 1' % op).isidentical(result)
a = f(r, r)
b = eval('r %s r' % op)
assert a is b or a.isidentical(b)
result = f(1, r)
assert eval('1 %s r' % op).isidentical(result)
t, r = symsum
r = t.amount.sum()
for op, f in self.ops.items():
runner(f)
def test_scalar_usub(self, symsum):
t, r = symsum
result = -r
assert eval(str(result)).isidentical(result)
@pytest.mark.xfail
def test_scalar_uadd(self, symsum):
t, r = symsum
+r
def test_summary():
t = TableSymbol('t', '{id: int32, name: string, amount: int32}')
s = summary(total=t.amount.sum(), num=t.id.count())
assert s.dshape == dshape('{num: int32, total: int64}')
assert hash(s)
assert eval(str(s)).isidentical(s)
assert 'summary(' in str(s)
assert 'total=' in str(s)
assert 'num=' in str(s)
assert str(t.amount.sum()) in str(s)
assert not summary(total=t.amount.sum())._child.isidentical(
t.amount.sum())
assert iscollection(summary(total=t.amount.sum() + 1)._child.dshape)
def test_reduction_arithmetic():
t = TableSymbol('t', '{id: int32, name: string, amount: int32}')
expr = t.amount.sum() + 1
assert eval(str(expr)).isidentical(expr)
def test_Distinct():
t = TableSymbol('t', '{name: string, amount: int32}')
r = distinct(t['name'])
print(r.dshape)
assert r.dshape == dshape('var * string')
assert r._name == 'name'
r = t.distinct()
assert r.dshape == t.dshape
def test_by():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
r = by(t['name'], total=sum(t['amount']))
print(r.schema)
assert isinstance(r.schema[0], Record)
assert str(r.schema[0]['name']) == 'string'
def test_by_summary():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
a = by(t['name'], sum=sum(t['amount']))
b = by(t['name'], summary(sum=sum(t['amount'])))
assert a.isidentical(b)
def test_by_summary_printing():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
assert str(by(t.name, total=sum(t.amount))) == \
'by(t.name, total=sum(t.amount))'
def test_by_columns():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert len(by(t['id'], total=t['amount'].sum()).fields) == 2
assert len(by(t['id'], count=t['id'].count()).fields) == 2
print(by(t, count=t.count()).fields)
assert len(by(t, count=t.count()).fields) == 4
def test_sort():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
s = t.sort('amount', ascending=True)
print(str(s))
assert eval(str(s)).isidentical(s)
assert s.schema == t.schema
assert t['amount'].sort().key == 'amount'
def test_head():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
s = t.head(10)
assert eval(str(s)).isidentical(s)
assert s.schema == t.schema
def test_label():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
quantity = (t['amount'] + 100).label('quantity')
assert eval(str(quantity)).isidentical(quantity)
assert quantity.fields == ['quantity']
with pytest.raises(ValueError):
quantity['balance']
def test_map_label():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
c = t.amount.map(identity, schema='int32')
assert c.label('bar')._name == 'bar'
assert c.label('bar')._child.isidentical(c._child)
def test_columns():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert list(t.fields) == ['name', 'amount', 'id']
assert list(t['name'].fields) == ['name']
(t['amount'] + 1).fields
def test_relabel():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
rl = t.relabel({'name': 'NAME', 'id': 'ID'})
rlc = t['amount'].relabel({'amount': 'BALANCE'})
assert eval(str(rl)).isidentical(rl)
print(rl.fields)
assert rl.fields == ['NAME', 'amount', 'ID']
assert not isscalar(rl.dshape.measure)
assert isscalar(rlc.dshape.measure)
def test_relabel_join():
names = TableSymbol('names', '{first: string, last: string}')
siblings = join(names.relabel({'last': 'left'}),
names.relabel({'last': 'right'}), 'first')
assert siblings.fields == ['first', 'left', 'right']
def test_map():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
inc = lambda x: x + 1
assert isscalar(t['amount'].map(inc, schema='int').dshape.measure)
s = t['amount'].map(inc, schema='{amount: int}')
assert not isscalar(s.dshape.measure)
assert s.dshape == dshape('var * {amount: int}')
expr = (t[['name', 'amount']]
.map(identity, schema='{name: string, amount: int}'))
assert expr._name is None
@pytest.mark.xfail(reason="Not sure that we should even support this")
def test_map_without_any_info():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert iscolumn(t['amount'].map(inc, 'int'))
assert not iscolumn(t[['name', 'amount']].map(identity))
def test_apply():
t = Symbol('t', 'var * {name: string, amount: int32, id: int32}')
s = t['amount'].apply(sum, dshape='real')
r = t['amount'].apply(sum, dshape='3 * real')
assert s.dshape == dshape('real')
assert r.schema == dshape('real')
def test_TableSymbol_printing_is_legible():
accounts = TableSymbol('accounts', '{name: string, balance: int, id: int}')
expr = (exp(accounts.balance * 10)) + accounts['id']
assert "exp(accounts.balance * 10)" in str(expr)
assert "+ accounts.id" in str(expr)
def test_merge():
t = TableSymbol('t', 'int64')
p = TableSymbol('p', '{amount:int}')
accounts = TableSymbol('accounts',
'{name: string, balance: int32, id: int32}')
new_amount = (accounts.balance * 1.5).label('new')
c = merge(accounts[['name', 'balance']], new_amount)
assert c.fields == ['name', 'balance', 'new']
assert c.schema == dshape('{name: string, balance: int32, new: float64}')
with pytest.raises(ValueError):
merge(t, t)
with pytest.raises(ValueError):
merge(t, p)
def test_merge_repeats():
accounts = TableSymbol('accounts',
'{name: string, balance: int32, id: int32}')
with pytest.raises(ValueError):
merge(accounts, (accounts.balance + 1).label('balance'))
def test_merge_project():
accounts = TableSymbol('accounts',
'{name: string, balance: int32, id: int32}')
new_amount = (accounts['balance'] * 1.5).label('new')
c = merge(accounts[['name', 'balance']], new_amount)
assert c['new'].isidentical(new_amount)
assert c['name'].isidentical(accounts['name'])
assert c[['name', 'new']].isidentical(merge(accounts.name, new_amount))
inc = lambda x: x + 1
def test_subterms():
a = TableSymbol('a', '{x: int, y: int, z: int}')
assert list(a._subterms()) == [a]
assert set(a['x']._subterms()) == set([a, a['x']])
assert set(a['x'].map(inc, 'int')._subterms()) == \
set([a, a['x'], a['x'].map(inc, 'int')])
assert a in set((a['x'] + 1)._subterms())
def test_common_subexpression():
a = TableSymbol('a', '{x: int, y: int, z: int}')
assert common_subexpression(a).isidentical(a)
assert common_subexpression(a, a['x']).isidentical(a)
assert common_subexpression(a['y'] + 1, a['x']).isidentical(a)
assert common_subexpression(a['x'].map(inc, 'int'), a['x']).isidentical(a['x'])
def test_schema_of_complex_interaction():
a = TableSymbol('a', '{x: int, y: int, z: int}')
expr = (a['x'] + a['y']) / a['z']
assert expr.schema == dshape('float64')
expr = expr.label('foo')
assert expr.schema == dshape('float64')
def iscolumn(x):
return isscalar(x.dshape.measure)
def test_iscolumn():
a = TableSymbol('a', '{x: int, y: int, z: int}')
assert not iscolumn(a)
assert iscolumn(a['x'])
assert not iscolumn(a[['x', 'y']])
assert not iscolumn(a[['x']])
assert iscolumn((a['x'] + a['y']))
assert iscolumn(a['x'].distinct())
assert not iscolumn(a[['x']].distinct())
assert not iscolumn(by(a['x'], total=a['y'].sum()))
assert iscolumn(a['x'][a['x'] > 1])
assert not iscolumn(a[['x', 'y']][a['x'] > 1])
assert iscolumn(a['x'].sort())
assert not iscolumn(a[['x', 'y']].sort())
assert iscolumn(a['x'].head())
assert not iscolumn(a[['x', 'y']].head())
assert iscolumn(TableSymbol('b', 'int'))
assert not iscolumn(TableSymbol('b', '{x: int}'))
def test_discover():
schema = '{x: int, y: int, z: int}'
a = TableSymbol('a', schema)
assert discover(a) == var * schema
def test_improper_selection():
t = TableSymbol('t', '{x: int, y: int, z: int}')
assert raises(Exception, lambda: t[t['x'] > 0][t.sort()[t['y' > 0]]])
def test_serializable():
t = TableSymbol('t', '{id: int, name: string, amount: int}')
import pickle
t2 = pickle.loads(pickle.dumps(t))
assert t.isidentical(t2)
s = TableSymbol('t', '{id: int, city: string}')
expr = join(t[t.amount < 0], s).sort('id').city.head()
expr2 = pickle.loads(pickle.dumps(expr))
assert expr.isidentical(expr2)
def test_table_coercion():
from datetime import date
t = TableSymbol('t', '{name: string, amount: int, timestamp: ?date}')
assert (t.amount + '10').rhs == 10
assert (t.timestamp < '2014-12-01').rhs == date(2014, 12, 1)
def test_isnan():
from blaze import isnan
t = TableSymbol('t', '{name: string, amount: real, timestamp: ?date}')
for expr in [t.amount.isnan(), ~t.amount.isnan()]:
assert eval(str(expr)).isidentical(expr)
assert iscollection(t.amount.isnan().dshape)
assert 'bool' in str(t.amount.isnan().dshape)
def test_distinct_name():
t = TableSymbol('t', '{id: int32, name: string}')
assert t.name.isidentical(t['name'])
assert t.distinct().name.isidentical(t.distinct()['name'])
assert t.id.distinct()._name == 'id'
assert t.name._name == 'name'
def test_leaves():
t = TableSymbol('t', '{id: int32, name: string}')
v = TableSymbol('v', '{id: int32, city: string}')
x = symbol('x', 'int32')
assert t._leaves() == [t]
assert t.id._leaves() == [t]
assert by(t.name, count=t.id.nunique())._leaves() == [t]
assert join(t, v)._leaves() == [t, v]
assert join(v, t)._leaves() == [v, t]
assert (x + 1)._leaves() == [x]
@pytest.fixture
def t():
return TableSymbol('t', '{id: int, amount: float64, name: string}')
def funcname(x, y='<lambda>'):
if PY3:
return 'TestRepr.%s.<locals>.%s' % (x, y)
return 'test_table.%s' % y
class TestRepr(object):
def test_partial_lambda(self, t):
expr = t.amount.map(partial(lambda x, y: x + y, 1))
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=partial(%s, 1), "
"_schema=None, _name0=None)" %
funcname('test_partial_lambda'))
def test_lambda(self, t):
expr = t.amount.map(lambda x: x)
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=%s, _schema=None, _name0=None)" %
funcname('test_lambda'))
def test_partial(self, t):
def myfunc(x, y):
return x + y
expr = t.amount.map(partial(myfunc, 1))
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=partial(%s, 1), "
"_schema=None, _name0=None)" % funcname('test_partial',
'myfunc'))
def test_builtin(self, t):
expr = t.amount.map(datetime.fromtimestamp)
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=datetime.fromtimestamp, _schema=None,"
" _name0=None)")
def test_udf(self, t):
def myfunc(x):
return x + 1
expr = t.amount.map(myfunc)
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=%s, _schema=None,"
" _name0=None)" % funcname('test_udf', 'myfunc'))
def test_nested_partial(self, t):
def myfunc(x, y, z):
return x + y + z
f = partial(partial(myfunc, 2), 1)
expr = t.amount.map(f)
s = str(expr)
assert s == ("Map(_child=t.amount, func=partial(partial(%s, 2), 1),"
" _schema=None, _name0=None)" %
funcname('test_nested_partial', 'myfunc'))
def test_count_values():
t = TableSymbol('t', '{name: string, amount: int, city: string}')
assert t.name.count_values(sort=False).isidentical(
by(t.name, count=t.name.count()))
assert t.name.count_values(sort=True).isidentical(
by(t.name, count=t.name.count()).sort('count', ascending=False))
def test_dir():
t = TableSymbol('t', '{name: string, amount: int, dt: datetime}')
assert 'day' in dir(t.dt)
assert 'mean' not in dir(t.dt)
assert 'mean' in dir(t.amount)
assert 'like' not in dir(t[['amount', 'dt']])
assert 'any' not in dir(t.name)
def test_distinct_column():
t = TableSymbol('t', '{name: string, amount: int, dt: datetime}')
assert t.name.distinct().name.dshape == t.name.distinct().dshape
assert t.name.distinct().name.isidentical(t.name.distinct())
def test_columns_attribute_for_backwards_compatibility():
t = TableSymbol('t', '{name: string, amount: int, dt: datetime}')
assert t.columns == t.fields
assert 'columns' in dir(t)
assert 'columns' not in dir(t.name)
|
bsd-3-clause
|
rpp0/lora-phy-fingerprinting
|
tf_train.py
|
1
|
73515
|
#!/usr/bin/python2
# tf_train.py
#
# Collection of ML algorithms to fingerprint radio devices using Tensorflow.
# A high level overview of the functionality provided by this code is given in
# the paper entitled "Physical-Layer Fingerprinting of LoRa devices using
# Supervised and Zero-Shot Learning", which was presented at WiSec 2017. A VM
# containing the training data and scripts required to reproduce the results
# from our paper will be published on Zenodo. Please contact one of the authors
# for more information.
#
# The code provides an abstraction layer on top of Tensorflow, consisting of
# "Models" and "Layers", in order to build a "Classifier" for raw radio signals.
# If you plan on using this framework for your research, I would recommend using
# the library "Keras" to build the models instead of "raw" Tensorflow. Keras was
# developed concurrently with this work, and provides a more concise and mature
# implementation for the same types of models that are used here.
#
# Author: Pieter Robyns
# Contact: [email protected]
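#
# As a rough illustration of the Keras recommendation above (not part of this
# code base; the layer sizes and variable names below are placeholders), an
# equivalent MLP classifier built with Keras would look roughly like:
#
#   from tensorflow import keras
#   model = keras.Sequential([
#       keras.layers.Dense(num_hidden_neurons, activation='relu',
#                          input_shape=(num_features,)),
#       keras.layers.Dense(num_classes, activation='softmax'),
#   ])
#   model.compile(optimizer='adam', loss='categorical_crossentropy',
#                 metrics=['accuracy'])
#   model.fit(train_x, train_y, batch_size=batch_size, epochs=epochs)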
import tensorflow as tf
import colorama
import random
import numpy as np
import scipy.io as sio
import os
import configparser
import argparse
import preprocessing
import visualization
import pickle
import json
import sklearn
import utilities
from colorama import Fore,Back,Style
from pymongo import MongoClient
from pymongo.errors import OperationFailure, AutoReconnect
from scipy import stats
from sklearn.manifold import TSNE
from sklearn.svm import SVC
from mapping import Mapping
from cache import GenericCache
from datetime import datetime
from random import randint
from tensorflow.contrib.tensorboard.plugins import projector
from sklearn.cluster import DBSCAN
from itertools import combinations
from collections import defaultdict
# ----------------------------------------------------
# Globals
# ----------------------------------------------------
colorama.init(autoreset=True)
EPSILON = 0.00000000001
defaults = {
'exclude_classes': '',
'epochs': -1,
'num_zs_test_samples': 40,
}
cp = configparser.RawConfigParser(defaults)
flags = tf.app.flags
FLAGS = flags.FLAGS
# ----------------------------------------------------
# Static functions
# ----------------------------------------------------
def load_conf(conf): # Configure the classifier using settings from conf file
cp.read(conf)
# Flags
flags.DEFINE_string('logdir', '/tmp/tensorboard', 'Tensorboard summaries directory')
flags.DEFINE_string('trainedmodelsdir', cp.get("DEFAULT", "trained_models_path"), 'Trained models directory')
flags.DEFINE_string('dataset', cp.get("DEFAULT", "dataset"), 'Dataset type (mongo or matlab)')
flags.DEFINE_string('classifier', cp.get("DEFAULT", "classifier"), 'Type of classifier to use')
flags.DEFINE_string('clustering', cp.get("DEFAULT", "clustering"), 'Type of clustering to use if doing open set classification')
flags.DEFINE_string('model_name', cp.get("DEFAULT", "model_name"), 'Name of the experiment / model. Used for saving it')
flags.DEFINE_integer('limit', cp.getint("DEFAULT", "limit"), 'Limit input tensor to n samples')
flags.DEFINE_integer('num_train_samples', cp.getint("DEFAULT", "num_train_samples"), 'Number of training samples')
flags.DEFINE_integer('num_test_samples', cp.getint("DEFAULT", "num_test_samples"), 'Number of test samples')
flags.DEFINE_integer('num_zs_test_samples', cp.getint("DEFAULT", "num_zs_test_samples"), 'Number of zero shot test samples')
flags.DEFINE_integer('batch_size', cp.getint("DEFAULT", "batch_size"), 'Training batch size')
flags.DEFINE_integer('print_step', cp.getint("DEFAULT", "print_step"), 'Print step')
flags.DEFINE_integer('epochs', cp.getint("DEFAULT", "epochs"), 'Epochs to train')
flags.DEFINE_integer('sampling_freq', cp.getint("DEFAULT", "sampling_freq"), 'Sampling frequency')
flags.DEFINE_string('mode', cp.get("DEFAULT", "mode"), 'Analysis mode (ifreq, iphase, or fft)')
flags.DEFINE_float('keep_prob', cp.getfloat("DEFAULT", "keep_prob"), 'Probability to keep neuron when using CNN')
flags.DEFINE_integer('retrain_batch', cp.getint("DEFAULT", "retrain_batch"), 'Number of times to retrain the same batch (speeds up, but also overfits)')
flags.DEFINE_string('exclude_classes', cp.get("DEFAULT", "exclude_classes"), 'Classes to exclude from training')
# Mode specific options
if cp.get("DEFAULT", "dataset") == 'matlab': # TODO: Bug in Tensorflow: once FLAGS.dataset is accessed it's no longer possible to define new strings
flags.DEFINE_string('matlabfile', cp.get("matlab", "matlabfile"), 'MATLAB LoRa database')
flags.DEFINE_integer('chirp_length', cp.getint("matlab", "chirp_length"), 'Length of a single chirp')
elif cp.get("DEFAULT", "dataset") == 'mongo':
flags.DEFINE_string ('ip', cp.get("mongo", "ip"), 'MongoDB server IP')
        flags.DEFINE_integer('port', cp.getint("mongo", "port"), 'MongoDB server port')
flags.DEFINE_string ('db', cp.get("mongo", "db"), 'MongoDB database name')
flags.DEFINE_string ('collection', cp.get("mongo", "collection"), 'MongoDB chirp collection name')
flags.DEFINE_string ('test_collection', cp.get("mongo", "test_collection"), 'MongoDB test chirp collection name')
flags.DEFINE_integer ('random_mode', RandomMode.s2e(cp.get("mongo", "random_mode")), 'Data randomization approach')
flags.DEFINE_string ('random_date', cp.get("mongo", "random_date"), 'Date for split date mode')
flags.DEFINE_string ('filter', cp.get("mongo", "filter"), 'Query filter for "find" queries')
elif cp.get("DEFAULT", "dataset") == 'random':
        flags.DEFINE_integer('num_classes', cp.getint("random", "num_classes"), 'Number of random classes')
        flags.DEFINE_integer('num_samples', cp.getint("random", "num_samples"), 'Number of random samples')
# Classifier specific options
if cp.get("DEFAULT", "classifier") == 'mlp':
flags.DEFINE_integer('num_hidden_layers', cp.getint("mlp", "num_hidden_layers"), 'Number of hidden layers')
flags.DEFINE_integer('num_hidden_neurons', cp.getint("mlp", "num_hidden_neurons"), 'Number of hidden neurons in a hidden layer')
elif cp.get("DEFAULT", "classifier") == 'cnn':
flags.DEFINE_integer('conv_kernel_width', cp.getint("cnn", "conv_kernel_width"), 'Convolution kernel width')
flags.DEFINE_integer('pooling_kernel_width', cp.getint("cnn", "pooling_kernel_width"), 'Max pooling kernel width')
elif cp.get("DEFAULT", "classifier") == 'mdn':
flags.DEFINE_integer('num_hidden_layers', cp.getint("mdn", "num_hidden_layers"), 'Number of hidden layers')
flags.DEFINE_integer('num_hidden_neurons', cp.getint("mdn", "num_hidden_neurons"), 'Number of hidden neurons in a hidden layer')
def print_conf(cp): # Print settings to terminal
for e in cp.defaults():
print("[+] " + Fore.YELLOW + Style.BRIGHT + e + ": " + str(cp.get("DEFAULT", e)))
def select_cols(matrix, c1, c2): # Select two columns from a numpy matrix
return matrix[:, [c1, c2]]
# ----------------------------------------------------
# Dataset classes
# ----------------------------------------------------
class TensorIO():
def __init__(self, x, y):
self.x = x # Input
self.y = y # Output
class Dataset(): # Dataset base class
def __init__(self):
self.num_training_samples = FLAGS.num_train_samples
self.num_test_samples = FLAGS.num_test_samples
# Based on the tag, get the LoRa ID
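    # (e.g. the tag 'lora3' maps to LoRa ID 3, per the slicing below)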
def _determine_id(self, tag):
if 'lora' in tag:
lora_id = int(tag[4:])
return lora_id
print("[!] Warning: unable to determine lora_id for entry " + str(tag))
return None
# Preprocess an input so that it can be learned by Tensorflow
def _data_to_tf_record(self, lora_id, chirp, debug=False):
features = []
#visualization.dbg_plot(preprocessing.iphase(chirp), title='Preprocessed chirp')
chirp = preprocessing.roll_to_base(chirp)
for m in FLAGS.mode.split(','):
if m == 'iphase':
features.append(preprocessing.iphase(chirp))
elif m == 'fft':
features.append(preprocessing.fft(chirp))
elif m == 'ifreq':
features.append(preprocessing.ifreq(chirp, FLAGS.sampling_freq))
elif m == 'iamp':
features.append(preprocessing.iamp(chirp))
elif m == 'raw':
features.append(preprocessing.normalize(chirp))
else:
print(Fore.RED + Style.BRIGHT + "[-] Analysis mode must be configured to be either 'fft', 'iphase', 'ifreq', or a comma separated combination.")
exit(1)
if debug:
if lora_id == 1:
visualization.dbg_plot(features[0], title='First feature vector of LoRa 1 chirp')
tf_record = {"lora_id": lora_id, "iq": features}
return tf_record
class GNURadioDataset(Dataset): # Convert pmt of IQ samples to numpy complex 64
def __init__(self, pmt, symbol_length):
self.pmt = pmt
self.symbol_length = symbol_length
def get(self):
data = []
frame = np.frombuffer(self.pmt, dtype=np.complex64)
symbols = [frame[i:i+self.symbol_length] for i in range(0, len(frame), self.symbol_length)]
for symbol in symbols:
tf_record = self._data_to_tf_record(None, symbol)
data.append(tf_record)
return data
class FakeSampleDataset(Dataset):
def __init__(self, host='localhost', port=27017, name="chirps"):
Dataset.__init__(self)
self.name = name
def get(self, projection={}, num_records=500):
return [{"lora_id": 1, "iq": [0+0j] * 74200}] * num_records
class UniformRandomDataset(Dataset): # Sanity check dataset
def __init__(self):
Dataset.__init__(self)
self.num_classes = FLAGS.num_classes
self.lora_ids = set()
for i in range(1, self.num_classes+1):
self.lora_ids.add(i)
def get(self, projection={}):
data = []
for i in range(0, FLAGS.num_samples):
record = {"lora_id": random.randint(1,self.num_classes), "iq": [random.random() for x in range(0, FLAGS.limit)]}
data.append(record)
return data
class MatlabDataset(Dataset):
def __init__(self):
Dataset.__init__(self)
self.path = FLAGS.matlabfile
self.data = []
self.lora_ids = set()
# Load the file and contents
mat_contents = sio.loadmat(self.path)
self.all_samples = mat_contents['all_samples']
# Determine number of classes
for entry in self.all_samples:
entry_name = os.path.basename(entry[0][0])
lora_id = self._determine_id(entry_name)
if lora_id is None:
continue
self.lora_ids.add(lora_id)
def _determine_id(self, filename):
for elem in filename.split('-'):
if 'lora' in elem:
return Dataset._determine_id(self, elem)
def get(self, projection={}, num_records=0): # TODO: projection
data = []
# Parse class data
for entry in self.all_samples:
entry_name = os.path.basename(entry[0][0])
entry_data = entry[1]
lora_id = self._determine_id(entry_name)
if lora_id is None:
continue
print("Parsing " + entry_name + " (class " + str(lora_id) + ", " + str(len(entry_data)) + " samples)")
for record in entry_data:
for i in range(0, 8):
chirp = record[i*FLAGS.chirp_length:(i+1)*FLAGS.chirp_length]
tf_record = self._data_to_tf_record(lora_id, chirp, debug=args.debug)
data.append(tf_record)
return data
class RandomMode:
RANDOMIZE_SYMBOLS = 0
RANDOMIZE_FRAMES = 1
SPLIT_DATE = 2
SPLIT_COLLECTION = 3
_STR_RANDOMIZE_SYMBOLS = 'randomize_symbols'
_STR_RANDOMIZE_FRAMES = 'randomize_frames'
_STR_SPLIT_DATE = 'split_date'
_STR_SPLIT_COLLECTION = 'split_collection'
@staticmethod
def e2s(enum):
if enum == RandomMode.RANDOMIZE_SYMBOLS:
return RandomMode._STR_RANDOMIZE_SYMBOLS
elif enum == RandomMode.RANDOMIZE_FRAMES:
return RandomMode._STR_RANDOMIZE_FRAMES
elif enum == RandomMode.SPLIT_DATE:
return RandomMode._STR_SPLIT_DATE
elif enum == RandomMode.SPLIT_COLLECTION:
return RandomMode._STR_SPLIT_COLLECTION
else:
print(Fore.YELLOW + Style.BRIGHT + "[!] Warning: unknown enum %d. Defaulting to 0." % enum)
return 0
@staticmethod
def s2e(string):
if string == RandomMode._STR_RANDOMIZE_SYMBOLS:
return RandomMode.RANDOMIZE_SYMBOLS
elif string == RandomMode._STR_RANDOMIZE_FRAMES:
return RandomMode.RANDOMIZE_FRAMES
elif string == RandomMode._STR_SPLIT_DATE:
return RandomMode.SPLIT_DATE
elif string == RandomMode._STR_SPLIT_COLLECTION:
return RandomMode.SPLIT_COLLECTION
else:
print(Fore.YELLOW + Style.BRIGHT + "[!] Warning: unknown randomization mode '%s'. Defaulting to randomize_symbols." % string)
return RandomMode.RANDOMIZE_SYMBOLS
class MongoDataset(Dataset):
def __init__(self):
Dataset.__init__(self)
self.ip = FLAGS.ip
self.port = FLAGS.port
self.client = MongoClient(self.ip, self.port)
self.db = self.client[FLAGS.db]
self.collection = self.db[FLAGS.collection]
self.collection_test = self.db[FLAGS.test_collection]
self.lora_ids = set()
self.random_mode = FLAGS.random_mode
self.filter = json.loads(FLAGS.filter)
self.num_samples = self.collection.find(self.filter).count()
print(Fore.MAGENTA + Style.BRIGHT + "[+] Filter: %s" % str(self.filter))
self.sort = '$natural' if args.natural else 'rand'
# Randomize mongo set
self.randomize()
# Randomize all symbols and divide into training and test set
if self.random_mode == RandomMode.RANDOMIZE_SYMBOLS:
self.cursor_train = self.collection.find(self.filter).sort(self.sort, 1).skip(0).limit(self.num_training_samples)
self.cursor_test = self.collection.find(self.filter).sort(self.sort, 1).skip(self.num_training_samples).limit(self.num_test_samples)
elif self.random_mode == RandomMode.RANDOMIZE_FRAMES:
self.collection.create_index("fn")
# Find out how many test frames we need
frames_for_test = int(self.num_test_samples / 36) # 36 = number of symbols in frame
# Find highest frame number
print("[+] Finding highest frame number")
last_fn = self.collection.find(self.filter).sort("fn", -1).limit(1)[0]['fn']
# Generate list of random frame numbers to be used as test set
test_fns = []
for i in range(0, frames_for_test):
test_fns.append(randint(0, last_fn))
# Assign the cursors
train_query = self.filter.copy()
train_query["fn"] = {"$nin": test_fns}
self.cursor_train = self.collection.find(train_query).sort(self.sort, 1).limit(self.num_training_samples)
test_query = self.filter.copy()
test_query["fn"] = {"$in": test_fns}
self.cursor_test = self.collection.find(test_query).sort(self.sort, 1).limit(self.num_test_samples)
elif self.random_mode == RandomMode.SPLIT_DATE:
self.collection.create_index("date")
print("[+] Splitting test set after date: %s" % FLAGS.random_date)
the_date = datetime.strptime(FLAGS.random_date,'%Y-%m-%dT%H:%M:%SZ')
train_query = self.filter.copy()
train_query["date"] = {"$lt": the_date}
self.cursor_train = self.collection.find(train_query).sort(self.sort, 1).limit(self.num_training_samples)
test_query = self.filter.copy()
test_query["date"] = {"$gte": the_date}
self.cursor_test = self.collection.find(test_query).sort(self.sort, 1).limit(self.num_test_samples)
elif self.random_mode == RandomMode.SPLIT_COLLECTION:
self.cursor_train = self.collection.find(self.filter).sort(self.sort, 1).limit(self.num_training_samples)
self.cursor_test = self.collection_test.find(self.filter).sort(self.sort, 1).limit(self.num_test_samples)
# Determine number of classes
print("[+] Determining number of classes")
for tag in self.cursor_train.distinct('tag'):
lora_id = self._determine_id(tag)
if lora_id is None:
continue
self.lora_ids.add(lora_id)
self.cursor_train.rewind()
# Create caches
self.cache_train = GenericCache(name="train")
self.cache_test = GenericCache(name="test")
def randomize(self):
if os.path.isfile('/tmp/randomized_mongo'):
print("[+] MongoDB dataset is already randomized")
return
self._randomize(self.collection, "")
if self.random_mode == RandomMode.SPLIT_COLLECTION: # If random mode is set to split collection, also randomize this collection
self._randomize(self.collection_test, "(test set)")
with open('/tmp/randomized_mongo', "w") as f:
f.write('')
def _randomize(self, collection, label=""):
print("[+] Randomizing MongoDB dataset %s" % label)
progress = 0
for entry in collection.find(self.filter):
collection.update({"_id": entry["_id"]}, {"$set": {"rand": random.random()}}, upsert=False, multi=False)
progress += 1
print("\r[+] Progress: %d / %d (estimation) " % (progress, self.num_samples)),
print("")
print("[+] Creating index")
collection.create_index("rand")
def get(self, train=True, projection={}, num_records=1000):
data = []
set_in_memory = False
if train:
cursor = self.cursor_train
cache = self.cache_train
num_records_total = self.num_training_samples
else:
cursor = self.cursor_test
cache = self.cache_test
num_records_total = self.num_test_samples
if len(cache) == num_records_total:
set_in_memory = True
# Set is already loaded in cache memory
if set_in_memory:
for i in range(0, num_records):
try:
tf_record = cache.next()
except StopIteration:
cache.rewind()
tf_record = cache.next()
data.append(tf_record)
else: # Go through each record in the MongoDB
for i in range(0, num_records):
try:
record = cursor.next()
except StopIteration:
cursor.rewind()
record = cursor.next()
except (OperationFailure, AutoReconnect) as e:
print("[!] Warning: Got other exception than StopIteration: "),
print(e)
cursor.rewind()
record = cursor.next()
lora_id = self._determine_id(record['tag'])
if lora_id is None:
continue
tf_record = cache.get(record['_id'])
if tf_record is None:
chirp = np.frombuffer(record['chirp'], dtype=np.complex64)
tf_record = self._data_to_tf_record(lora_id, chirp, debug=args.debug)
cache.store(record['_id'], tf_record)
data.append(tf_record)
return data
# The Instances class is responsible for providing:
# - Preprocessing of the raw chirp data into features
# - Separation of dataset into training and test sets
# - Random shuffling of training and test data
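# Illustrative usage (hypothetical flag values, shown only to clarify the flow):
#   instances = Instances(limit=FLAGS.limit, exclude_classes=[], name="train")
#   batch_x, batch_y = instances.next_batch(True, FLAGS.batch_size)
#   # batch_x: float32 array of shape (batch, limit); batch_y: one-hot label vectors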
class Instances():
def __init__(self, limit=None, exclude_classes=[], name="", mapping=None):
self.name = name
self.num_excluded_samples = 0
self.limit = limit
self.exclude_classes = exclude_classes
# Select dataset type
if cp.get("DEFAULT", "dataset") == 'matlab':
self.dataset = MatlabDataset()
elif cp.get("DEFAULT", "dataset") == 'mongo':
self.dataset = MongoDataset()
elif cp.get("DEFAULT", "dataset") == 'random':
self.dataset = UniformRandomDataset()
else:
print(Fore.RED + Style.BRIGHT + "[-] Unknown dataset type '" + cp.get("DEFAULT", "dataset") + "'. Exiting")
exit(1)
        # Make sure we don't request more training/test samples than are available
print("[+] Got " + Fore.GREEN + Style.BRIGHT + str(self.dataset.num_samples) + Style.RESET_ALL + " samples")
if self.dataset.num_test_samples + self.dataset.num_training_samples > self.dataset.num_samples:
print(Fore.RED + Style.BRIGHT + "[-] Sum of training and test samples exceeds available samples. Exiting")
exit(1)
# Get length of input samples (= number of features) and configure limit
        print("[+] Getting number of features (fetching 1 record from the test set)")
self.num_features = self._get_num_features(self.dataset.get(train=False, num_records=1))
if self.limit == -1 or self.limit is None:
self.limit = self.num_features
print("[+] First sample contains %d features (limited to %d)" % (self.num_features, self.limit))
# Create mapping from LoRa ID to One Hot Vector if necessary
if mapping is None:
self.mapping = Mapping(self.dataset.lora_ids, exclude_classes=self.exclude_classes)
self.mapping.display()
else: # Update existing map with any new entries found
self.mapping = mapping
self.mapping.update(self.dataset.lora_ids, exclude_classes=self.exclude_classes)
self.mapping.display()
def next_batch(self, train, size):
temp = list(self.dataset.get(train=train, num_records=size))
if len(temp) > 0:
# Randomize (already done in Mongo, but not for other datasets)
random.shuffle(temp)
# Create instances
instances_x = []
instances_y = []
            for i in range(0, len(temp)):  # temp may contain fewer than 'size' records if some were skipped
processed_record = self.process_record(temp[i])
if not (processed_record is None):
instances_x.append(processed_record.x[0:self.limit])
instances_y.append(processed_record.y)
instances_x = np.array(instances_x, dtype=np.float32)
instances_y = np.array(instances_y, dtype=np.float32)
# Done!
#if len(self.exclude_classes) > 0:
# print(Fore.GREEN + Style.BRIGHT + "[+] EXCLUDING %d samples" % self.num_excluded_samples)
else:
print("[-] No samples found in dataset. Exiting")
exit(1)
if len(instances_x) == 0:
raise Exception
return instances_x, instances_y
def _get_num_features(self, x):
return len(np.array(x[0]["iq"]).flatten())
def process_record(self, record):
# Do some preprocessing on the records here
if record["lora_id"] in self.exclude_classes:
self.num_excluded_samples += 1
return None
one_hot_vector = self.mapping.lora_id_to_oh(record["lora_id"])
features = np.array(record["iq"]).flatten()
return TensorIO(features, one_hot_vector)
# ----------------------------------------------------
# ML models
# Some of these models are based on the reference im-
# plementations provided by Aymeric Damien. See
# https://github.com/aymericdamien/TensorFlow-Examples
# for more information.
# ----------------------------------------------------
class MLModel(): # Base class for ML models
def __init__(self):
self.learning_rate = None
self.layers = []
self.output_layer = None
self.cost_function = None
self.correct_prediction = None
class MLPModel(MLModel):
def __init__(self, x, num_inputs, y, num_classes, hidden_layers=0, hidden_neurons=0, name='mlp'):
MLModel.__init__(self)
self.learning_rate = 0.0001 #0.001 works pretty good too
next_layer = x
next_layer_size = num_inputs
for i in range(0, hidden_layers):
self.layers.append(LinearReluLayer(next_layer, next_layer_size, hidden_neurons, name=name+'lin' + str(i)))
self.output_layer = self.layers[-1]
next_layer = self.output_layer.h
next_layer_size = hidden_neurons
        self.layers.append(LinearLayer(next_layer, next_layer_size, num_classes, name=name+'clin', init_zero=True)) # Since it will be softmaxed later, init to zero. Seems to affect training speed and make the weights align on a diagonal faster
self.output_layer = self.layers[-1]
#self.cost_function = tf.reduce_mean(-tf.reduce_sum(y * tf.log(tf.nn.softmax(self.output_layer.h)+EPSILON), reduction_indices=[1])) # Doesn't deal with edge cases so we need to add EPSILON
self.cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.output_layer.h, labels=y))
#self.cost_function = tf.reduce_mean(tf.reduce_sum(tf.square(y - tf.nn.softmax(self.output_layer.h)), reduction_indices=[1]))
self.correct_prediction = tf.equal(tf.argmax(self.output_layer.h,1), tf.argmax(y,1))
class ConvNeuralNetModel(MLModel):
def __init__(self, x, num_inputs, y, num_classes, keep_prob=None, name='cnn'):
MLModel.__init__(self)
self.learning_rate = 0.001 # 0.0001
# Make image
x_shaped = tf.reshape(x, shape=[-1, 1, num_inputs, 1])
# Append convolution layers
self.layers.append(NNLayer(x_shaped, [1, FLAGS.conv_kernel_width, 1, 32], [32], name=name+'wc1'))
self.output_layer = self.layers[-1]
self.layers.append(NNLayer(self.output_layer.h, [1, FLAGS.conv_kernel_width, 32, 64], [64], name=name+'wc2'))
self.output_layer = self.layers[-1]
# Reshape conv2 output to fit fully connected layer input
        relu_inputs = (num_inputs/pow(FLAGS.pooling_kernel_width, 2))*64 # 64 = output channels per sample from conv. 4 = k from pooling (see paper notes). Power of two because max pooling twice
relu_outputs = num_inputs
out_shaped = tf.reshape(self.output_layer.h, [-1, relu_inputs])
# Append fully connected layer
self.layers.append(LinearReluDropLayer(out_shaped, relu_inputs, relu_outputs, keep_prob))
self.output_layer = self.layers[-1]
# Output, class prediction
self.layers.append(LinearLayer(self.output_layer.h, relu_outputs, num_classes, name=name+'lin'))
self.output_layer = self.layers[-1]
self.cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.output_layer.h, labels=y))
self.correct_prediction = tf.equal(tf.argmax(self.output_layer.h,1), tf.argmax(y,1))
class MDNModel(MLModel):
def __init__(self, x, num_inputs, y, num_classes, hidden_layers=0, hidden_neurons=0, name='mdn'):
MLModel.__init__(self)
self.num_classes = num_classes
self.learning_rate = 0.001
next_layer = x
next_layer_size = num_inputs
# Hidden layers
for i in range(0, hidden_layers):
self.layers.append(LinearLayer(next_layer, next_layer_size, hidden_neurons, name=name+'lin' + str(i)))
self.output_layer = self.layers[-1]
next_layer = self.output_layer.h
next_layer_size = hidden_neurons
# MDN layer
self.layers.append(MixtureLayer(next_layer, next_layer_size, num_classes, name=name+"mix"))
self.output_layer = self.layers[-1]
self.pi, self.mu, self.sigma = self._get_components(self.output_layer)
self.gauss = tf.contrib.distributions.Normal(mu=self.mu, sigma=self.sigma)
# Cost function
self.cost_function = self._get_cost_function(y)
# Evaluation
self.correct_prediction = tf.equal(tf.argmax(tf.mul(self.pi,self.gauss.mean()), 1), tf.argmax(y,1))
def _get_components(self, layer):
        # Split the layer output into the mixture weights (pi), means (mu) and
        # standard deviations (sigma) of the components
        pi, mu, sigma = tf.split(1, layer.num_components, layer.h)
pi = tf.nn.softmax(pi)
#assert_op = tf.Assert(tf.equal(tf.reduce_sum(pi), 1.), [pi])
#pi = tf.with_dependencies([assert_op], pi)
sigma = tf.exp(sigma)
return pi, mu, sigma
def _get_cost_function(self, y):
return tf.reduce_mean(-tf.log(tf.reduce_sum(tf.mul(self.pi, self.gauss.pdf(y)), 1, keep_dims=True)))
def _sample(self, n):
# Randomly sample x times according to pi distribution
mixture_indices = tf.reshape(tf.multinomial(tf.log(self.pi), n), [-1]) # Pi must be a log probability
# Sample all gaussian distributions x times
samples = tf.reshape(self.gauss.sample(n), [-1, self.num_classes])
# Select only the one according to pi
select_gaussians = tf.reduce_sum(tf.one_hot(mixture_indices, self.num_classes) * samples, 1)
return select_gaussians
def _mean(self):
        # Get the indices of the most likely mixtures belonging to each x
mixture_indices = tf.argmax(self.pi, 1)
# Get the expected values of all gaussians
exp_values = self.gauss.mean()
# Get expected value of most likely mixture
select_exp = tf.reduce_sum(tf.one_hot(mixture_indices, self.num_classes) * exp_values, 1)
return select_exp
class ModelType:
MLP = 0
CONVNET = 1
MDN = 2
@staticmethod
def str2type(string):
if string == "mlp":
return ModelType.MLP
elif string == "cnn":
return ModelType.CONVNET
elif string == "mdn":
return ModelType.MDN
else:
print(Fore.RED + Style.BRIGHT + "[-] Model type "+ string +" does not exist.")
exit(1)
# ----------------------------------------------------
# ML classifiers
# ----------------------------------------------------
class SVM():
def __init__(self, name="svc"):
print("[+] SVM Classifier")
self.m = SVC()
self.name = name
def _get_lora_id_labels(self, instances, oh_labels):
result = []
for i in range(0, len(oh_labels)):
result.append(instances.mapping.oh_to_lora_id(oh_labels[i]))
return result
def _to_vendor(self, instances, lora_id_labels):
result = []
for i in range(0, len(lora_id_labels)):
result.append(instances.mapping.lora_id_to_vendor_id(lora_id_labels[i]))
return result
def train(self, instances, batch_size=2500):
print("[+] Getting %d training samples" % batch_size)
train_samples_x, train_samples_y = instances.next_batch(True, batch_size)
train_samples_y = self._get_lora_id_labels(instances, train_samples_y)
print("[+] Training model")
self.m.fit(train_samples_x, train_samples_y)
def save(self):
path = FLAGS.trainedmodelsdir + self.name + "/"
if not os.path.exists(path):
os.makedirs(path)
# Save model
pickle.dump(self.m, open(path + 'svc_model.p', "wb"))
@staticmethod
def load():
path = FLAGS.trainedmodelsdir + FLAGS.model_name + "/"
# Set up classifier based on config and stored data
net = SVM()
net.m = pickle.load(open(path + 'svc_model.p', "rb"))
return net
def bin_class_per_sample(self, instances, limit=200, adv_detect=True, vendor_only=False):
print("[+] Getting %d test samples" % limit)
test_samples_x, test_samples_y = instances.next_batch(False, limit)
test_samples_y = self._get_lora_id_labels(instances, test_samples_y)
print("[+] Evaluating model")
predicted_y = self.m.predict(test_samples_x)
if vendor_only:
metrics = utilities.get_eval_metrics_percent(self._to_vendor(instances, test_samples_y), self._to_vendor(instances, predicted_y))
else:
metrics = utilities.get_eval_metrics_percent(test_samples_y, predicted_y)
utilities.print_metrics(metrics)
return
def visualize_embeddings(self, instances, limit=200, train=True):
print("[!] Warning: visualize_embeddings not implemented for SVM")
return
class Classifier():
# Build the classifier
def __init__(self, num_inputs, num_classes, name, modeltype=ModelType.MLP):
self.num_inputs = num_inputs
self.num_classes = num_classes
self.name = name
self.step = 0
self.modeltype = modeltype
self.expected_values = None
self.std = None
self.distance_threshold = np.zeros(num_classes)
self.sess = None
self.instances_mapping = None
model_summaries = []
self.x = tf.placeholder("float", [None, self.num_inputs], name='inputs')
self.y = tf.placeholder("float", [None, self.num_classes], name='map-id-oh')
self.keep_prob = tf.placeholder(tf.float32, name='dropout')
if modeltype == ModelType.MLP:
self.m = MLPModel(self.x, self.num_inputs, self.y, self.num_classes, hidden_layers=FLAGS.num_hidden_layers, hidden_neurons=FLAGS.num_hidden_neurons, name="mlp") # Build MLP model
elif modeltype == ModelType.CONVNET:
self.m = ConvNeuralNetModel(self.x, self.num_inputs, self.y, self.num_classes, keep_prob=self.keep_prob, name="cnn") # Build Convolutional Neural Network model
elif modeltype == ModelType.MDN:
self.m = MDNModel(self.x, self.num_inputs, self.y, self.num_classes, hidden_layers=FLAGS.num_hidden_layers, hidden_neurons=FLAGS.num_hidden_neurons, name="mdn") # Build MDN model
else:
raise Exception("No model type specified")
# Define optimizer
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.m.learning_rate).minimize(self.m.cost_function)
# Define accuracy model
self.accuracy = tf.reduce_mean(tf.cast(self.m.correct_prediction, tf.float32))
# Merge TensorBoard summaries for the model
model_summaries.append(tf.summary.scalar('accuracy', self.accuracy))
model_summaries.append(tf.summary.scalar('cost', self.m.cost_function))
self.merged_model_summaries = tf.summary.merge(model_summaries, collections=None, name=None)
# Define session object and summary writers
self.sess = tf.Session()
self.train_writer = tf.summary.FileWriter(FLAGS.logdir + '/train', graph=self.sess.graph)
self.test_writer = tf.summary.FileWriter(FLAGS.logdir + '/test')
def __del__(self):
if not (self.sess is None):
self.sess.close()
self.train_writer.close()
self.test_writer.close()
# Plot sample data to Tensorboard
def _plot_samples(self, samples_x, samples_y):
# Register plot summaries
plot_summaries = []
plots_to_show = 5
learned_weights_tensor = tf.identity(self.m.output_layer.W)
learned_weights = self.sess.run(learned_weights_tensor)
plot_summaries.append(visualization.plot_values(samples_x[0], self.instances_mapping, height=500, width=self.num_inputs, tag="weights", title="Weights", label=np.argmax(samples_y[0]), backdrop=learned_weights))
        for i in range(1, plots_to_show + 1):
label = np.argmax(samples_y[i])
guess = self.get_accuracy([samples_x[i]], [samples_y[i]])
plot_summaries.append(visualization.plot_values(samples_x[i], self.instances_mapping, height=500, width=self.num_inputs, tag="trd" + str(i) + "c" + str(label) + "g" + str(guess), title="Training data", label=label))
# Merge TensorBoard summaries for plots
merged_plot_summaries = tf.summary.merge(plot_summaries, collections=None, name=None)
summary_plot = self.sess.run(merged_plot_summaries)
self.train_writer.add_summary(summary_plot)
# Plot kernel data to Tensorboard
def _plot_kernels(self):
plot_summaries = []
# TODO go through layers and check .startswith("wc")
kernels_tensor = self.m.layers[0].W
kernels_shaped_tensor = tf.reshape(kernels_tensor, [-1, FLAGS.conv_kernel_width]) # Arrange kernels so that there is one per row
kernels_shaped = self.sess.run(kernels_shaped_tensor)
plot_summaries.append(visualization.plot_kernels(kernels_shaped, FLAGS.conv_kernel_width, height=4096, width=1024, tag="kernels", title="CNN Kernels"))
# Merge TensorBoard summaries for plots TODO dup code
merged_plot_summaries = tf.summary.merge(plot_summaries, collections=None, name=None)
summary_plot = self.sess.run(merged_plot_summaries)
self.train_writer.add_summary(summary_plot)
def get_output_weights(self, samples_x):
return self.sess.run(self.m.output_layer.h, feed_dict={self.x: samples_x, self.keep_prob: 1.0})
def _plot_output_weights_2d(self, samples_x, samples_y, predictions_y, instances, metrics): # Do not use new samples from instances
plot_summaries = []
# Get the output weight values for all classes
output_weights = self.get_output_weights(samples_x)
# OLD: Get first two weights to visualize
# weights = select_cols(output_weights, 0, 1)
# Reduce dimensionality of weights to 2
tsne = TSNE(n_components=2, init='pca', n_iter=5000)
weights = tsne.fit_transform(output_weights)
#xlabel = "Weight #" + str(0) + " values"
#ylabel = "Weight #" + str(1) + " values"
xlabel = "t-SNE dimension 1"
ylabel = "t-SNE dimension 2"
plot_summaries.append(visualization.plot_weights(weights, samples_y, predictions_y, self.expected_values, self.distance_threshold, instances.mapping, tag=self.name+"-w", title="2D projection of output feature weights", xlabel=xlabel, ylabel=ylabel, metrics=metrics))
# Merge TensorBoard summaries for plots TODO dup code
merged_plot_summaries = tf.summary.merge(plot_summaries, collections=None, name=None)
summary_plot = self.sess.run(merged_plot_summaries)
self.train_writer.add_summary(summary_plot)
def train(self, instances, batch_size=2500):
# Let's go
print("[+] Training")
self.sess.run(tf.global_variables_initializer())
# Start learning weights
try:
while True:
train_batch_x, train_batch_y = instances.next_batch(True, batch_size)
test_batch_x, test_batch_y = instances.next_batch(False, batch_size)
# Execute training step(s) on batch
#print(self.sess.run(self.m.tmp, feed_dict={self.x: train_batch_x, self.y: train_batch_y, self.keep_prob: FLAGS.keep_prob})) # To test something inside model with the same data
for i in range(0, FLAGS.retrain_batch):
self.sess.run(self.optimizer, feed_dict={self.x: train_batch_x, self.y: train_batch_y, self.keep_prob: FLAGS.keep_prob})
# Print progress
if self.step % FLAGS.print_step == 0:
# Print stats about step
summary_train, c_train, a_train = self.sess.run([self.merged_model_summaries, self.m.cost_function, self.accuracy], feed_dict={self.x: train_batch_x, self.y: train_batch_y, self.keep_prob: 1.0})
summary_test = self.sess.run(self.merged_model_summaries, feed_dict={self.x: test_batch_x, self.y: test_batch_y, self.keep_prob: 1.0})
# Add summaries
self.train_writer.add_summary(summary_train, self.step)
self.test_writer.add_summary(summary_test, self.step)
# Print info about training
print("Epoch {:d}: cost={:.6f}, tr_acc={:.6f}, W0_0={:.6f}".format(self.step, c_train, a_train, self.sess.run(self.m.output_layer.W)[0][0]))
# Next step
self.step += 1
if self.step == FLAGS.epochs:
raise KeyboardInterrupt
except KeyboardInterrupt:
pass
# Save the mapping used during training from LoRa ID to Map ID
self.instances_mapping = instances.mapping
# Mixture components
self.expected_values, self.std = self.calculate_mixture_components(instances)
# Show results
print(Fore.GREEN + Style.BRIGHT + "[+] Done training!")
if self.modeltype == ModelType.MLP:
print(Fore.GREEN + Style.BRIGHT + "[+] Plotting training samples")
self._plot_samples(train_batch_x, train_batch_y)
else:
print(Fore.GREEN + Style.BRIGHT + "[+] Plotting model kernels")
self._plot_kernels()
# Evaluation
print("[+] Training set accuracy")
print(self.get_accuracy(train_batch_x, train_batch_y))
print("[+] Test set accuracy")
print(self.get_accuracy(test_batch_x, test_batch_y))
# Assert that nothing unexpected happened during the whole process
GenericCache.assert_disjunction(instances.dataset.cache_train, instances.dataset.cache_test)
print(Fore.GREEN + Style.BRIGHT + "[+] Training assertions passed")
def determine_ideal_threshold(self, map_id, samples_x, expected_values):
output_weights = self.sess.run(self.m.output_layer.h, feed_dict={self.x: samples_x, self.keep_prob: 1.0})
threshold = 0.0
for output_weight in output_weights:
#threshold = max(np.linalg.norm(output_weight - expected_values), threshold)
#threshold = (np.linalg.norm(output_weight - expected_values) + threshold) / 2.0
threshold += np.linalg.norm(output_weight - expected_values)
threshold /= len(output_weights)
return threshold
def calculate_mixture_components(self, instances, num_samples_to_use=10000):
print("[+] Determining mixture model components")
train_batch_x, train_batch_y = instances.next_batch(True, num_samples_to_use)
expected_values = np.ndarray(shape=(self.num_classes,self.num_classes), dtype=np.float32)
std = np.ndarray(shape=(self.num_classes,self.num_classes), dtype=np.float32)
for lora_id in instances.mapping.keys():
map_id = instances.mapping.lora_to_map_id(lora_id)
samples_x = []
# Collect samples belonging to class map_id
for i in range(0, len(train_batch_x)):
if np.argmax(train_batch_y[i]) == map_id:
samples_x.append(train_batch_x[i])
if len(samples_x) == 0:
print(train_batch_y)
print("[-] Error: no samples in training set for LoRa %d. Dumped y training set" % lora_id)
exit()
# Determine mean and std deviation for all features
nn_output_weights = self.sess.run(tf.identity(self.m.output_layer.h), feed_dict={self.x: samples_x, self.keep_prob: 1.0})
expected_values[map_id] = np.mean(nn_output_weights, axis=0)
std[map_id] = np.std(nn_output_weights, axis=0)
# Determine ideal threshold based on expected values
# this threshold is used when doing nearest neighbor classification
# as the outlier detection (not discussed in paper)
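            # With 'auto', the per-class threshold is the mean distance of that class's
            # training samples to its centroid (see determine_ideal_threshold)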
if args.distance_threshold == 'auto':
print("\r[+] Determining expected value distance threshold for LoRa %d " % lora_id),
self.distance_threshold[map_id] = self.determine_ideal_threshold(map_id, samples_x, expected_values[map_id])
else:
self.distance_threshold[map_id] = args.distance_threshold
# Clean up
del samples_x
print("")
return expected_values, std
    # Calculates, for each sample, the distance between its output weight vector and every class centroid (expected value)
def calculate_expected_values_distance(self, samples_x):
if self.expected_values is None or self.distance_threshold is None:
raise Exception("Tried to evaluate expected value MSE without training values")
output_weights = self.sess.run(self.m.output_layer.h, feed_dict={self.x: samples_x, self.keep_prob: 1.0})
distances = []
for output_weight_v in output_weights:
distances.append(np.linalg.norm(output_weight_v - self.expected_values, axis=1)) # Distance from E(X) for each class to X
return distances
def get_accuracy(self, samples_x, samples_y):
return self.sess.run(self.accuracy, feed_dict={self.x: samples_x, self.y: samples_y, self.keep_prob: 1.0})
def save(self):
path = FLAGS.trainedmodelsdir + self.name + "/"
if not os.path.exists(path):
os.makedirs(path)
# Save number of inputs
np.save(path + 'value-inputs', self.num_inputs)
# Save number of classes
np.save(path + 'value-classes', self.num_classes)
# Save layers
for layer in self.m.layers:
filename = path + 'layer-' + layer.name
layer.saver.save(self.sess, filename, global_step=0)
# Save expected classification output
np.save(path + 'value-expected', self.expected_values)
np.save(path + 'value-std', self.std)
# Save distance threshold
np.save(path + 'value-dt', self.distance_threshold)
# Save instance mapping
pickle.dump(self.instances_mapping, open(path + 'value-mapping.p', "wb"))
@staticmethod
    def load(step=0):
path = FLAGS.trainedmodelsdir + FLAGS.model_name + "/"
# Load inputs and classes. Required to set up models.
num_inputs = np.load(path + 'value-inputs' + '.npy')
num_classes = np.load(path + 'value-classes' + '.npy')
# Set up classifier based on config and stored data
net = Classifier(num_inputs=num_inputs, num_classes=num_classes, name=FLAGS.model_name, modeltype=ModelType.str2type(FLAGS.classifier))
for layer in net.m.layers:
filename = path + 'layer-' + layer.name + '-' + str(step)
layer.saver.restore(net.sess, filename)
try:
net.expected_values = np.load(path + 'value-expected' + '.npy')
net.std = np.load(path + 'value-std' + '.npy')
except IOError:
print("[!] Warning: model does not have 'value-expected' and/or 'value-std', and will not be able to perform zero shot classification as a result.")
pass
net.distance_threshold = np.load(path + 'value-dt' + '.npy')
net.instances_mapping = pickle.load(open(path + 'value-mapping.p', "rb"))
return net
def test(self, instances, limit=200):
test_samples_x, test_samples_y = instances.next_batch(False, limit)
# Metrics
accuracy = self.get_accuracy(test_samples_x, test_samples_y)
print(Fore.GREEN + Style.BRIGHT + "[+] Evaluation accuracy for %d samples: %.2f percent" % (limit, accuracy * 100.0))
# Determine to which class a (set of) symbols belongs.
# If clustering is used, then the frame is sent by an attacker if it does not belong to any cluster
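    # Decision rule, informally (distances computed with np.linalg.norm below):
    #   l1nn   : pick the class whose expected output weight vector (centroid) is
    #            closest to the model output h(x); with adv_detect, report -1
    #            (unknown device) if that distance exceeds the class threshold
    #   argmax : pick the class with the largest output weight (no outlier rejection)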
def _predict(self, samples_x, adv_detect):
if FLAGS.clustering == "l1nn":
return self._predict_nearest_neighbor_l1(samples_x, adv_detect)
elif FLAGS.clustering == "argmax" or FLAGS.clustering == "none":
if adv_detect: # TODO: Threshold in this case?
print("[!] Warning: adv_detect cannot be used with argmax clustering at the moment")
return self._predict_argmax(samples_x)
else: # Don't do clustering, but use the closest predicted class
print("[!] Warning: unknown clustering approach '%s'; defaulting to 'none'" % FLAGS.clustering)
return self._predict_argmax(samples_x)
# Predict class with least L1 distance to expected weight
def _predict_nearest_neighbor_l1(self, samples_x, adv_detect):
expected_values_distance = self.calculate_expected_values_distance(samples_x)
idmap_predictions = []
for ed in expected_values_distance:
map_id = np.argmin(ed)
if adv_detect and (ed[map_id] > self.distance_threshold[map_id]):
map_id = -1
idmap_predictions.append(map_id)
most_probable = stats.mode(idmap_predictions)[0][0]
return most_probable, idmap_predictions
# Predict class with highest weight
def _predict_argmax(self, samples_x):
idmap_predictions = self.sess.run(tf.argmax(self.m.output_layer.h, 1), feed_dict={self.x: samples_x, self.keep_prob: 1.0})
most_probable = stats.mode(idmap_predictions)[0][0]
return most_probable, idmap_predictions
def _predict_zeroshot(self, samples_x):
weights = self.sess.run(self.m.output_layer.h, feed_dict={self.x: samples_x, self.keep_prob: 1.0})
probabilities = self.sess.run(tf.nn.softmax(self.m.output_layer.h), feed_dict={self.x: samples_x, self.keep_prob: 1.0})
return weights, probabilities
# Function to visualize confusion matrix and calculate the metrics ourselves
def _print_statistics(self, confusion_matrix):
num_classes = confusion_matrix.shape[0]
true_positives = np.zeros(num_classes)
false_positives = np.zeros(num_classes)
false_negatives = np.zeros(num_classes)
true_negatives = np.zeros(num_classes)
precision = np.zeros(num_classes)
recall = np.zeros(num_classes)
accuracy = np.zeros(num_classes)
# Calculate metrics
for i in range(num_classes):
true_positives[i] = confusion_matrix[i,i]
for i in range(num_classes):
false_positives[i] = np.sum(confusion_matrix[:,i]) - true_positives[i]
for i in range(num_classes):
false_negatives[i] = np.sum(confusion_matrix[i,:]) - true_positives[i]
for i in range(num_classes):
true_negatives[i] = np.sum(confusion_matrix) - (false_positives[i] + false_negatives[i] + true_positives[i])
for i in range(num_classes):
precision[i] = true_positives[i] / (true_positives[i] + false_positives[i])
for i in range(num_classes):
recall[i] = true_positives[i] / (true_positives[i] + false_negatives[i])
for i in range(num_classes):
accuracy[i] = (true_positives[i] + true_negatives[i]) / (true_positives[i] + false_positives[i] + false_negatives[i] + true_negatives[i])
np.set_printoptions(threshold='nan', linewidth=200)
print("Confusion matrix")
print(confusion_matrix)
print("TP")
print(true_positives)
print("FP")
print(false_positives)
print("FN")
print(false_negatives)
print("TN")
print(true_negatives)
print("Precision")
print(precision)
print("Recall")
print(recall)
print("Accuracy")
print(accuracy)
# Accuracy according to Wikipedia. This metric is not correct because
# it counts partially correct samples in the true negatives part of the
# confusion matrix. For example: when class 5 is a true negative with
# respect to a class 3 one-v-all classifier, it is considered correct
# even though the true class is 7.
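        # Worked example (hypothetical 3-class confusion matrix, 15 samples):
        #   [[5 0 0]
        #    [0 4 1]
        #    [0 1 4]]
        # For the class-0 one-vs-all view both misclassified samples are "true
        # negatives", so accuracy[0] = 15/15 = 1.0 even though the overall model
        # accuracy is only 13/15.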
model_accuracy_partial_correct = np.mean(accuracy)
# Decent metrics
model_accuracy = np.sum(true_positives) / np.sum(confusion_matrix)
model_precision_macro = np.mean(precision)
model_recall_macro = np.mean(recall)
print("Macc_PARTIAL : %.2f" % (model_accuracy_partial_correct*100.0))
print("Macc : %.2f" % (model_accuracy*100.0))
print("Mprec (macro): %.2f" % (model_precision_macro*100.0))
print("Mrec (macro) : %.2f" % (model_recall_macro*100.0))
# Perform a per-sample classification of whether it belongs to a class or not
# This is done by calculating the distance to the expected value (mode) of the
# Gaussian distribution of output weights for each class, and choosing the shortest
# distance.
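    # Small numeric illustration of the rule (made-up values, two classes):
    #   expected_values = [[2.0, 0.1], [0.2, 1.8]]   # per-class centroids
    #   h(x)            = [1.7, 0.4]                 # output weights for one symbol
    #   distances       ~ [0.42, 2.05]               # argmin -> class 0
    #   with adv_detect, class 0 is replaced by -1 if 0.42 > distance_threshold[0]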
def bin_class_per_sample(self, instances, limit=200, adv_detect=True, vendor_only=False):
test_samples_x, test_samples_y = instances.next_batch(False, limit)
num_samples = len(test_samples_x)
num_classes = instances.mapping.size+1 if adv_detect else instances.mapping.size # If adv_detect: use extra class for unknown
# Metrics
predicted_y = []
true_y = []
true_y_vis = []
confusion_matrix = np.zeros(shape=(num_classes,num_classes))
print('[+] Predicting %d samples...' % num_samples)
for i in range(0, num_samples):
true_class_map = np.argmax(test_samples_y[i]) # Get the true map ID from the dataset
predicted_class_map,_ = self._predict([test_samples_x[i]], adv_detect) # Get the map ID according to the model
true_class = instances.mapping.map_to_lora_id(true_class_map) # Get the LoRa ID from the dataset
predicted_class = self.instances_mapping.map_to_lora_id(predicted_class_map) # Get the LoRa ID according to the model
if predicted_class is None:
predicted_class = -1
if vendor_only:
true_class = instances.mapping.lora_id_to_vendor_id(true_class)
predicted_class = self.instances_mapping.lora_id_to_vendor_id(predicted_class)
predicted_y.append(predicted_class)
if adv_detect:
if not true_class in self.instances_mapping.keys(): # self.instances_mapping = learned mapping from model
true_y_vis.append(true_class)
true_y.append(-1)
confusion_matrix[0, predicted_class_map+1] += 1 # Make it so adv class(=-1) becomes class 0
else:
true_y.append(true_class)
true_y_vis.append(true_class)
confusion_matrix[true_class_map+1, predicted_class_map+1] += 1
else:
true_y.append(true_class)
true_y_vis.append(true_class)
confusion_matrix[true_class_map, predicted_class_map] += 1
print("[+] True classes encountered: %s" % len(set(true_y)))
self._print_statistics(confusion_matrix) # For debugging
assert(np.sum(confusion_matrix) == num_samples)
metrics = utilities.get_eval_metrics_percent(true_y, predicted_y)
utilities.print_metrics(metrics)
print('[+] Plotting output weights for first %d samples' % num_samples)
self._plot_output_weights_2d(test_samples_x, true_y_vis, predicted_y, instances, metrics)
def bin_class_per_frame(self, frame, symbol_length, adv_detect=True):
dataset = GNURadioDataset(frame, symbol_length)
data_x = [np.array(x["iq"]).flatten() for x in dataset.get()]
map_id, all_map_id_predictions = self._predict(data_x, adv_detect)
# Debug
lora_id_predictions = []
for map_id in all_map_id_predictions:
lora_id_predictions.append(self.instances_mapping.map_to_lora_id(map_id))
print("%s: %s" % (FLAGS.clustering, str(lora_id_predictions)))
return stats.mode(lora_id_predictions)[0][0]
def _labels_to_tsv_file(self, labels, mapping, out=None):
result = ""
for i in range(len(labels)):
result += str(mapping.oh_to_lora_id(labels[i])) + "\n"
if out:
with open(out, "w") as f:
f.write(result)
# TODO: Actually doesn't need to be inside the Classifier class
def visualize_embeddings(self, instances, limit=200, train=True):
print("[+] Gathering instances...")
samples_x, samples_y = instances.next_batch(train, limit)
        weights = self.get_output_weights(samples_x)
print(Fore.GREEN + Style.BRIGHT + "[+] Visualizing embeddings for %d samples" % limit)
embeddings_instances = tf.Variable(tf.stack(samples_x, axis=0), trainable=False, name='instances')
embeddings_weights = tf.Variable(tf.stack(weights, axis=0), trainable=False, name='weights')
self.sess.run(tf.variables_initializer([embeddings_instances, embeddings_weights]))
embeddings_saver = tf.train.Saver([embeddings_instances, embeddings_weights])
embeddings_writer = tf.summary.FileWriter(FLAGS.logdir + '/projector', self.sess.graph)
conf = projector.ProjectorConfig()
# Add embeddings
# Instances
e = conf.embeddings.add()
e.tensor_name = embeddings_instances.name
self._labels_to_tsv_file(samples_y, instances.mapping, out=FLAGS.logdir + '/projector/metadata.tsv')
e.metadata_path = FLAGS.logdir + '/projector/metadata.tsv'
# Generate sprite, save to tmp and assign here
#e.sprite.image_path = FLAGS.logdir +
#e.sprite.single_image_dim.extend([1024, 768])
# Weights
e = conf.embeddings.add()
e.tensor_name = embeddings_weights.name
self._labels_to_tsv_file(samples_y, instances.mapping, out=FLAGS.logdir + '/projector/metadata.tsv')
e.metadata_path = FLAGS.logdir + '/projector/metadata.tsv'
projector.visualize_embeddings(embeddings_writer, conf)
embeddings_saver.save(self.sess, FLAGS.logdir + '/projector/model_embeddings.ckpt')
# Calculates distance between pairs of centroids
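    #   'min'         -> smallest pairwise centroid distance
    #   'mean'        -> mean over all centroid pairs
    #   'mean_of_min' -> for each centroid, the distance to its nearest other centroid,
    #                    averaged; used below as the DBSCAN eps for zero-shot clustering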
def _intercluster_distance(self, centroids, method='min'):
num_centroids = len(centroids)
if not method in ['min','mean','mean_of_min']:
print("[!] Warning: _intercluster_distance: no such method '%s'. Defaulting to 'min'." % method)
method = 'min'
print("[+] Finding %s distance between %d centroids" % ("minimum" if method == 'min' else ("mean" if method == "mean" else "mean of minimum"), num_centroids))
if method == 'mean_of_min':
minimums = []
for i in range(len(centroids)):
first = centroids[i]
distances = []
for j in range(len(centroids)):
if i == j:
continue
second = centroids[j]
distance = np.linalg.norm(second - first)
distances.append(distance)
minimums.append(np.min(distances))
return np.mean(minimums)
else:
distances = []
for pair in combinations(range(num_centroids), 2):
distance = np.linalg.norm(centroids[pair[0]] - centroids[pair[1]])
distances.append(distance)
if method == 'min':
return np.min(distances)
elif method == 'mean':
return np.mean(distances)
# Convert predicted labels to real labels so that they can be compared
# in terms of accuracy
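    # Example (hypothetical labels): dbscan_labels = [0, 0, 1, 1, 1, -1] with
    # real_labels = [7, 7, 9, 9, 7, 9] maps cluster 0 -> LoRa 7 and cluster 1 -> LoRa 9,
    # giving [7, 7, 9, 9, 9, -1]; dbscan labels that are not the dominant cluster of
    # any real label fall back to the outlier label -1.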
def _get_zeroshot_labels(self, dbscan_labels, real_labels):
counts = defaultdict(list)
# Get dbscan labels for each real label
for i in range(len(real_labels)):
counts[real_labels[i]].append(dbscan_labels[i])
# Get most frequent dbscan label for each real label
# and use dbscan label as key for lookup dict
keys = {}
keys_counts = defaultdict(lambda: 0)
for key in set(real_labels):
mode_count = stats.mode(counts[key])[1][0]
mode_value = stats.mode(counts[key])[0][0]
if mode_count > keys_counts[mode_value]:
keys[mode_value] = key
keys_counts[mode_value] = mode_count
# Apply lookup dict to transform labels
result = []
for i in range(len(dbscan_labels)):
try:
result.append(keys[dbscan_labels[i]])
except KeyError: # No prevalent real label for this dbscan label found, so use outlier
result.append(-1)
return np.array(result)
def classify_zeroshot(self, instances, limit=40, threshold_outlier=0.0001, vendor_only=False):
num_mixtures = len(self.std)
mixtures = []
outlier_points = []
outlier_labels = []
print("[+] Gathering test samples")
test_samples_x, test_samples_y = instances.next_batch(False, limit)
num_samples = len(test_samples_x)
print("[+] Building %d gaussian mixtures based on trained parameters" % num_mixtures)
from scipy.stats import multivariate_normal
for i in range(num_mixtures):
# TF method
#g = tf.contrib.distributions.Normal(mu=self.expected_values[i], sigma=self.std[i])
# Numpy method
#g = multivariate_normal(self.expected_values[i], np.diag(np.power(self.std[i], 2)))
g = NumpyNormWrapper(mu=self.expected_values[i], sigma=self.std[i])
mixtures.append(g)
print("[+] Finding inter-cluster distance of training samples")
icd = self._intercluster_distance(self.expected_values, method='mean_of_min')
print("[+] ICD is %f" % icd)
print("[+] Calculating weights and probabilities")
weights, probabilities = self._predict_zeroshot(test_samples_x)
print("[+] Calculating marginals")
marginals = np.zeros(shape=(num_samples, num_mixtures))
for i in range(num_samples):
point = weights[i]
pi = probabilities[i]
for j in range(num_mixtures):
# TF method
#marginals[i] += pi[j] * self.sess.run(mixtures[j].pdf(point))
# Numpy method
marginals[i] += pi[j] * mixtures[j].pdf(point)
outlier = False
for j in range(num_mixtures):
if marginals[i][j] < threshold_outlier:
outlier = True
outlier_points.append(point)
lora_id = instances.mapping.oh_to_lora_id(test_samples_y[i])
if vendor_only: # If we only care about classifying correct vendor
lora_id = instances.mapping.lora_id_to_vendor_id(lora_id)
outlier_labels.append(lora_id)
break
#print("%02d: %s | marg:%s, pi:%s, meanmarg:%s (%d/%d)" % (instances.mapping.oh_to_lora_id(test_samples_y[i]), str(outlier), str(marginals[i]), pi, str(np.mean(marginals[i])),i,num_samples))
print("[+] Finding nearest neighbors based on inter-cluster distance of training data")
db = DBSCAN(eps=icd, min_samples=1).fit(outlier_points)
zeroshot_labels = self._get_zeroshot_labels(db.labels_, outlier_labels)
guess_clusters = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
print(db.labels_)
print(np.array(outlier_labels))
print(zeroshot_labels)
print(guess_clusters)
metrics = utilities.get_eval_metrics_percent(outlier_labels, zeroshot_labels)
utilities.print_metrics(metrics)
# Reduce dimensionality of weights to 2
tsne = TSNE(n_components=2, init='pca', n_iter=5000)
vis = tsne.fit_transform(outlier_points)
visualization.plot_weights(vis, outlier_labels, zeroshot_labels, None, None, instances.mapping, tag=self.name+"-zero-w", metrics=metrics, tf=True)
# Class to make numpy normal distribution act the same as TF normal distribution
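# The wrapper models each output dimension as an independent univariate normal, so
# pdf() returns one density per dimension (not a single joint density), mirroring the
# element-wise behaviour of tf.contrib.distributions.Normal used during training.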
class NumpyNormWrapper():
def __init__(self, mu, sigma):
from scipy.stats import norm
if len(mu) != len(sigma):
raise Exception
# Initialize
self.num_distributions = len(mu)
self.distributions = []
for i in range(self.num_distributions):
self.distributions.append(norm(mu[i], sigma[i]))
def pdf(self, values):
if len(values) != self.num_distributions:
raise Exception
result = []
for i in range(self.num_distributions):
result.append(self.distributions[i].pdf(values[i]))
return np.array(result)
# ----------------------------------------------------
# ML layers
# ----------------------------------------------------
class NNLayer():
def __init__(self, inputs, Wshape, bshape, name=''): # input features and outputs
self.inputs = inputs
self.Wshape = Wshape
self.bshape = bshape
self.name = name
# Define model
self.W = tf.Variable(tf.random_normal(Wshape)) # Filter kernel
self.b = tf.Variable(tf.random_normal(bshape))
# Input: [batch, height, width, channels]
# Kernel: [filter_height, filter_width, in_channels, out_channels]
k = FLAGS.pooling_kernel_width
s = 1
self.conv = tf.nn.conv2d(inputs, self.W, strides=[1, 1, s, 1], padding='SAME') # Convolution Layer
self.conv_b = tf.nn.bias_add(self.conv, self.b) # Convolution layer bias
self.relu = tf.nn.relu(self.conv_b) # ReLU activation layer
self.h = tf.nn.max_pool(self.relu, ksize=[1, 1, k, 1], strides=[1, 1, k, 1], padding='SAME') # Max pooling layer (down-sampling)
self.saver = tf.train.Saver([self.W, self.b])
class LinearLayer():
def __init__(self, inputs, num_inputs, num_outputs, name='', init_zero=False): # input features and outputs
self.inputs = inputs
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.name = name
# Define model
if init_zero:
self.W = tf.Variable(tf.zeros([num_inputs, num_outputs]))
self.b = tf.Variable(tf.zeros([num_outputs]))
else:
self.W = tf.Variable(tf.random_normal([num_inputs, num_outputs]))
self.b = tf.Variable(tf.random_normal([num_outputs]))
self.h = tf.add(tf.matmul(inputs, self.W), self.b)
self.saver = tf.train.Saver([self.W, self.b])
class LinearReluLayer():
def __init__(self, inputs, num_inputs, num_outputs, name='', init_zero=False): # input features and outputs
self.inputs = inputs
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.name = name
# Define model
if init_zero:
self.W = tf.Variable(tf.zeros([num_inputs, num_outputs]))
self.b = tf.Variable(tf.zeros([num_outputs]))
else:
self.W = tf.Variable(tf.random_normal([num_inputs, num_outputs]))
self.b = tf.Variable(tf.random_normal([num_outputs]))
self.h = tf.nn.relu(tf.add(tf.matmul(inputs, self.W), self.b))
self.saver = tf.train.Saver([self.W, self.b])
class MixtureLayer():
def __init__(self, inputs, num_inputs, num_mixtures, mixture_type='gaussian', name='', init_zero=False): # input features and outputs
self.inputs = inputs
self.num_inputs = num_inputs
self.num_mixtures = num_mixtures
self.num_components = 3
self.num_outputs = self.num_mixtures * self.num_components
self.name = name
# Define model
if init_zero:
self.W = tf.Variable(tf.zeros([self.num_inputs, self.num_outputs]))
self.b = tf.Variable(tf.zeros([self.num_outputs]))
else:
self.W = tf.Variable(tf.random_normal([self.num_inputs, self.num_outputs], stddev=0.1))
self.b = tf.Variable(tf.random_normal([self.num_outputs], stddev=0.1))
# Mixture model hypothesis
tanh_inputs = tf.nn.tanh(inputs)
self.h = tf.add(tf.matmul(tanh_inputs, self.W), self.b)
self.saver = tf.train.Saver([self.W, self.b])
class LinearReluDropLayer():
def __init__(self, inputs, num_inputs, num_outputs, keep, name=''):
self.inputs = inputs
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.name = name
# Define model
self.W = tf.Variable(tf.random_normal([num_inputs, num_outputs]))
self.b = tf.Variable(tf.random_normal([num_outputs]))
self.h = tf.add(tf.matmul(inputs, self.W), self.b)
self.h = tf.nn.relu(self.h)
self.h = tf.nn.dropout(self.h, keep)
self.saver = tf.train.Saver([self.W, self.b])
class SoftmaxLayer():
def __init__(self, inputs, num_inputs, num_outputs, name=''): # input features and outputs
self.inputs = inputs
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.name = name
# Define model
self.W = tf.Variable(tf.zeros([num_inputs, num_outputs]))
self.b = tf.Variable(tf.zeros([num_outputs]))
self.h = tf.nn.softmax(tf.add(tf.matmul(inputs, self.W), self.b)) # Hypothesis
# If requested, save weights W and biases b
self.saver = tf.train.Saver([self.W, self.b])
# ----------------------------------------------------
# Standalone run code
# ----------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Tensorflow based fingerprinting of devices implementing the LoRa PHY layer')
parser.add_argument('action', type=str, choices=['train', 'test', 'train_embeddings', 'test_embeddings', 'zeroshot'], help='Action to perform')
parser.add_argument('configfile', type=str, help='Path to the config file to use')
parser.add_argument('--dt', dest='distance_threshold', type=str, help='Distance threshold to determine whether a device is an adversary. Set to "auto" to calculate automatically', default='auto')
parser.add_argument('--debug', dest='debug', action='store_true', default=False, help='Debug mode')
parser.add_argument('--save', dest='save', action='store_true', default=False, help='Save trained network')
parser.add_argument('--adv', dest='adv', action='store_true', default=False, help='Treat excluded classes as attackers')
parser.add_argument('--vendor', dest='vendor', action='store_true', default=False, help='Test on chip model only')
parser.add_argument('--natural', dest='natural', action='store_true', default=False, help='Natural sorting')
args, unknown = parser.parse_known_args()
    # Argument preprocessing
if args.distance_threshold != 'auto': # Define distance threshold
args.distance_threshold = float(args.distance_threshold)
# Conf stuff
load_conf(args.configfile)
print_conf(cp)
if tf.gfile.Exists(FLAGS.logdir):
tf.gfile.DeleteRecursively(FLAGS.logdir) # Clean tmp dir
if type(FLAGS.exclude_classes) == str and FLAGS.exclude_classes != '': # Exclude classes from training
exclude_classes = [int(x) for x in FLAGS.exclude_classes.split(',')]
else:
exclude_classes = []
# Let's go
if args.action == 'train':
print("[+] Excluding %s" % str(exclude_classes))
instances = Instances(limit=FLAGS.limit, exclude_classes=exclude_classes, name="train")
if cp.get("DEFAULT", "classifier") == 'svm':
net = SVM(name=FLAGS.model_name)
else:
net = Classifier(num_inputs=instances.limit, num_classes=instances.mapping.size, name=FLAGS.model_name, modeltype=ModelType.str2type(FLAGS.classifier))
net.train(instances, batch_size=FLAGS.batch_size)
if args.save:
net.save()
net.bin_class_per_sample(instances, limit=1000, adv_detect=False, vendor_only=False) # Never adv detect during training
net.visualize_embeddings(instances, limit=1000, train=True)
elif args.action == 'test':
instances = Instances(limit=FLAGS.limit, exclude_classes=[], name="test")
if cp.get("DEFAULT", "classifier") == 'svm':
net = SVM.load()
else:
net = Classifier.load(0)
print("[+] Testing...")
net.bin_class_per_sample(instances, limit=1500, adv_detect=args.adv, vendor_only=args.vendor)
net.visualize_embeddings(instances, limit=1000, train=False)
elif args.action == 'train_embeddings':
instances = Instances(limit=FLAGS.limit, exclude_classes=exclude_classes, name="train")
print("[+] Loading model...")
net = Classifier.load(0)
net.visualize_embeddings(instances, limit=1000, train=True)
elif args.action == 'test_embeddings':
instances = Instances(limit=FLAGS.limit, exclude_classes=[], name="test")
print("[+] Loading model...")
net = Classifier.load(0)
net.visualize_embeddings(instances, limit=1000, train=False)
elif args.action == 'zeroshot':
instances = Instances(limit=FLAGS.limit, exclude_classes=[], name="test")
net = Classifier.load(0)
net.classify_zeroshot(instances, FLAGS.num_zs_test_samples, vendor_only=args.vendor)
|
bsd-3-clause
|
DeercoderResearch/pyexperiment
|
setup.py
|
3
|
2159
|
"""Setup for pyexperiment
"""
from __future__ import print_function
# from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import os
import sys
from setuptools import setup
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
if ON_RTD:
__version__ = 'master'
else:
# Hack to avoid having to import __init__.py before pyexperiment
# is installed
sys.path.insert(0, os.path.abspath('./pyexperiment'))
from version import __version__
sys.path.pop(0)
read_plain = lambda fname: open(
os.path.join(os.path.dirname(__file__), fname), 'r').read()
try:
from pypandoc import convert
read_md = lambda fname: convert(fname, 'rst')
except ImportError:
print("Warning: pypandoc module not found")
read_md = read_plain
LONG_DESCRIPTION = 'Framework for quick and clean experiments with python.'
if os.path.exists('README.rst'):
print("README.rst found...")
LONG_DESCRIPTION = read_plain('README.rst')
elif os.path.exists('README.md'):
    print("README.md found, converting to rst")
LONG_DESCRIPTION = read_md('README.md')
setup(
name="pyexperiment",
version=__version__,
author="Peter Duerr",
author_email="[email protected]",
description="Run experiments with Python - quick and clean.",
license="MIT",
keywords="science experiment",
url="https://github.com/duerrp/pyexperiment",
packages=['pyexperiment',
'pyexperiment.utils'],
long_description=LONG_DESCRIPTION,
install_requires=[
'six',
'configobj',
'numpy',
'h5py',
'matplotlib',
'mock',
'lockfile',
'toolz'
],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
],
)
|
mit
|
TheMutley/openpilot
|
panda/tests/safety/test_honda.py
|
1
|
6359
|
#!/usr/bin/env python2
import unittest
import numpy as np
import libpandasafety_py
class TestHondaSafety(unittest.TestCase):
@classmethod
def setUp(cls):
cls.safety = libpandasafety_py.libpandasafety
cls.safety.honda_init(0)
cls.safety.init_tests_honda()
def _speed_msg(self, speed):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 0x158 << 21
to_send[0].RDLR = speed
return to_send
def _button_msg(self, buttons, msg):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = msg << 21
to_send[0].RDLR = buttons << 5
return to_send
def _brake_msg(self, brake):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 0x17C << 21
to_send[0].RDHR = 0x200000 if brake else 0
return to_send
def _alt_brake_msg(self, brake):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 0x1BE << 21
to_send[0].RDLR = 0x10 if brake else 0
return to_send
def _gas_msg(self, gas):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 0x17C << 21
to_send[0].RDLR = 1 if gas else 0
return to_send
def _send_brake_msg(self, brake):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 0x1FA << 21
to_send[0].RDLR = brake
return to_send
def _send_gas_msg(self, gas):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 0x200 << 21
to_send[0].RDLR = gas
return to_send
def _send_steer_msg(self, steer):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
to_send[0].RIR = 0xE4 << 21
to_send[0].RDLR = steer
return to_send
def test_default_controls_not_allowed(self):
self.assertFalse(self.safety.get_controls_allowed())
def test_resume_button(self):
RESUME_BTN = 4
self.safety.set_controls_allowed(0)
self.safety.honda_rx_hook(self._button_msg(RESUME_BTN, 0x1A6))
self.assertTrue(self.safety.get_controls_allowed())
def test_set_button(self):
SET_BTN = 3
self.safety.set_controls_allowed(0)
self.safety.honda_rx_hook(self._button_msg(SET_BTN, 0x1A6))
self.assertTrue(self.safety.get_controls_allowed())
def test_cancel_button(self):
CANCEL_BTN = 2
self.safety.set_controls_allowed(1)
self.safety.honda_rx_hook(self._button_msg(CANCEL_BTN, 0x1A6))
self.assertFalse(self.safety.get_controls_allowed())
def test_sample_speed(self):
self.assertEqual(0, self.safety.get_ego_speed())
self.safety.honda_rx_hook(self._speed_msg(100))
self.assertEqual(100, self.safety.get_ego_speed())
def test_prev_brake(self):
self.assertFalse(self.safety.get_brake_prev())
self.safety.honda_rx_hook(self._brake_msg(True))
self.assertTrue(self.safety.get_brake_prev())
def test_disengage_on_brake(self):
self.safety.set_controls_allowed(1)
self.safety.honda_rx_hook(self._brake_msg(1))
self.assertFalse(self.safety.get_controls_allowed())
def test_alt_disengage_on_brake(self):
self.safety.set_honda_alt_brake_msg(1)
self.safety.set_controls_allowed(1)
self.safety.honda_rx_hook(self._alt_brake_msg(1))
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_honda_alt_brake_msg(0)
self.safety.set_controls_allowed(1)
self.safety.honda_rx_hook(self._alt_brake_msg(1))
self.assertTrue(self.safety.get_controls_allowed())
def test_allow_brake_at_zero_speed(self):
# Brake was already pressed
self.safety.honda_rx_hook(self._brake_msg(True))
self.safety.set_controls_allowed(1)
self.safety.honda_rx_hook(self._brake_msg(True))
self.assertTrue(self.safety.get_controls_allowed())
self.safety.honda_rx_hook(self._brake_msg(False)) # reset no brakes
def test_not_allow_brake_when_moving(self):
# Brake was already pressed
self.safety.honda_rx_hook(self._brake_msg(True))
self.safety.honda_rx_hook(self._speed_msg(100))
self.safety.set_controls_allowed(1)
self.safety.honda_rx_hook(self._brake_msg(True))
self.assertFalse(self.safety.get_controls_allowed())
def test_prev_gas(self):
self.assertFalse(self.safety.get_gas_prev())
self.safety.honda_rx_hook(self._gas_msg(True))
self.assertTrue(self.safety.get_gas_prev())
def test_disengage_on_gas(self):
self.safety.set_controls_allowed(1)
self.safety.honda_rx_hook(self._gas_msg(1))
self.assertFalse(self.safety.get_controls_allowed())
def test_allow_engage_with_gas_pressed(self):
self.safety.honda_rx_hook(self._gas_msg(1))
self.safety.set_controls_allowed(1)
self.safety.honda_rx_hook(self._gas_msg(1))
self.assertTrue(self.safety.get_controls_allowed())
def test_brake_safety_check(self):
self.assertTrue(self.safety.honda_tx_hook(self._send_brake_msg(0x0000)))
self.assertFalse(self.safety.honda_tx_hook(self._send_brake_msg(0x1000)))
self.safety.set_controls_allowed(1)
self.assertTrue(self.safety.honda_tx_hook(self._send_brake_msg(0x1000)))
self.assertFalse(self.safety.honda_tx_hook(self._send_brake_msg(0x00F0)))
def test_gas_safety_check(self):
self.safety.set_controls_allowed(0)
self.assertTrue(self.safety.honda_tx_hook(self._send_gas_msg(0x0000)))
self.assertFalse(self.safety.honda_tx_hook(self._send_gas_msg(0x1000)))
def test_steer_safety_check(self):
self.safety.set_controls_allowed(0)
self.assertTrue(self.safety.honda_tx_hook(self._send_steer_msg(0x0000)))
self.assertFalse(self.safety.honda_tx_hook(self._send_steer_msg(0x1000)))
def test_spam_cancel_safety_check(self):
RESUME_BTN = 4
SET_BTN = 3
CANCEL_BTN = 2
BUTTON_MSG = 0x296
self.safety.set_bosch_hardware(1)
self.safety.set_controls_allowed(0)
self.assertTrue(self.safety.honda_tx_hook(self._button_msg(CANCEL_BTN, BUTTON_MSG)))
self.assertFalse(self.safety.honda_tx_hook(self._button_msg(RESUME_BTN, BUTTON_MSG)))
self.assertFalse(self.safety.honda_tx_hook(self._button_msg(SET_BTN, BUTTON_MSG)))
# do not block resume if we are engaged already
self.safety.set_controls_allowed(1)
self.assertTrue(self.safety.honda_tx_hook(self._button_msg(RESUME_BTN, BUTTON_MSG)))
if __name__ == "__main__":
unittest.main()
|
mit
|
jw2100/beginning.github.io
|
MachineLearning/MachineLearningInAction/machinelearninginaction/Ch06/EXTRAS/notLinSeperable.py
|
7
|
2336
|
'''
Created on Oct 6, 2010
@author: Peter
'''
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
xcord0 = []; ycord0 = []; xcord1 = []; ycord1 = []
markers =[]
colors =[]
fr = open('testSet.txt')#this file was generated by 2normalGen.py
for line in fr.readlines():
lineSplit = line.strip().split('\t')
xPt = float(lineSplit[0])
yPt = float(lineSplit[1])
label = int(lineSplit[2])
if (label == 0):
xcord0.append(xPt)
ycord0.append(yPt)
else:
xcord1.append(xPt)
ycord1.append(yPt)
fr.close()
fig = plt.figure()
ax = fig.add_subplot(221)
xcord0 = []; ycord0 = []; xcord1 = []; ycord1 = []
for i in range(300):
[x,y] = random.uniform(0,1,2)
if ((x > 0.5) and (y < 0.5)) or ((x < 0.5) and (y > 0.5)):
xcord0.append(x); ycord0.append(y)
else:
xcord1.append(x); ycord1.append(y)
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('A')
ax = fig.add_subplot(222)
xcord0 = random.standard_normal(150); ycord0 = random.standard_normal(150)
xcord1 = random.standard_normal(150)+2.0; ycord1 = random.standard_normal(150)+2.0
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('B')
ax = fig.add_subplot(223)
xcord0 = []; ycord0 = []; xcord1 = []; ycord1 = []
for i in range(300):
[x,y] = random.uniform(0,1,2)
if (x > 0.5):
xcord0.append(x*cos(2.0*pi*y)); ycord0.append(x*sin(2.0*pi*y))
else:
xcord1.append(x*cos(2.0*pi*y)); ycord1.append(x*sin(2.0*pi*y))
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('C')
ax = fig.add_subplot(224)
xcord1 = zeros(150); ycord1 = zeros(150)
xcord0 = random.uniform(-3,3,350); ycord0 = random.uniform(-3,3,350);
xcord1[0:50] = 0.3*random.standard_normal(50)+2.0; ycord1[0:50] = 0.3*random.standard_normal(50)+2.0
xcord1[50:100] = 0.3*random.standard_normal(50)-2.0; ycord1[50:100] = 0.3*random.standard_normal(50)-3.0
xcord1[100:150] = 0.3*random.standard_normal(50)+1.0; ycord1[100:150] = 0.3*random.standard_normal(50)
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('D')
plt.show()
|
gpl-3.0
|
idf/dotfiles
|
ipython/ipython_config.py
|
1
|
23145
|
# Configuration file for ipython.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# Whether to create profile dir if it doesn't exist
# c.BaseIPythonApplication.auto_create = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.BaseIPythonApplication.copy_config_files = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.BaseIPythonApplication.extra_config_file = u''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.BaseIPythonApplication.ipython_dir = u''
# Whether to overwrite existing config files when copying
# c.BaseIPythonApplication.overwrite = False
# The IPython profile to use.
# c.BaseIPythonApplication.profile = u'default'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell configuration
#------------------------------------------------------------------------------
# An enhanced, interactive shell for Python.
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.InteractiveShell.ast_node_interactivity = 'last_expr'
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.InteractiveShell.ast_transformers = []
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.InteractiveShell.autocall = 0
# Autoindent IPython code entered interactively.
# c.InteractiveShell.autoindent = True
# Enable magic commands to be called without the leading %.
# c.InteractiveShell.automagic = True
# The part of the banner to be printed before the profile
# c.InteractiveShell.banner1 = 'Python 2.7.9 (default, Apr 2 2015, 15:33:21) \nType "copyright", "credits" or "license" for more information.\n\nIPython 4.1.2 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# The part of the banner to be printed after the profile
# c.InteractiveShell.banner2 = ''
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.InteractiveShell.cache_size = 1000
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.InteractiveShell.color_info = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.InteractiveShell.colors = 'Linux'
#
# c.InteractiveShell.debug = False
# **Deprecated**
#
# Will be removed in IPython 6.0
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.InteractiveShell.deep_reload = False
# Don't call post-execute functions that have failed in the past.
# c.InteractiveShell.disable_failing_post_execute = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.InteractiveShell.display_page = False
#
# c.InteractiveShell.history_length = 10000
# The number of saved history entries to be loaded into the readline buffer at
# startup.
# c.InteractiveShell.history_load_length = 1000
#
# c.InteractiveShell.ipython_dir = ''
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.InteractiveShell.logappend = ''
# The name of the logfile to use.
# c.InteractiveShell.logfile = ''
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.InteractiveShell.logstart = False
# Save multi-line entries as one entry in readline history
# c.InteractiveShell.multiline_history = True
#
# c.InteractiveShell.object_info_string_level = 0
# Automatically call the pdb debugger after every exception.
# c.InteractiveShell.pdb = False
# Deprecated, will be removed in IPython 5.0, use PromptManager.in_template
# c.InteractiveShell.prompt_in1 = 'In [\\#]: '
# Deprecated, will be removed in IPython 5.0, use PromptManager.in2_template
# c.InteractiveShell.prompt_in2 = ' .\\D.: '
# Deprecated, will be removed in IPython 5.0, use PromptManager.out_template
# c.InteractiveShell.prompt_out = 'Out[\\#]: '
# Deprecated, will be removed in IPython 5.0, use PromptManager.justify
# c.InteractiveShell.prompts_pad_left = True
#
# c.InteractiveShell.quiet = False
#
# c.InteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
#
# c.InteractiveShell.readline_remove_delims = '-/~'
#
# c.InteractiveShell.readline_use = True
#
# c.InteractiveShell.separate_in = '\n'
#
# c.InteractiveShell.separate_out = ''
#
# c.InteractiveShell.separate_out2 = ''
# Show rewritten input, e.g. for autocall.
# c.InteractiveShell.show_rewritten_input = True
#
# c.InteractiveShell.wildcards_case_sensitive = True
#
# c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = u'emacsclient -t'
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
#
# c.PromptManager.color_scheme = 'Linux'
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
#------------------------------------------------------------------------------
# HistoryAccessorBase configuration
#------------------------------------------------------------------------------
# An abstract class for History Accessors
#------------------------------------------------------------------------------
# HistoryAccessor configuration
#------------------------------------------------------------------------------
# Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryAccessor.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryAccessor.enabled = True
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryAccessor.hist_file = u''
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# BaseFormatter configuration
#------------------------------------------------------------------------------
# A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format a given object.
#
# 1. The object is introspected to see if it has a method with the name
#     :attr:`print_method`. If it does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
#
# c.BaseFormatter.deferred_printers = {}
#
# c.BaseFormatter.enabled = True
#
# c.BaseFormatter.singleton_printers = {}
#
# c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
#
# c.PlainTextFormatter.float_precision = ''
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer configuration
#------------------------------------------------------------------------------
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# Magics configuration
#------------------------------------------------------------------------------
# Base class for implementing magic functions.
#
# Shell functions which can be reached as %function_name. All magic functions
# should accept a string, which they can parse for their own needs. This can
# make some functions easier to type, eg `%cd ../` vs. `%cd("../")`
#
# Classes providing magic functions need to subclass this class, and they MUST:
#
# - Use the method decorators `@line_magic` and `@cell_magic` to decorate
# individual methods as magic functions, AND
#
# - Use the class decorator `@magics_class` to ensure that the magic
# methods are properly registered at the instance level upon instance
# initialization.
#
# See :mod:`magic_functions` for examples of actual implementation classes.
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
c.IPCompleter.greedy = True
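# Illustrative sketch (an addition, not part of the original dotfile): a few more
# settings written with the same c.<Class>.<trait> convention documented in the
# commented defaults above. The particular values are assumptions, not
# recommendations.
c.InteractiveShellApp.exec_lines = ['import numpy as np']  # run at every startup
c.TerminalInteractiveShell.confirm_exit = False            # skip the exit prompt
c.InteractiveShell.colors = 'Linux'                        # terminal color scheme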
|
mit
|
walshjon/openmc
|
setup.py
|
1
|
2138
|
#!/usr/bin/env python
import glob
import sys
import numpy as np
from setuptools import setup, find_packages
try:
from Cython.Build import cythonize
have_cython = True
except ImportError:
have_cython = False
# Determine shared library suffix
if sys.platform == 'darwin':
suffix = 'dylib'
else:
suffix = 'so'
# Get version information from __init__.py. This is ugly, but more reliable than
# using an import.
with open('openmc/__init__.py', 'r') as f:
version = f.readlines()[-1].split()[-1].strip("'")
kwargs = {
'name': 'openmc',
'version': version,
'packages': find_packages(exclude=['tests*']),
'scripts': glob.glob('scripts/openmc-*'),
    # Data files and libraries
'package_data': {
'openmc.capi': ['libopenmc.{}'.format(suffix)],
'openmc.data': ['mass.mas12', 'fission_Q_data_endfb71.h5']
},
# Metadata
'author': 'The OpenMC Development Team',
'author_email': '[email protected]',
'description': 'OpenMC',
'url': 'https://github.com/mit-crpg/openmc',
'classifiers': [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
        'Topic :: Scientific/Engineering',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# Required dependencies
'install_requires': [
'numpy>=1.9', 'h5py', 'scipy', 'ipython', 'matplotlib',
'pandas', 'lxml', 'uncertainties'
],
# Optional dependencies
'extras_require': {
'test': ['pytest', 'pytest-cov'],
'vtk': ['vtk', 'silomesh'],
},
}
# If Cython is present, add resonance reconstruction capability
if have_cython:
kwargs.update({
'ext_modules': cythonize('openmc/data/reconstruct.pyx'),
'include_dirs': [np.get_include()]
})
setup(**kwargs)
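# Illustrative usage sketch (an addition, not part of the upstream setup script):
# with the 'install_requires' and 'extras_require' groups defined above, a typical
# development install with the test extras would look like
#
#     pip install -e .[test]
#
# and a source distribution can be built with
#
#     python setup.py sdist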
|
mit
|
brclark-usgs/flopy
|
flopy/plot/map.py
|
1
|
30643
|
import copy
import numpy as np
try:
import matplotlib.pyplot as plt
import matplotlib.colors
except:
plt = None
from . import plotutil
from .plotutil import bc_color_dict
from ..utils import SpatialReference
class ModelMap(object):
"""
Class to create a map of the model.
Parameters
----------
sr : flopy.utils.reference.SpatialReference
The spatial reference class (Default is None)
ax : matplotlib.pyplot axis
        The plot axis. If not provided, plt.gca() will be used.
If there is not a current axis then a new one will be created.
model : flopy.modflow object
flopy model object. (Default is None)
dis : flopy.modflow.ModflowDis object
flopy discretization object. (Default is None)
layer : int
Layer to plot. Default is 0. Must be between 0 and nlay - 1.
xul : float
x coordinate for upper left corner
yul : float
y coordinate for upper left corner. The default is the sum of the
delc array.
rotation : float
Angle of grid rotation around the upper left corner. A positive value
indicates clockwise rotation. Angles are in degrees.
extent : tuple of floats
(xmin, xmax, ymin, ymax) will be used to specify axes limits. If None
then these will be calculated based on grid, coordinates, and rotation.
Notes
-----
ModelMap must know the position and rotation of the grid in order to make
the plot. This information is contained in the SpatialReference class
(sr), which can be passed. If sr is None, then it looks for sr in dis.
If dis is None, then it looks for sr in model.dis. If all of these
arguments are none, then it uses xul, yul, and rotation. If none of these
arguments are provided, then it puts the lower-left-hand corner of the
grid at (0, 0).
"""
def __init__(self, sr=None, ax=None, model=None, dis=None, layer=0,
extent=None, xul=None, yul=None, xll=None, yll=None,
rotation=0., length_multiplier=1.):
if plt is None:
s = 'Could not import matplotlib. Must install matplotlib ' + \
' in order to use ModelMap method'
raise Exception(s)
self.model = model
self.layer = layer
self.dis = dis
self.sr = None
if sr is not None:
self.sr = copy.deepcopy(sr)
elif dis is not None:
# print("warning: the dis arg to model map is deprecated")
self.sr = copy.deepcopy(dis.parent.sr)
elif model is not None:
# print("warning: the model arg to model map is deprecated")
self.sr = copy.deepcopy(model.sr)
else:
self.sr = SpatialReference(xll=xll, yll=yll, xul=xul, yul=yul,
rotation=rotation,
length_multiplier=length_multiplier)
# model map override spatial reference settings
if any(elem is not None for elem in (xul, yul, xll, yll)) or \
rotation != 0 or length_multiplier != 1.:
self.sr.length_multiplier = length_multiplier
self.sr.set_spatialreference(xul, yul, xll, yll, rotation)
if ax is None:
try:
self.ax = plt.gca()
self.ax.set_aspect('equal')
except:
self.ax = plt.subplot(1, 1, 1, aspect='equal', axisbg="white")
else:
self.ax = ax
if extent is not None:
self._extent = extent
else:
self._extent = None
return
@property
def extent(self):
if self._extent is None:
self._extent = self.sr.get_extent()
return self._extent
def plot_array(self, a, masked_values=None, **kwargs):
"""
Plot an array. If the array is three-dimensional, then the method
will plot the layer tied to this class (self.layer).
Parameters
----------
a : numpy.ndarray
Array to plot.
masked_values : iterable of floats, ints
Values to mask.
**kwargs : dictionary
keyword arguments passed to matplotlib.pyplot.pcolormesh
Returns
-------
quadmesh : matplotlib.collections.QuadMesh
"""
if a.ndim == 3:
plotarray = a[self.layer, :, :]
elif a.ndim == 2:
plotarray = a
elif a.ndim == 1:
plotarray = a
else:
raise Exception('Array must be of dimension 1, 2 or 3')
if masked_values is not None:
for mval in masked_values:
plotarray = np.ma.masked_equal(plotarray, mval)
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
# quadmesh = ax.pcolormesh(self.sr.xgrid, self.sr.ygrid, plotarray,
# **kwargs)
quadmesh = self.sr.plot_array(plotarray, ax=ax)
# set max and min
if 'vmin' in kwargs:
vmin = kwargs.pop('vmin')
else:
vmin = None
if 'vmax' in kwargs:
vmax = kwargs.pop('vmax')
else:
vmax = None
quadmesh.set_clim(vmin=vmin, vmax=vmax)
# send rest of kwargs to quadmesh
quadmesh.set(**kwargs)
# add collection to axis
ax.add_collection(quadmesh)
# set limits
ax.set_xlim(self.extent[0], self.extent[1])
ax.set_ylim(self.extent[2], self.extent[3])
return quadmesh
def contour_array(self, a, masked_values=None, **kwargs):
"""
Contour an array. If the array is three-dimensional, then the method
will contour the layer tied to this class (self.layer).
Parameters
----------
a : numpy.ndarray
Array to plot.
masked_values : iterable of floats, ints
Values to mask.
**kwargs : dictionary
            keyword arguments passed to matplotlib.pyplot.contour
Returns
-------
contour_set : matplotlib.pyplot.contour
"""
if a.ndim == 3:
plotarray = a[self.layer, :, :]
elif a.ndim == 2:
plotarray = a
elif a.ndim == 1:
plotarray = a
else:
raise Exception('Array must be of dimension 1, 2 or 3')
if masked_values is not None:
for mval in masked_values:
plotarray = np.ma.masked_equal(plotarray, mval)
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
        if 'colors' in kwargs.keys():
            if 'cmap' in kwargs.keys():
                kwargs.pop('cmap')
# contour_set = ax.contour(self.sr.xcentergrid, self.sr.ycentergrid,
# plotarray, **kwargs)
contour_set = self.sr.contour_array(ax, plotarray, **kwargs)
ax.set_xlim(self.extent[0], self.extent[1])
ax.set_ylim(self.extent[2], self.extent[3])
return contour_set
def plot_inactive(self, ibound=None, color_noflow='black', **kwargs):
"""
        Make a plot of inactive cells.  If ibound is not specified, it is
        pulled from the BAS6 package of self.model.
Parameters
----------
ibound : numpy.ndarray
ibound array to plot. (Default is ibound in 'BAS6' package.)
        color_noflow : string
            Color for inactive (no flow) cells.  (Default is 'black')
Returns
-------
quadmesh : matplotlib.collections.QuadMesh
"""
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
if ibound is None:
bas = self.model.get_package('BAS6')
ibound = bas.ibound.array
plotarray = np.zeros(ibound.shape, dtype=np.int)
idx1 = (ibound == 0)
plotarray[idx1] = 1
plotarray = np.ma.masked_equal(plotarray, 0)
cmap = matplotlib.colors.ListedColormap(['0', color_noflow])
bounds = [0, 1, 2]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs)
return quadmesh
def plot_ibound(self, ibound=None, color_noflow='black', color_ch='blue',
**kwargs):
"""
        Make a plot of ibound.  If ibound is not specified, it is pulled from
        the BAS6 package of self.model.
Parameters
----------
ibound : numpy.ndarray
ibound array to plot. (Default is ibound in 'BAS6' package.)
        color_noflow : string
            Color for no flow cells.  (Default is 'black')
color_ch : string
Color for constant heads (Default is 'blue'.)
Returns
-------
quadmesh : matplotlib.collections.QuadMesh
"""
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
if ibound is None:
bas = self.model.get_package('BAS6')
ibound = bas.ibound.array
plotarray = np.zeros(ibound.shape, dtype=np.int)
idx1 = (ibound == 0)
idx2 = (ibound < 0)
plotarray[idx1] = 1
plotarray[idx2] = 2
plotarray = np.ma.masked_equal(plotarray, 0)
cmap = matplotlib.colors.ListedColormap(['0', color_noflow, color_ch])
bounds = [0, 1, 2, 3]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs)
return quadmesh
def plot_grid(self, **kwargs):
"""
Plot the grid lines.
Parameters
----------
kwargs : ax, colors. The remaining kwargs are passed into the
the LineCollection constructor.
Returns
-------
lc : matplotlib.collections.LineCollection
"""
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
if 'colors' not in kwargs:
kwargs['colors'] = '0.5'
lc = self.sr.get_grid_line_collection(**kwargs)
ax.add_collection(lc)
ax.set_xlim(self.extent[0], self.extent[1])
ax.set_ylim(self.extent[2], self.extent[3])
return lc
def plot_bc(self, ftype=None, package=None, kper=0, color=None,
plotAll=False,
**kwargs):
"""
Plot boundary conditions locations for a specific boundary
type from a flopy model
Parameters
----------
ftype : string
Package name string ('WEL', 'GHB', etc.). (Default is None)
package : flopy.modflow.Modflow package class instance
flopy package class instance. (Default is None)
kper : int
Stress period to plot
color : string
matplotlib color string. (Default is None)
plotAll : bool
Boolean used to specify that boundary condition locations for all
layers will be plotted on the current ModelMap layer.
(Default is False)
**kwargs : dictionary
keyword arguments passed to matplotlib.collections.PatchCollection
Returns
-------
quadmesh : matplotlib.collections.QuadMesh
"""
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
# Find package to plot
if package is not None:
p = package
ftype = p.name[0]
elif self.model is not None:
if ftype is None:
raise Exception('ftype not specified')
ftype = ftype.upper()
p = self.model.get_package(ftype)
else:
raise Exception('Cannot find package to plot')
# Get the list data
try:
mflist = p.stress_period_data[kper]
except Exception as e:
raise Exception('Not a list-style boundary package:' + str(e))
# Return if MfList is None
if mflist is None:
return None
nlay = self.model.nlay
# Plot the list locations
plotarray = np.zeros((nlay, self.sr.nrow, self.sr.ncol), dtype=np.int)
if plotAll:
idx = [mflist['i'], mflist['j']]
# plotarray[:, idx] = 1
pa = np.zeros((self.sr.nrow, self.sr.ncol), dtype=np.int)
pa[idx] = 1
for k in range(nlay):
plotarray[k, :, :] = pa.copy()
else:
idx = [mflist['k'], mflist['i'], mflist['j']]
plotarray[idx] = 1
plotarray = np.ma.masked_equal(plotarray, 0)
if color is None:
if ftype in bc_color_dict:
c = bc_color_dict[ftype]
else:
c = bc_color_dict['default']
else:
c = color
cmap = matplotlib.colors.ListedColormap(['0', c])
bounds = [0, 1, 2]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs)
return quadmesh
def plot_shapefile(self, shp, **kwargs):
"""
Plot a shapefile. The shapefile must be in the same coordinates as
the rotated and offset grid.
Parameters
----------
shp : string
Name of the shapefile to plot
kwargs : dictionary
Keyword arguments passed to plotutil.plot_shapefile()
"""
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
patch_collection = plotutil.plot_shapefile(shp, ax, **kwargs)
return patch_collection
def plot_cvfd(self, verts, iverts, **kwargs):
"""
Plot a cvfd grid. The vertices must be in the same coordinates as
the rotated and offset grid.
Parameters
----------
verts : ndarray
2d array of x and y points.
iverts : list of lists
should be of len(ncells) with a list of vertex number for each cell
kwargs : dictionary
Keyword arguments passed to plotutil.plot_cvfd()
"""
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
patch_collection = plotutil.plot_cvfd(verts, iverts, ax, self.layer,
**kwargs)
return patch_collection
def contour_array_cvfd(self, vertc, a, masked_values=None, **kwargs):
"""
Contour an array. If the array is three-dimensional, then the method
will contour the layer tied to this class (self.layer).
Parameters
----------
vertc : np.ndarray
Array with of size (nc, 2) with centroid location of cvfd
a : numpy.ndarray
Array to plot.
masked_values : iterable of floats, ints
Values to mask.
**kwargs : dictionary
            keyword arguments passed to matplotlib.pyplot.tricontour
Returns
-------
contour_set : matplotlib.pyplot.contour
"""
if 'ncpl' in kwargs:
nlay = self.layer + 1
ncpl = kwargs.pop('ncpl')
if isinstance(ncpl, int):
i = int(ncpl)
ncpl = np.ones((nlay), dtype=np.int) * i
elif isinstance(ncpl, list) or isinstance(ncpl, tuple):
ncpl = np.array(ncpl)
i0 = 0
i1 = 0
for k in range(nlay):
i0 = i1
i1 = i0 + ncpl[k]
# retain vertc in selected layer
vertc = vertc[i0:i1, :]
else:
i0 = 0
i1 = vertc.shape[0]
plotarray = a[i0:i1]
if masked_values is not None:
for mval in masked_values:
plotarray = np.ma.masked_equal(plotarray, mval)
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
        if 'colors' in kwargs.keys():
            if 'cmap' in kwargs.keys():
                kwargs.pop('cmap')
contour_set = ax.tricontour(vertc[:, 0], vertc[:, 1],
plotarray, **kwargs)
return contour_set
def plot_discharge(self, frf, fff, dis=None, flf=None, head=None, istep=1,
jstep=1, normalize=False, **kwargs):
"""
Use quiver to plot vectors.
Parameters
----------
frf : numpy.ndarray
MODFLOW's 'flow right face'
fff : numpy.ndarray
MODFLOW's 'flow front face'
flf : numpy.ndarray
MODFLOW's 'flow lower face' (Default is None.)
head : numpy.ndarray
MODFLOW's head array. If not provided, then will assume confined
            conditions in order to calculate saturated thickness.
istep : int
row frequency to plot. (Default is 1.)
jstep : int
column frequency to plot. (Default is 1.)
normalize : bool
boolean flag used to determine if discharge vectors should
be normalized using the magnitude of the specific discharge in each
cell. (default is False)
kwargs : dictionary
Keyword arguments passed to plt.quiver()
Returns
-------
quiver : matplotlib.pyplot.quiver
Vectors of specific discharge.
"""
# remove 'pivot' keyword argument
# by default the center of the arrow is plotted in the center of a cell
if 'pivot' in kwargs:
pivot = kwargs.pop('pivot')
else:
pivot = 'middle'
# Calculate specific discharge
# make sure dis is defined
if dis is None:
if self.model is not None:
dis = self.model.dis
else:
                print('ModelMap.plot_discharge() error: self.dis is None and dis '
'arg is None.')
return
ib = self.model.bas6.ibound.array
delr = dis.delr.array
delc = dis.delc.array
top = dis.top.array
botm = dis.botm.array
nlay, nrow, ncol = botm.shape
laytyp = None
hnoflo = 999.
hdry = 999.
if self.model is not None:
lpf = self.model.get_package('LPF')
if lpf is not None:
laytyp = lpf.laytyp.array
hdry = lpf.hdry
bas = self.model.get_package('BAS6')
if bas is not None:
hnoflo = bas.hnoflo
# If no access to head or laytyp, then calculate confined saturated
# thickness by setting laytyp to zeros
if head is None or laytyp is None:
head = np.zeros(botm.shape, np.float32)
laytyp = np.zeros((nlay), dtype=np.int)
sat_thk = plotutil.saturated_thickness(head, top, botm, laytyp,
[hnoflo, hdry])
# Calculate specific discharge
qx, qy, qz = plotutil.centered_specific_discharge(frf, fff, flf, delr,
delc, sat_thk)
# Select correct slice
u = qx[self.layer, :, :]
v = qy[self.layer, :, :]
# apply step
x = self.sr.xcentergrid[::istep, ::jstep]
y = self.sr.ycentergrid[::istep, ::jstep]
u = u[::istep, ::jstep]
v = v[::istep, ::jstep]
# normalize
if normalize:
vmag = np.sqrt(u ** 2. + v ** 2.)
idx = vmag > 0.
u[idx] /= vmag[idx]
v[idx] /= vmag[idx]
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
# mask discharge in inactive cells
idx = (ib[self.layer, ::istep, ::jstep] == 0)
u[idx] = np.nan
v[idx] = np.nan
# Rotate and plot
urot, vrot = self.sr.rotate(u, v, self.sr.rotation)
quiver = ax.quiver(x, y, urot, vrot, pivot=pivot, **kwargs)
return quiver
def plot_pathline(self, pl, travel_time=None, **kwargs):
"""
Plot the MODPATH pathlines.
Parameters
----------
pl : list of rec arrays or a single rec array
rec array or list of rec arrays is data returned from
modpathfile PathlineFile get_data() or get_alldata()
methods. Data in rec array is 'x', 'y', 'z', 'time',
'k', and 'particleid'.
travel_time: float or str
travel_time is a travel time selection for the displayed
pathlines. If a float is passed then pathlines with times
less than or equal to the passed time are plotted. If a
            string is passed, a variety of logical constraints can be added
in front of a time value to select pathlines for a select
period of time. Valid logical constraints are <=, <, >=, and
>. For example, to select all pathlines less than 10000 days
travel_time='< 10000' would be passed to plot_pathline.
(default is None)
kwargs : layer, ax, colors. The remaining kwargs are passed
into the LineCollection constructor. If layer='all',
pathlines are output for all layers
Returns
-------
lc : matplotlib.collections.LineCollection
"""
from matplotlib.collections import LineCollection
# make sure pathlines is a list
if not isinstance(pl, list):
pl = [pl]
if 'layer' in kwargs:
kon = kwargs.pop('layer')
if isinstance(kon, bytes):
kon = kon.decode()
if isinstance(kon, str):
if kon.lower() == 'all':
kon = -1
else:
kon = self.layer
else:
kon = self.layer
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
if 'colors' not in kwargs:
kwargs['colors'] = '0.5'
linecol = []
for p in pl:
if travel_time is None:
tp = p.copy()
else:
if isinstance(travel_time, str):
if '<=' in travel_time:
time = float(travel_time.replace('<=', ''))
idx = (p['time'] <= time)
elif '<' in travel_time:
time = float(travel_time.replace('<', ''))
idx = (p['time'] < time)
elif '>=' in travel_time:
time = float(travel_time.replace('>=', ''))
idx = (p['time'] >= time)
                    elif '>' in travel_time:
time = float(travel_time.replace('>', ''))
idx = (p['time'] > time)
else:
try:
time = float(travel_time)
idx = (p['time'] <= time)
except:
errmsg = 'flopy.map.plot_pathline travel_time ' + \
'variable cannot be parsed. ' + \
                                     'Acceptable logical constraints are ' + \
'<=, <, >=, and >. ' + \
'You passed {}'.format(travel_time)
raise Exception(errmsg)
else:
time = float(travel_time)
idx = (p['time'] <= time)
tp = p[idx]
vlc = []
# rotate data
x0r, y0r = self.sr.rotate(tp['x'], tp['y'], self.sr.rotation, 0.,
self.sr.yedge[0])
x0r += self.sr.xul
y0r += self.sr.yul - self.sr.yedge[0]
# build polyline array
arr = np.vstack((x0r, y0r)).T
# select based on layer
if kon >= 0:
kk = p['k'].copy().reshape(p.shape[0], 1)
kk = np.repeat(kk, 2, axis=1)
arr = np.ma.masked_where((kk != kon), arr)
else:
arr = np.ma.asarray(arr)
# append line to linecol if there is some unmasked segment
if not arr.mask.all():
linecol.append(arr)
# create line collection
lc = None
if len(linecol) > 0:
lc = LineCollection(linecol, **kwargs)
ax.add_collection(lc)
return lc
def plot_endpoint(self, ep, direction='ending',
selection=None, selection_direction=None, **kwargs):
"""
Plot the MODPATH endpoints.
Parameters
----------
ep : rec array
A numpy recarray with the endpoint particle data from the
MODPATH 6 endpoint file
direction : str
String defining if starting or ending particle locations should be
considered. (default is 'ending')
selection : tuple
            tuple that defines the zero-based layer, row, column location
(l, r, c) to use to make a selection of particle endpoints.
The selection could be a well location to determine capture zone
for the well. If selection is None, all particle endpoints for
            the user-specified direction will be plotted. (default is None)
selection_direction : str
            String defining whether a selection should be made on starting or
ending particle locations. If selection is not None and
selection_direction is None, the selection direction will be set
to the opposite of direction. (default is None)
kwargs : ax, c, s or size, colorbar, colorbar_label, shrink. The
remaining kwargs are passed into the matplotlib scatter
method. If colorbar is True a colorbar will be added to the plot.
If colorbar_label is passed in and colorbar is True then
colorbar_label will be passed to the colorbar set_label()
method. If shrink is passed in and colorbar is True then
the colorbar size will be set using shrink.
Returns
-------
sp : matplotlib.pyplot.scatter
"""
if direction.lower() == 'ending':
direction = 'ending'
elif direction.lower() == 'starting':
direction = 'starting'
else:
errmsg = 'flopy.map.plot_endpoint direction must be "ending" ' + \
'or "starting".'
raise Exception(errmsg)
if direction == 'starting':
xp, yp = 'x0', 'y0'
elif direction == 'ending':
xp, yp = 'x', 'y'
if selection_direction is not None:
if selection_direction.lower() != 'starting' and \
selection_direction.lower() != 'ending':
errmsg = 'flopy.map.plot_endpoint selection_direction ' + \
'must be "ending" or "starting".'
raise Exception(errmsg)
else:
if direction.lower() == 'starting':
selection_direction = 'ending'
elif direction.lower() == 'ending':
selection_direction = 'starting'
if selection is not None:
try:
k, i, j = selection[0], selection[1], selection[2]
if selection_direction.lower() == 'starting':
ksel, isel, jsel = 'k0', 'i0', 'j0'
elif selection_direction.lower() == 'ending':
ksel, isel, jsel = 'k', 'i', 'j'
except:
errmsg = 'flopy.map.plot_endpoint selection must be a ' + \
'zero-based layer, row, column tuple (l, r, c) ' + \
'of the location to evaluate (i.e., well location).'
raise Exception(errmsg)
if selection is not None:
idx = (ep[ksel] == k) & (ep[isel] == i) & (ep[jsel] == j)
tep = ep[idx]
else:
tep = ep.copy()
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
# scatter kwargs that users may redefine
if 'c' not in kwargs:
c = tep['finaltime'] - tep['initialtime']
else:
c = np.empty((tep.shape[0]), dtype="S30")
c.fill(kwargs.pop('c'))
s = 50
if 's' in kwargs:
s = float(kwargs.pop('s')) ** 2.
elif 'size' in kwargs:
s = float(kwargs.pop('size')) ** 2.
# colorbar kwargs
createcb = False
if 'colorbar' in kwargs:
createcb = kwargs.pop('colorbar')
colorbar_label = 'Endpoint Time'
if 'colorbar_label' in kwargs:
colorbar_label = kwargs.pop('colorbar_label')
shrink = 1.
if 'shrink' in kwargs:
shrink = float(kwargs.pop('shrink'))
# rotate data
x0r, y0r = self.sr.rotate(tep[xp], tep[yp], self.sr.rotation, 0.,
self.sr.yedge[0])
x0r += self.sr.xul
y0r += self.sr.yul - self.sr.yedge[0]
# build array to plot
arr = np.vstack((x0r, y0r)).T
# plot the end point data
sp = plt.scatter(arr[:, 0], arr[:, 1], c=c, s=s, **kwargs)
# add a colorbar for travel times
if createcb:
cb = plt.colorbar(sp, shrink=shrink)
cb.set_label(colorbar_label)
return sp
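# Illustrative usage sketch (an addition, not part of flopy; the model file name
# and the assumption that ModelMap is re-exported as flopy.plot.ModelMap reflect
# flopy releases of this vintage):
#
#     import flopy
#     import matplotlib.pyplot as plt
#     ml = flopy.modflow.Modflow.load('model.nam')   # hypothetical model file
#     mm = flopy.plot.ModelMap(model=ml, layer=0)
#     mm.plot_grid()                                 # grid lines
#     mm.plot_ibound()                               # inactive / constant-head cells
#     mm.plot_bc('WEL')                              # well locations for kper 0
#     plt.show()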
|
bsd-3-clause
|
ankurankan/scikit-learn
|
sklearn/metrics/cluster/__init__.py
|
312
|
1322
|
"""
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
|
bsd-3-clause
|
rjfarmer/mesaplot
|
tests/test_mesaplot.py
|
1
|
2297
|
try:
import unittest as unittest
except ImportError:
import unittest2 as unittest
import mesaPlot as mp
import os
import matplotlib.pyplot as plt
os.chdir('tests')
class TestFileReader(unittest.TestCase):
def test_init(self):
m=mp.MESA()
def test_load_history1(self):
m=mp.MESA()
m.loadHistory()
def test_load_history2(self):
m=mp.MESA()
m.log_fold='LOGS/'
m.loadHistory()
def test_load_profile1(self):
m=mp.MESA()
m.loadProfile(num=1)
def test_load_profile2(self):
m=mp.MESA()
m.log_fold='LOGS/'
m.loadProfile(num=-1)
def test_load_profile3(self):
m=mp.MESA()
m.log_fold='LOGS/'
m.loadProfile(num=-2)
	def test_load_profile4(self):
m=mp.MESA()
m.log_fold='LOGS/'
m.loadProfile(f='LOGS/profile1.data')
class TestPlot(unittest.TestCase):
def setUp(self):
self.m=mp.MESA()
self.p=mp.plot()
self.m.loadHistory()
self.m.loadProfile(num=1)
def tearDown(self):
plt.close('all')
def test_plotHR(self):
self.p.plotHR(self.m,show=False)
def test_plotNeu(self):
self.p.plotNeu(self.m,show=False)
def test_abun(self):
self.p.plotAbun(self.m,show=False)
def test_plotAbunByA(self):
self.p.plotAbunByA(self.m,show=False)
def test_plotAbunHist(self):
self.p.plotAbunHist(self.m,show=False)
def test_plotAbunPAndN(self):
self.p.plotAbunPAndN(self.m,show=False)
def test_plotAngMom(self):
self.p.plotAngMom(self.m,show=False)
def test_plotBurn(self):
self.p.plotBurn(self.m,show=False)
def test_plotBurnHist(self):
self.p.plotBurnHist(self.m,show=False)
def test_plotDynamo(self):
self.p.plotDynamo(self.m,show=False)
def test_plotHistory(self):
self.p.plotHistory(self.m,show=False)
def test_plotProfile(self):
self.p.plotProfile(self.m,show=False)
def test_plotKip(self):
self.p.plotKip(self.m,show=False)
def test_plotKip2(self):
self.p.plotKip2(self.m,show=False)
def test_plotKip3(self):
self.p.plotKip3(self.m,show=False)
	def test_plotKip3_age_lookback(self):
self.p.plotKip3(self.m,show=False,age_lookback=True,xaxis='star_age')
def test_plotMix(self):
self.p.plotMix(self.m,show=False)
def test_plotTRho(self):
self.p.plotTRho(self.m,show=False)
def test_plotLdivM(self):
self.p.plotLdivM(self.m,show=False)
|
gpl-2.0
|
PmagPy/PmagPy
|
programs/qqplot.py
|
2
|
2180
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import input
import sys
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmagplotlib as pmagplotlib
def main():
"""
NAME
qqplot.py
DESCRIPTION
makes qq plot of input data against a Normal distribution.
INPUT FORMAT
takes real numbers in single column
SYNTAX
qqplot.py [-h][-i][-f FILE]
OPTIONS
-f FILE, specify file on command line
-fmt [png,svg,jpg,eps] set plot output format [default is svg]
-sav saves and quits
OUTPUT
calculates the K-S D and the D expected for a normal distribution
when D<Dc, distribution is normal (at 95% level of confidence).
"""
fmt,plot='svg',0
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-sav' in sys.argv: plot=1
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
    if '-f' in sys.argv: # ask for filename
        ind=sys.argv.index('-f')
        file=sys.argv[ind+1]
        f=open(file,'r')
        data=f.readlines()
        f.close()
    else: # otherwise read the data from standard input
        data=sys.stdin.readlines()
X= [] # set up list for data
for line in data: # read in the data from standard input
rec=line.split() # split each line on space to get records
X.append(float(rec[0])) # append data to X
#
QQ={'qq':1}
pmagplotlib.plot_init(QQ['qq'],5,5)
pmagplotlib.plot_qq_norm(QQ['qq'],X,'Q-Q Plot') # make plot
if plot==0:
pmagplotlib.draw_figs(QQ)
files={}
for key in list(QQ.keys()):
files[key]=key+'.'+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles={}
        titles['qq']='Q-Q Plot'
        QQ = pmagplotlib.add_borders(QQ,titles,black,purple)
pmagplotlib.save_plots(QQ,files)
elif plot==0:
ans=input(" S[a]ve to save plot, [q]uit without saving: ")
if ans=="a":
pmagplotlib.save_plots(QQ,files)
else:
pmagplotlib.save_plots(QQ,files)
#
if __name__ == "__main__":
main()
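# Illustrative invocation sketch (an addition; the data file name is an assumption):
#
#     python qqplot.py -f values.txt -fmt png -sav
#
# reads one real number per line from values.txt, reports the K-S D statistic
# against a Normal distribution, and writes qq.png without opening a plot window.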
|
bsd-3-clause
|
NickC1/skCCM
|
skccm/skccm.py
|
1
|
9086
|
#
# Data for analyzing causality.
# By Nick Cortale
#
# Classes:
# ccm
# embed
#
# Paper:
# Detecting Causality in Complex Ecosystems
# George Sugihara et al. 2012
#
# Thanks to Kenneth Ells and Dylan McNamara
#
# Notes:
# Originally I thought this could be made way faster by only calculating the
# distances once and then chopping it to a specific library length. It turns out
# that calculating the distances is cheaper than filtering the indices.
#
import numpy as np
from sklearn import neighbors
from sklearn import metrics
from . import utilities
import pandas as pd
import time
class CCM:
"""Convergent cross mapping for two embedded time series.
Parameters
----------
weights : str
Weighting scheme for predictions. Options:
- 'exp' : exponential weighting
score : str
How to score the predictions. Options:
- 'score'
- 'corrcoef'
verbose : bool
Prints out calculation status.
"""
def __init__(self, weights='exp', verbose=False):
self.weights = weights
self.verbose = verbose
def fit(self, X1_train, X2_train):
"""Fit the training data for ccm. Can be thought of as reconstructing the
shadow manifolds of each time series.
Amount of near neighbors is set to be embedding dimension plus one.
        Creates separate near-neighbor regressors for X1 and X2 independently.
Parameters
----------
X1_train : 2d array
Embed time series of shape (num_samps,embed_dim).
X2_train : 2d array
Embed time series of shape (num_samps,embed_dim).
"""
# Save X1_train and X2_train for prediction later. Confusing,
# but we need to make predictions about our testing set using these.
self.X1_train = X1_train
self.X2_train = X2_train
        # to surround a point, there must be ndim + 1 points
near_neighs = X1_train.shape[1] + 1
self.knn1 = neighbors.KNeighborsRegressor(near_neighs)
self.knn2 = neighbors.KNeighborsRegressor(near_neighs)
def predict(self, X1_test, X2_test, lib_lengths):
"""Make a prediction.
Parameters
----------
X1_test : 2d array
Embed time series of shape (num_samps,embed_dim).
X2_test : 2d array
Embed time series of shape (num_samps,embed_dim).
lib_lengths : 1d array of ints
Library lengths to test.
Returns
-------
X1_pred : list of 2d arrays
Predictions for each library length.
X2_pred : list of 2d arrays
Predictions for each library length.
"""
#store X1_test and X2_test for use later
self.X1_test = X1_test
self.X2_test = X2_test
X1_pred = []
X2_pred = []
for liblen in lib_lengths:
x1_p = np.empty(X1_test.shape)
x2_p = np.empty(X2_test.shape)
#keep only the indices that are less than library length
self.knn1.fit(self.X1_train[:liblen], self.X1_train[:liblen])
self.knn2.fit(self.X2_train[:liblen], self.X2_train[:liblen])
dist1,ind1 = self.knn1.kneighbors(X1_test)
dist2,ind2 = self.knn2.kneighbors(X2_test)
for j in range(self.X1_train.shape[1]):
W1 = utilities.exp_weight(dist1)
W2 = utilities.exp_weight(dist2)
#flip the weights and indices
x1_p[:, j] = np.sum(self.X1_train[ind2, j] * W2, axis=1)
x2_p[:, j] = np.sum(self.X2_train[ind1, j] * W1, axis=1)
X1_pred.append(x1_p)
X2_pred.append(x2_p)
self.X1_pred = X1_pred
self.X2_pred = X2_pred
return X1_pred, X2_pred
def score(self, score_metric='corrcoef'):
"""Evalulate the predictions.
Parameters
----------
        score_metric : string
How to score the predictions. Options:
- 'score'
- 'corrcoef'
Returns
-------
score_1 : 2d array
Scores for the first time series using the weights from the second
time series.
score_2 : 2d array
Scores for the second time series using the weights from the first
time series.
"""
num_preds = self.X1_train.shape[1]
score_1 = []
score_2 = []
for x1_p, x2_p in zip(self.X1_pred, self.X2_pred):
sc1 = np.empty(num_preds)
sc2 = np.empty(num_preds)
for ii in range(num_preds):
p1 = x1_p[:,ii]
p2 = x2_p[:,ii]
if score_metric == 'score':
sc1[ii] = utilities.score(p1,self.X1_test[:,ii])
sc2[ii] = utilities.score(p2,self.X2_test[:,ii])
if score_metric == 'corrcoef':
sc1[ii] = utilities.corrcoef(p1,self.X1_test[:,ii])
sc2[ii] = utilities.corrcoef(p2,self.X2_test[:,ii])
score_1.append( np.mean(sc1) )
score_2.append( np.mean(sc2) )
return score_1, score_2
class Embed:
"""Embed a time series.
Parameters
----------
X : 1D array
        Time series to be embedded.
"""
def __init__(self,X):
if type(X) is pd.pandas.core.frame.DataFrame:
self.df = X
else:
self.X = X
def df_mutual_information(self, max_lag):
"""Calculates the mutual information along each column of a dataframe.
Ensure that the time series is continuous in time and sampled regularly.
You can resample it hourly, daily, minutely etc. if needed.
Parameters
----------
max_lag : int
maximum amount to shift the time series
Returns
-------
mi : dataframe
columns are the columns of the original dataframe with rows being
the mutual information. shape(max_lag,num_cols)
"""
cols = self.df.columns
mi = np.empty((max_lag, len(cols)))
for i,col in enumerate(cols):
self.X = self.df[col].values
mi[:,i] = self.mutual_information(max_lag)
mi = pd.DataFrame(mi,columns=cols)
return mi
def mutual_information(self, max_lag):
"""Calculates the mutual information between the an unshifted time
series and a shifted time series.
Utilizes scikit-learn's implementation of the mutual information found
in sklearn.metrics.
Parameters
----------
max_lag : integer
Maximum amount to shift the time series.
Returns
-------
m_score : 1-D array
            Mutual information between the unshifted time series and the
            shifted time series.
"""
#number of bins - say ~ 20 pts / bin for joint distribution
#and that at least 4 bins are required
N = max(self.X.shape)
num_bins = max(4.,np.floor(np.sqrt(N/20)))
num_bins = int(num_bins)
m_score = np.zeros((max_lag))
for jj in range(max_lag):
lag = jj+1
ts = self.X[0:-lag]
ts_shift = self.X[lag::]
min_ts = np.min(self.X)
max_ts = np.max(self.X)+.0001 #needed to bin them up
bins = np.linspace(min_ts,max_ts,num_bins+1)
bin_tracker = np.zeros_like(ts)
bin_tracker_shift = np.zeros_like(ts_shift)
for ii in range(num_bins):
locs = np.logical_and( ts>=bins[ii], ts<bins[ii+1] )
bin_tracker[locs] = ii
locs_shift = np.logical_and( ts_shift>=bins[ii], ts_shift<bins[ii+1] )
bin_tracker_shift[locs_shift]=ii
m_score[jj] = metrics.mutual_info_score(bin_tracker,bin_tracker_shift)
return m_score
def embed_vectors_1d(self, lag, embed):
"""Embeds vectors from a one dimensional time series in m-dimensional
space.
Parameters
----------
X : 1d array
Training or testing set.
lag : int
Lag value as calculated from the first minimum of the mutual info.
embed : int
Embedding dimension. How many lag values to take.
Returns
-------
features : 2d array
Contains all of the embedded vectors. Shape (num_vectors,embed).
Example
-------
>>> X = [0,1,2,3,4,5,6,7,8,9,10]
em = 3
lag = 2
predict=3
>>> embed_vectors_1d
features = [[0,2,4], [1,3,5], [2,4,6], [3,5,7]]
"""
tsize = self.X.shape[0]
t_iter = tsize-(lag*(embed-1))
features = np.zeros((t_iter,embed))
for ii in range(t_iter):
end_val = ii+lag*(embed-1)+1
part = self.X[ii : end_val]
features[ii,:] = part[::lag]
return features
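# --- Hedged usage sketch (not part of the original source) ---
# A minimal illustration of the Embed class defined above, assuming a
# plain 1-D numpy array as input. With lag=2 and embed=3 each row of
# the result is [x[i], x[i+2], x[i+4]]. The helper name is made up.
def _embed_usage_sketch():
    x = np.arange(11)                      # 0, 1, ..., 10
    em = Embed(x)
    feats = em.embed_vectors_1d(lag=2, embed=3)
    # feats.shape == (7, 3); first row is [0, 2, 4], last row is [6, 8, 10]
    return feats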
|
mit
|
raghavrv/scikit-learn
|
sklearn/cross_decomposition/cca_.py
|
151
|
3192
|
from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation should be done on a copy. Leave the default
value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(CCA, self).__init__(n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
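# --- Hedged usage sketch (not part of the original source) ---
# A small extension of the docstring example above: after fitting, the
# first canonical correlation can be read off as the correlation between
# the paired score columns returned by transform. The helper name and
# the use of numpy here are illustrative only.
def _cca_usage_sketch():
    import numpy as np
    X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [3., 5., 4.]])
    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    cca = CCA(n_components=1).fit(X, Y)
    X_c, Y_c = cca.transform(X, Y)
    return np.corrcoef(X_c[:, 0], Y_c[:, 0])[0, 1]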
|
bsd-3-clause
|
fillycheezstake/MissionPlanner
|
Lib/site-packages/numpy/lib/function_base.py
|
53
|
108301
|
__docformat__ = "restructuredtext en"
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp']
import warnings
import types
import sys
import numpy.core.numeric as _nx
from numpy.core import linspace
from numpy.core.numeric import ones, zeros, arange, concatenate, array, \
asarray, asanyarray, empty, empty_like, ndarray, around
from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
if sys.platform != 'cli':
from _compiled_base import _insert, add_docstring
from _compiled_base import digitize, bincount, interp as compiled_interp
else:
from _compiled_base import _insert, bincount
# TODO: Implement these
def add_docstring(*args, **kw):
pass
def digitize(*args, **kw):
raise NotImplementedError()
def compiled_interp(*args, **kw):
raise NotImplementedError()
from arraysetops import setdiff1d
from utils import deprecate
import numpy as np
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try: iter(y)
except: return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), normed=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, normed=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if range is None:
range = (a.min(), a.max())
mn, mx = [mi+0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins+1, endpoint=True)
uniform = True
else:
bins = asarray(bins)
uniform = False
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero,], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if normed:
db = array(np.diff(bins), float)
if not uniform:
warnings.warn("""
This release of NumPy fixes a normalization bug in histogram
function occurring with non-uniform bin widths. The returned
value is now a density: n / (N * bin width), where n is the
bin count and N the total number of points.
""")
return n/db/n.sum(), bins
else:
return n, bins
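# --- Hedged illustration (not part of the original source) ---
# The loop above counts samples per bin by sorting each block and
# locating the bin edges with searchsorted; np.diff of those positions
# gives the per-bin counts. A minimal sketch of that idea (helper name
# made up for illustration):
def _searchsorted_counts_sketch(data, edges):
    sa = sort(asarray(data, float))
    pos = np.r_[sa.searchsorted(edges[:-1], 'left'),
                sa.searchsorted(edges[-1], 'right')]
    return np.diff(pos)
# _searchsorted_counts_sketch([0.1, 0.4, 0.6, 0.9, 1.5],
#                             np.array([0.0, 0.5, 1.0, 2.0]))  -> array([2, 2, 1])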
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitely in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : boolean, optional
If False, returns the number of samples in each bin. If True, returns
the bin density, ie, the bin count divided by the bin hypervolume.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights for
the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1D histogram
histogram2d: 2D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal'\
' to the dimension of the sample x.')
except TypeError:
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
else:
edges[i] = asarray(bins[i], float)
nbin[i] = len(edges[i])+1 # +1 for outlier bins
dedges[i] = diff(edges[i])
nbin = asarray(nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:,i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
outliers = zeros(N, int)
for i in arange(D):
# Rounding precision
decimal = int(-log10(dedges[i].min())) +6
# Find which points are on the rightmost edge.
on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1],
decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
shape = []
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i,j)
ni[i],ni[j] = ni[j],ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1,-1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix) :
a = np.asarray(a)
if weights is None :
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else :
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape :
if axis is None :
raise TypeError(
"Axis must be specified when shapes of a "\
"and weights differ.")
if wgt.ndim != 1 :
raise TypeError(
"1D weights expected when shapes of a and "\
"weights differ.")
if wgt.shape[0] != a.shape[axis] :
raise ValueError(
"Length of weights not compatible with "\
"specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
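# --- Hedged illustration (not part of the original source) ---
# For 1-D input the weighted branch above reduces to
# sum(a * w) / sum(w). A minimal sketch (helper name made up):
def _weighted_average_1d_sketch(a, w):
    a = np.asarray(a, dtype=float)
    w = np.asarray(w, dtype=float)
    return np.multiply(a, w).sum() / w.sum()
# _weighted_average_1d_sketch(range(1, 11), range(10, 0, -1))  -> 4.0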
def asarray_chkfinite(a):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a)
array([1, 2])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a)
if (a.dtype.char in typecodes['AllFloat']) \
and (_nx.isnan(a).any() or _nx.isinf(a).any()):
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have undefined values.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.arange(6) - 2.5
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray)):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
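# --- Hedged usage sketch (not part of the original source) ---
# Using the "one extra function" rule described above: the third entry
# is applied wherever neither condition holds (here, where x == 0).
# Helper name made up for illustration.
def _piecewise_sign_sketch(x):
    x = np.asanyarray(x, dtype=float)
    return piecewise(x, [x < 0, x > 0], [-1, 1, 0])
# _piecewise_sign_sketch([-2., 0., 3.])  -> array([-1.,  0.,  1.])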
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
n = len(condlist)
n2 = len(choicelist)
if n2 != n:
raise ValueError(
"list of cases must be same length as list of conditions")
choicelist = [default] + choicelist
S = 0
pfac = 1
for k in range(1, n+1):
S += k * pfac * asarray(condlist[k-1])
if k < n:
pfac *= (1-asarray(condlist[k-1]))
# handle special case of a 1-element condition but
# a multi-element choice
if type(S) in ScalarType or max(asarray(S).shape)==1:
pfac = asarray(1)
for k in range(n2+1):
pfac = pfac + asarray(choicelist[k])
if type(S) in ScalarType:
S = S*ones(asarray(pfac).shape, type(S))
else:
S = S*ones(asarray(pfac).shape, S.dtype)
return choose(S, tuple(choicelist))
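# --- Hedged illustration (not part of the original source) ---
# When several conditions are True for the same element, select uses
# the first matching entry of choicelist. Helper name made up.
def _select_priority_sketch():
    x = np.arange(6)
    # both conditions hold for x >= 4; the first choice (x**2) wins there
    return select([x > 3, x > 1], [x ** 2, x + 100], default=-1)
# _select_priority_sketch()  -> array([ -1,  -1, 102, 103,  16,  25])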
def copy(a):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, copy=True)
# Basic operations
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior
and first differences at the boundaries. The returned gradient hence has
the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
`*varargs` : scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: `dx`, `dy`, `dz`, ... The default distance is 1.
Returns
-------
g : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]),
array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
"""
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
# use central differences on interior and first differences on endpoints
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D']:
otype = 'd'
for axis in range(N):
# select out appropriate parts for this dimension
out = np.zeros_like(f).astype(otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
out[slice1] = (f[slice2] - f[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (f[1] - f[0])
out[slice1] = (f[slice2] - f[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (f[-1] - f[-2])
out[slice1] = (f[slice2] - f[slice3])
# divide by step size
outvals.append(out / dx[axis])
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
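# --- Hedged illustration (not part of the original source) ---
# In 1-D the slicing above amounts to central differences in the
# interior and one-sided differences at the two ends. Helper name made
# up for illustration.
def _gradient_1d_sketch(f, dx=1.0):
    f = np.asarray(f, dtype=float)
    g = np.empty_like(f)
    g[1:-1] = (f[2:] - f[:-2]) / 2.0
    g[0] = f[1] - f[0]
    g[-1] = f[-1] - f[-2]
    return g / dx
# _gradient_1d_sketch([1, 2, 4, 7, 11, 16])  -> array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])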
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
out : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
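# --- Hedged illustration (not part of the original source) ---
# The recursion above means an n-th order difference is the first
# difference applied n times; for n=2 (helper name made up):
def _diff_twice_sketch(a):
    return diff(diff(a))
# _diff_twice_sketch([1, 2, 4, 7, 0]) gives array([  1,   1, -10]),
# the same as diff([1, 2, 4, 7, 0], n=2).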
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, defaults is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasingness is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
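# --- Hedged illustration (not part of the original source) ---
# A pure-Python sketch of the linear interpolation that interp performs
# for a scalar x lying inside the data range: find the bracketing
# interval with searchsorted and interpolate linearly. Helper name made
# up; the left/right fill values handled by interp above are ignored.
def _interp_scalar_sketch(x, xp, fp):
    xp = np.asarray(xp, dtype=float)
    fp = np.asarray(fp, dtype=float)
    i = np.clip(np.searchsorted(xp, x) - 1, 0, len(xp) - 2)
    t = (x - xp[i]) / (xp[i + 1] - xp[i])
    return fp[i] + t * (fp[i + 1] - fp[i])
# _interp_scalar_sketch(2.5, [1, 2, 3], [3, 2, 0])  -> 1.0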
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd+pi, 2*pi)-pi
_nx.putmask(ddmod, (ddmod==-pi) & (dd > 0), pi)
ph_correct = ddmod - dd;
_nx.putmask(ph_correct, abs(dd)<discont, 0)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a,copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.: break
else: first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.: break
else: last = last - 1
return filt[first:last]
import sys
if sys.hexversion < 0x2040000:
from sets import Set as set
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True],tmp[1:]!=tmp[:-1]))
return tmp[idx]
except AttributeError:
items = list(set(x))
items.sort()
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
See Also
--------
take, put, putmask, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.putmask(arr, mask, vals)``, the difference is that `place`
uses the first N elements of `vals`, where N is the number of True values
in `mask`, while `putmask` uses the elements where `mask` is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `a`.
vals : 1-D sequence
Values to put into `a`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
putmask, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def _nanop(op, fill, a, axis=None):
"""
General operation on arrays with not-a-number values.
Parameters
----------
op : callable
Operation to perform.
fill : float
NaN values are set to fill before doing the operation.
a : array-like
Input array.
axis : {int, None}, optional
Axis along which the operation is computed.
By default the input is flattened.
Returns
-------
y : {ndarray, scalar}
Processed data.
"""
y = array(a, subok=True)
# We only need to take care of NaN's in floating point arrays
if np.issubdtype(y.dtype, np.integer):
return op(y, axis=axis)
mask = isnan(a)
# y[mask] = fill
# We can't use fancy indexing here as it'll mess w/ MaskedArrays
# Instead, let's fill the array directly...
np.putmask(y, mask, fill)
res = op(y, axis=axis)
mask_all_along_axis = mask.all(axis=axis)
# Along some axes, only nan's were encountered. As such, any values
# calculated along that axis should be set to nan.
if mask_all_along_axis.any():
if np.isscalar(res):
res = np.nan
else:
res[mask_all_along_axis] = np.nan
return res
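# --- Hedged illustration (not part of the original source) ---
# The fill-and-reduce trick above, written out for a 1-D nansum: NaNs
# are replaced by the identity of the reduction (0 for a sum) so they
# no longer contribute. The all-NaN handling done above is omitted and
# the helper name is made up for illustration.
def _nansum_1d_sketch(a):
    y = np.array(a, dtype=float)
    y[np.isnan(y)] = 0.0
    return y.sum()
# _nansum_1d_sketch([1.0, np.nan, 2.0])  -> 3.0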
def nansum(a, axis=None):
"""
Return the sum of array elements over a given axis treating
Not a Numbers (NaNs) as zero.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute
the sum of the flattened array.
Returns
-------
y : ndarray
An array with the same shape as a, with the specified axis removed.
If a is a 0-d array, or if axis is None, a scalar is returned with
the same dtype as `a`.
See Also
--------
numpy.sum : Sum across array including Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
If positive or negative infinity are present the result is positive or
negative infinity. But if both positive and negative infinity are present,
the result is Not A Number (NaN).
Arithmetic is modular when using integer types (all elements of `a` must
be finite i.e. no elements that are NaNs, positive infinity and negative
infinity because NaNs are floating point types), and no error is raised
on overflow.
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
When positive infinity and negative infinity are present
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, np.NINF])
nan
"""
return _nanop(np.sum, 0, a, axis)
def nanmin(a, axis=None):
"""
Return the minimum of an array or minimum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired.
axis : int, optional
Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
Returns
-------
nanmin : ndarray
A new array or a scalar array with the result.
See Also
--------
numpy.amin : Minimum across array including any Not a Numbers.
numpy.nanmax : Maximum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
If the input has an integer type, an integer type is returned unless
the input contains NaNs and infinity.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
return _nanop(np.min, np.inf, a, axis)
def nanargmin(a, axis=None):
"""
Return indices of the minimum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
return _nanop(np.argmin, np.inf, a, axis)
def nanmax(a, axis=None):
"""
Return the maximum of an array or maximum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not
an array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
If `a` is a 0-d array, or if axis is None, an ndarray scalar is
returned with the same dtype as `a`.
See Also
--------
numpy.amax : Maximum across array including any Not a Numbers.
numpy.nanmin : Minimum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
If the input has an integer type, an integer type is returned unless
the input contains NaNs and infinity.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
return _nanop(np.max, -np.inf, a, axis)
def nanargmax(a, axis=None):
"""
Return indices of the maximum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
return _nanop(np.argmax, -np.inf, a, axis)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
import sys
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
# return number of input arguments and
# number of default arguments
def _get_nargs(obj):
import re
terr = re.compile(r'.*? takes (exactly|at least) (?P<exargs>(\d+)|(\w+))' +
r' argument(s|) \((?P<gargs>(\d+)|(\w+)) given\)')
def _convert_to_int(strval):
try:
result = int(strval)
except ValueError:
if strval=='zero':
result = 0
elif strval=='one':
result = 1
elif strval=='two':
result = 2
# How high to go? English only?
else:
raise
return result
if not callable(obj):
raise TypeError(
"Object is not callable.")
if sys.version_info[0] >= 3:
# inspect currently fails for binary extensions
# like math.cos. So fall back to other methods if
# it fails.
import inspect
try:
spec = inspect.getargspec(obj)
nargs = len(spec.args)
if spec.defaults:
ndefaults = len(spec.defaults)
else:
ndefaults = 0
if inspect.ismethod(obj):
nargs -= 1
return nargs, ndefaults
except:
pass
if hasattr(obj,'func_code'):
fcode = obj.func_code
nargs = fcode.co_argcount
if obj.func_defaults is not None:
ndefaults = len(obj.func_defaults)
else:
ndefaults = 0
if isinstance(obj, types.MethodType):
nargs -= 1
return nargs, ndefaults
try:
obj()
return 0, 0
except TypeError, msg:
m = terr.match(str(msg))
if m:
nargs = _convert_to_int(m.group('exargs'))
ndefaults = _convert_to_int(m.group('gargs'))
if isinstance(obj, types.MethodType):
nargs -= 1
return nargs, ndefaults
raise ValueError(
"failed to determine the number of arguments for %s" % (obj))
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorize` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If None, the docstring will be the
`pyfunc` one.
Examples
--------
>>> def myfunc(a, b):
... \"\"\"Return a-b if a>b, otherwise return a+b\"\"\"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
"""
def __init__(self, pyfunc, otypes='', doc=None):
self.thefunc = pyfunc
self.ufunc = None
nin, ndefault = _get_nargs(pyfunc)
if nin == 0 and ndefault == 0:
self.nin = None
self.nin_wo_defaults = None
else:
self.nin = nin
self.nin_wo_defaults = nin - ndefault
self.nout = None
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"invalid otype specified")
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
self.lastcallargs = 0
def __call__(self, *args):
# get number of outputs and output types by calling
# the function on the first entries of args
nargs = len(args)
if self.nin:
if (nargs > self.nin) or (nargs < self.nin_wo_defaults):
raise ValueError(
"Invalid number of arguments")
# we need a new ufunc if this is being called with more arguments.
if (self.lastcallargs != nargs):
self.lastcallargs = nargs
self.ufunc = None
self.nout = None
if self.nout is None or self.otypes == '':
newargs = []
for arg in args:
newargs.append(asarray(arg).flat[0])
theout = self.thefunc(*newargs)
if isinstance(theout, tuple):
self.nout = len(theout)
else:
self.nout = 1
theout = (theout,)
if self.otypes == '':
otypes = []
for k in range(self.nout):
otypes.append(asarray(theout[k]).dtype.char)
self.otypes = ''.join(otypes)
# Create ufunc if not already created
if (self.ufunc is None):
self.ufunc = frompyfunc(self.thefunc, nargs, self.nout)
# Convert to object arrays first
newargs = [array(arg,copy=False,subok=True,dtype=object) for arg in args]
if self.nout == 1:
_res = array(self.ufunc(*newargs),copy=False,
subok=True,dtype=self.otypes[0])
else:
_res = tuple([array(x,copy=False,subok=True,dtype=c) \
for x, c in zip(self.ufunc(*newargs), self.otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be integer")
X = array(m, ndmin=2, dtype=float)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None),newaxis)
else:
axis = 1
tup = (newaxis, slice(None))
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=float)
X = concatenate((X,y), axis)
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
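# --- Hedged illustration (not part of the original source) ---
# For row-variables the computation above is: center each row, then
# take the outer product divided by (N - ddof), with ddof defaulting
# to 1. Helper name made up for illustration.
def _cov_rowvar_sketch(m, ddof=1):
    X = np.asarray(m, dtype=float)
    X = X - X.mean(axis=1)[:, np.newaxis]
    N = X.shape[1]
    return np.dot(X, X.T.conj()) / float(N - ddof)
# _cov_rowvar_sketch([[0, 1, 2], [2, 1, 0]])  -> array([[ 1., -1.], [-1.,  1.]])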
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
try:
d = diag(c)
except ValueError: # scalar covariance
return 1
return c/sqrt(multiply.outer(d,d))
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, normalized to one (the value one appears only if the
number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> from numpy import blackman
>>> blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy import clip, log10, array, blackman, linspace
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = linspace(-0.5,0.5,len(A))
>>> response = 20*log10(mag)
>>> response = clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, normalized to one (the value one
appears only if the number of samples is odd), with the first
and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
    discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the
    product of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
    Plot the window and its frequency response (requires matplotlib):
>>> from numpy import clip, log10, array, bartlett, linspace
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = linspace(-0.5,0.5,len(A))
>>> response = 20*log10(mag)
>>> response = clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, normalized to one (the value one
appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hanning window was named for Julius von Hann, an Austrian
    meteorologist. It is also known as the Cosine Bell. Some authors prefer
    that it be called a Hann window, to help avoid confusion with the very
    similar Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> from numpy import hanning
>>> hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = np.linspace(-0.5,0.5,len(A))
>>> response = 20*np.log10(mag)
>>> response = np.clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
# XXX: this docstring is inconsistent with other filter windows, e.g.
# Blackman and Bartlett - they should all follow the same convention for
# clarity. Either use np. for all numpy members (as above), or import all
# numpy members (as in Blackman and Bartlett examples)
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.5-0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
    .. math::  w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hamming window was named for R. W. Hamming, an associate of J. W.
    Tukey, and is described in Blackman and Tukey. It was recommended for
    smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1,float)
n = arange(0,M)
return 0.54-0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1]
def _chbevl(x, vals):
    # Evaluate a Chebyshev series at x with coefficients vals using
    # Clenshaw's recurrence, as in the cephes implementation the
    # coefficients above were taken from.
    b0 = vals[0]
    b1 = 0.0
    for i in xrange(1,len(vals)):
        b2 = b1
        b1 = b0
        b0 = x*b1 - b2 + vals[i]
    return 0.5*(b0 - b2)
def _i0_1(x):
    # Chebyshev approximation of exp(-x)*I0(x) for x in [0, 8].
    return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
    # Chebyshev approximation of exp(-x)*sqrt(x)*I0(x) for x in (8, inf).
    return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is partitioned
into the two intervals [0,8] and (8,inf), and Chebyshev polynomial
expansions are employed in each interval. Relative error on the domain
[0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16
with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions," in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x<0)
x[ind] = -x[ind]
ind = (x<=8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M,beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
    large enough to sample the increasingly narrow spike, otherwise NaNs will
    be returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> from numpy import kaiser
>>> kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy import clip, log10, array, kaiser, linspace
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = linspace(-0.5,0.5,len(A))
>>> response = 20*log10(mag)
>>> response = clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0,M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
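# Illustrative check (added commentary, not part of the original source): with
# beta=0 the Kaiser window reduces to a rectangular window, because
# i0(0) == 1 for every sample, e.g.
#
#   >>> np.kaiser(5, 0)
#   array([ 1.,  1.,  1.,  1.,  1.])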
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a
Lanczos resampling filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.arange(-20., 21.)/5.
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.arange(-200., 201.)/50.
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
    x = np.asanyarray(x)
    # Substitute a tiny value for x == 0 so that sin(y)/y evaluates to ~1
    # there instead of producing a 0/0 division.
    y = pi * where(x == 0, 1.0e-20, x)
    return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a,subok=True,copy=True)
b.sort(0)
return b
def median(a, axis=None, out=None, overwrite_input=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {None, int}, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : {False, True}, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True and the input
is not already an ndarray, an error will be raised.
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if axis is None:
axis = 0
indexer = [slice(None)] * sorted.ndim
index = int(sorted.shape[axis]/2)
if sorted.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(sorted[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None, overwrite_input=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
        Percentile to compute, which must be between 0 and 100 inclusive.
axis : {None, int}, optional
Axis along which the percentiles are computed. The default (axis=None)
        is to compute the percentiles along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : {False, True}, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
       percentile. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True and the input
is not already an ndarray, an error will be raised.
Returns
-------
pcntile : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the qth percentile of V is the qth ranked
value in a sorted copy of V. A weighted average of the two nearest neighbors
is used if the normalized ranking does not match q exactly.
    The same as the median if q=50, the same as the minimum if q=0,
    and the same as the maximum if q=100.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([ 6.5, 4.5, 2.5])
    >>> np.percentile(a, 50, axis=1)
    array([ 7., 2.])
    >>> m = np.percentile(a, 50, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.percentile(a, 50, axis=0, out=m)
    array([ 6.5, 4.5, 2.5])
    >>> m
    array([ 6.5, 4.5, 2.5])
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=1, overwrite_input=True)
    array([ 7., 2.])
    >>> assert not np.all(a==b)
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=None, overwrite_input=True)
    3.5
    >>> assert not np.all(a==b)
"""
a = np.asarray(a)
if q == 0:
return a.min(axis=axis, out=out)
elif q == 100:
return a.max(axis=axis, out=out)
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, q, axis, out)
# handle sequence of q's without calling sort multiple times
def _compute_qth_percentile(sorted, q, axis, out):
if not isscalar(q):
p = [_compute_qth_percentile(sorted, qi, axis, None)
for qi in q]
if out is not None:
out.flat = p
return p
q = q / 100.0
    if (q < 0) or (q > 1):
        raise ValueError("percentile must be in the range [0,100]")
indexer = [slice(None)] * sorted.ndim
Nx = sorted.shape[axis]
index = q*(Nx-1)
i = int(index)
if i == index:
indexer[axis] = slice(i, i+1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i+2)
j = i + 1
weights = array([(j - index), (index - i)],float)
wshape = [1]*sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use add.reduce in both cases to coerce data type as well as
# check and use out array.
return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval
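# Worked example of the interpolation above (added commentary, not part of the
# original source): for sorted data [1, 2, 3, 10] and q=25, index = 0.25*3 =
# 0.75, so i=0, weights=[0.25, 0.75], and the result is 0.25*1 + 0.75*2 = 1.75,
# i.e. a weighted average of the two nearest order statistics.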
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
out : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
    Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
    are taken from the `y` array; by default the x-axis distances between
    points are 1.0, but they can also be provided with the `x` array or the
    `dx` scalar. The return value equals the combined area under the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1,None)
slice2[axis] = slice(None,-1)
try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError: # Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
"""
try:
new = {}
exec 'from %s import %s' % (place, obj) in new
if isinstance(doc, str):
add_docstring(new[obj], doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new[obj], doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new[obj], val[0]), val[1].strip())
except:
pass
# From matplotlib
def meshgrid(x,y):
"""
Return coordinate matrices from two coordinate vectors.
Parameters
----------
x, y : ndarray
Two 1-D arrays representing the x and y coordinates of a grid.
Returns
-------
X, Y : ndarray
For vectors `x`, `y` with lengths ``Nx=len(x)`` and ``Ny=len(y)``,
return `X`, `Y` where `X` and `Y` are ``(Ny, Nx)`` shaped arrays
with the elements of `x` and y repeated to fill the matrix along
the first dimension for `x`, the second for `y`.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> X, Y = np.meshgrid([1,2,3], [4,5,6,7])
>>> X
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
>>> Y
array([[4, 4, 4],
[5, 5, 5],
[6, 6, 6],
[7, 7, 7]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)/(xx**2+yy**2)
"""
x = asarray(x)
y = asarray(y)
numRows, numCols = len(y), len(x) # yes, reversed
x = x.reshape(1,numCols)
X = x.repeat(numRows, axis=0)
y = y.reshape(numRows,1)
Y = y.repeat(numCols, axis=1)
return X, Y
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim-1
if ndim == 0:
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if (obj < 0 or obj >=N):
raise ValueError(
"invalid entry")
        newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1,None)
new[slobj] = arr[slobj2]
elif isinstance(obj, slice):
start, stop, step = obj.indices(N)
numtodel = len(xrange(start, stop, step))
if numtodel <= 0:
if wrap:
                return wrap(arr.copy())
else:
return arr.copy()
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
obj = arange(start, stop, step, dtype=intp)
all = arange(start, stop, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = obj
new[slobj] = arr[slobj2]
else: # default behavior
obj = array(obj, dtype=intp, copy=0, ndmin=1)
all = arange(N, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = obj
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
delete : Delete elements from an array.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim-1
if (ndim == 0):
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if obj < 0 or obj > N:
raise ValueError(
"index (%d) out of range (0<=index<=%d) "\
"in dimension %d" % (obj, N, axis))
        newshape[axis] += 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = obj
new[slobj] = values
slobj[axis] = slice(obj+1,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj,None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif isinstance(obj, slice):
# turn it into a range object
obj = arange(*obj.indices(N),**{'dtype':intp})
# get two sets of indices
# one is the indices which will hold the new stuff
# two is the indices where arr will be copied over
obj = asarray(obj, dtype=intp)
numnew = len(obj)
index1 = obj + arange(numnew)
index2 = setdiff1d(arange(numnew+N),index1)
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = index1
slobj2[axis] = index2
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If `axis`
is not specified, `values` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not given,
both `arr` and `values` are flattened before use.
Returns
-------
out : ndarray
A copy of `arr` with `values` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
|
gpl-3.0
|
andreabduque/GAFE
|
main.py
|
1
|
1148
|
from functions.FE import FE
from fitness import Classifier
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import ga
def testIris():
iris = pd.read_csv("data/iris.data", sep=",")
irisAtts = iris.drop("class", 1)
target = iris["class"]
#Scale to [0,1] before expanding
scaledIris = MinMaxScaler().fit_transform(irisAtts)
bestSingleMatch = {'knn': [(1,5) for x in range(4)], 'cart': [(3,2) for x in range(4)], 'svm': [(7,4) for x in range(4)]}
functionalExp = FE()
for cl in ['knn', 'cart', 'svm']:
model = Classifier(cl, target, folds=10, jobs=6)
print("original accuracy " + cl + " " + str(model.getAccuracy(irisAtts)))
expandedData = functionalExp.expandMatrix(scaledIris, bestSingleMatch[cl])
print("single match expansion accuracy " + cl + " " + str(model.getAccuracy(expandedData)))
gafe = ga.GAFE(model, scaledIris, target, scaled=True)
avg, bestPair = gafe.runGAFE(n_population=21, n_iter=1, verbose=True)
print("gafe " + cl + " " + str(avg) )
def main():
testIris()
if __name__ == "__main__":
main()
|
mit
|
freedomtan/workload-automation
|
wlauto/workloads/telemetry/__init__.py
|
2
|
11987
|
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=attribute-defined-outside-init
import os
import re
import csv
import math
import shutil
import json
import urllib
import stat
from zipfile import is_zipfile, ZipFile
from collections import defaultdict
try:
import pandas as pd
except ImportError:
pd = None
from wlauto import Workload, Parameter
from wlauto.exceptions import WorkloadError, ConfigError
from wlauto.utils.misc import check_output, get_null, get_meansd
from wlauto.utils.types import numeric, identifier
RESULT_REGEX = re.compile(r'RESULT ([^:]+): ([^=]+)\s*=\s*' # preamble and test/metric name
r'(\[([^\]]+)\]|(\S+))' # value
r'\s*(\S+)') # units
TRACE_REGEX = re.compile(r'Trace saved as ([^\n]+)')
# Trace event that signifies rendition of a Frame
FRAME_EVENT = 'SwapBuffersLatency'
TELEMETRY_ARCHIVE_URL = 'http://storage.googleapis.com/chromium-telemetry/snapshots/telemetry.zip'
class Telemetry(Workload):
name = 'telemetry'
description = """
    Executes Google's Telemetry benchmarking framework
Url: https://www.chromium.org/developers/telemetry
From the web site:
Telemetry is Chrome's performance testing framework. It allows you to
perform arbitrary actions on a set of web pages and report metrics about
it. The framework abstracts:
- Launching a browser with arbitrary flags on any platform.
- Opening a tab and navigating to the page under test.
- Fetching data via the Inspector timeline and traces.
- Using Web Page Replay to cache real-world websites so they don't
change when used in benchmarks.
Design Principles
- Write one performance test that runs on all platforms - Windows, Mac,
Linux, Chrome OS, and Android for both Chrome and ContentShell.
- Runs on browser binaries, without a full Chromium checkout, and without
having to build the browser yourself.
- Use WebPageReplay to get repeatable test results.
- Clean architecture for writing benchmarks that keeps measurements and
use cases separate.
- Run on non-Chrome browsers for comparative studies.
This instrument runs telemetry via its ``run_benchmark`` script (which
must be in PATH or specified using ``run_benchmark_path`` parameter) and
parses metrics from the resulting output.
**device setup**
The device setup will depend on whether you're running a test image (in
which case little or no setup should be necessary)
"""
parameters = [
Parameter('run_benchmark_path', default=None,
description="""
This is the path to run_benchmark script which runs a
Telemetry benchmark. If not specified, the assumption will be
that it is in path (i.e. with be invoked as ``run_benchmark``).
"""),
Parameter('test', default='page_cycler.top_10_mobile',
description="""
Specifies the telemetry test to run.
"""),
Parameter('run_benchmark_params', default='',
description="""
                  Additional parameters to be passed to ``run_benchmark``.
"""),
Parameter('run_timeout', kind=int, default=900,
description="""
Timeout for execution of the test.
"""),
Parameter('extract_fps', kind=bool, default=False,
description="""
                  If ``True``, FPS for the run will be computed from the trace (the ``trace`` profiler must be enabled).
"""),
]
def validate(self):
ret = os.system('{} > {} 2>&1'.format(self.run_benchmark_path, get_null()))
if ret > 255:
pass # telemetry found and appears to be installed properly.
elif ret == 127:
raise WorkloadError('run_benchmark not found (did you specify correct run_benchmark_path?)')
else:
            raise WorkloadError('Unexpected error from run_benchmark: {}'.format(ret))
if self.extract_fps and 'trace' not in self.run_benchmark_params:
raise ConfigError('"trace" profiler must be enabled in order to extract FPS for Telemetry')
self._resolve_run_benchmark_path()
def setup(self, context):
self.raw_output = None
self.command = self.build_command()
def run(self, context):
self.logger.debug(self.command)
self.raw_output, _ = check_output(self.command, shell=True, timeout=self.run_timeout, ignore='all')
def update_result(self, context): # pylint: disable=too-many-locals
if not self.raw_output:
self.logger.warning('Did not get run_benchmark output.')
return
raw_outfile = os.path.join(context.output_directory, 'telemetry_raw.out')
with open(raw_outfile, 'w') as wfh:
wfh.write(self.raw_output)
context.add_artifact('telemetry-raw', raw_outfile, kind='raw')
results, artifacts = parse_telemetry_results(raw_outfile)
csv_outfile = os.path.join(context.output_directory, 'telemetry.csv')
with open(csv_outfile, 'wb') as wfh:
writer = csv.writer(wfh)
writer.writerow(['kind', 'url', 'iteration', 'value', 'units'])
for result in results:
writer.writerows(result.rows)
for i, value in enumerate(result.values, 1):
context.add_metric(result.kind, value, units=result.units,
classifiers={'url': result.url, 'time': i})
context.add_artifact('telemetry', csv_outfile, kind='data')
for idx, artifact in enumerate(artifacts):
if is_zipfile(artifact):
zf = ZipFile(artifact)
for item in zf.infolist():
zf.extract(item, context.output_directory)
zf.close()
context.add_artifact('telemetry_trace_{}'.format(idx), path=item.filename, kind='data')
else: # not a zip archive
wa_path = os.path.join(context.output_directory,
os.path.basename(artifact))
shutil.copy(artifact, wa_path)
context.add_artifact('telemetry_artifact_{}'.format(idx), path=wa_path, kind='data')
if self.extract_fps:
self.logger.debug('Extracting FPS...')
_extract_fps(context)
def build_command(self):
device_opts = ''
if self.device.platform == 'chromeos':
if '--remote' not in self.run_benchmark_params:
device_opts += '--remote={} '.format(self.device.host)
if '--browser' not in self.run_benchmark_params:
device_opts += '--browser=cros-chrome '
elif self.device.platform == 'android':
if '--device' not in self.run_benchmark_params and self.device.adb_name:
device_opts += '--device={} '.format(self.device.adb_name)
if '--browser' not in self.run_benchmark_params:
device_opts += '--browser=android-webview-shell '
else:
raise WorkloadError('Currently, telemetry workload supports only ChromeOS or Android devices.')
return '{} {} {} {}'.format(self.run_benchmark_path,
self.test,
device_opts,
self.run_benchmark_params)
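    # Illustrative example of an assembled command (added commentary; the
    # device id below is hypothetical): for an Android device this method
    # would produce something like
    #   run_benchmark page_cycler.top_10_mobile --device=0123456789ABCDEF --browser=android-webview-shell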
def _resolve_run_benchmark_path(self):
# pylint: disable=access-member-before-definition
if self.run_benchmark_path:
if not os.path.exists(self.run_benchmark_path):
raise ConfigError('run_benchmark path "{}" does not exist'.format(self.run_benchmark_path))
else:
self.run_benchmark_path = os.path.join(self.dependencies_directory, 'telemetry', 'run_benchmark')
self.logger.debug('run_benchmark_path not specified using {}'.format(self.run_benchmark_path))
if not os.path.exists(self.run_benchmark_path):
self.logger.debug('Telemetry not found locally; downloading...')
local_archive = os.path.join(self.dependencies_directory, 'telemetry.zip')
urllib.urlretrieve(TELEMETRY_ARCHIVE_URL, local_archive)
zf = ZipFile(local_archive)
zf.extractall(self.dependencies_directory)
if not os.path.exists(self.run_benchmark_path):
raise WorkloadError('Could not download and extract Telemetry')
old_mode = os.stat(self.run_benchmark_path).st_mode
os.chmod(self.run_benchmark_path, old_mode | stat.S_IXUSR)
def _extract_fps(context):
trace_files = [a.path for a in context.iteration_artifacts
if a.name.startswith('telemetry_trace_')]
for tf in trace_files:
name = os.path.splitext(os.path.basename(tf))[0]
fps_file = os.path.join(context.output_directory, name + '-fps.csv')
with open(tf) as fh:
data = json.load(fh)
events = pd.Series([e['ts'] for e in data['traceEvents'] if
FRAME_EVENT == e['name']])
fps = (1000000 / (events - events.shift(1)))
fps.index = events
df = fps.dropna().reset_index()
df.columns = ['timestamp', 'fps']
with open(fps_file, 'w') as wfh:
df.to_csv(wfh, index=False)
context.add_artifact('{}_fps'.format(name), fps_file, kind='data')
context.result.add_metric('{} FPS'.format(name), df.fps.mean(),
units='fps')
context.result.add_metric('{} FPS (std)'.format(name), df.fps.std(),
units='fps', lower_is_better=True)
class TelemetryResult(object):
@property
def average(self):
return get_meansd(self.values)[0]
@property
def std(self):
return get_meansd(self.values)[1]
@property
def rows(self):
for i, v in enumerate(self.values):
yield [self.kind, self.url, i, v, self.units]
def __init__(self, kind=None, url=None, values=None, units=None):
self.kind = kind
self.url = url
self.values = values or []
self.units = units
def __str__(self):
return 'TR({kind},{url},{values},{units})'.format(**self.__dict__)
__repr__ = __str__
def parse_telemetry_results(filepath):
results = []
artifacts = []
with open(filepath) as fh:
for line in fh:
match = RESULT_REGEX.search(line)
if match:
result = TelemetryResult()
result.kind = match.group(1)
result.url = match.group(2)
if match.group(4):
result.values = map(numeric, match.group(4).split(','))
else:
result.values = [numeric(match.group(5))]
result.units = match.group(6)
results.append(result)
match = TRACE_REGEX.search(line)
if match:
artifacts.append(match.group(1))
return results, artifacts
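# Illustrative sketch (added commentary, not part of the original module): a
# RESULT line of the form matched by RESULT_REGEX, with hypothetical values:
#
#   RESULT cold_times: http://www.example.com= [7352,7176,7153] ms
#
# parses to kind='cold_times', url='http://www.example.com',
# values=[7352, 7176, 7153] and units='ms'.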
if __name__ == '__main__':
import sys
from pprint import pprint
path = sys.argv[1]
pprint(parse_telemetry_results(path))
|
apache-2.0
|
PatrickChrist/scikit-learn
|
sklearn/cluster/mean_shift_.py
|
96
|
15434
|
"""Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
        Should be in the range [0, 1]; 0.5 means that the median of all
        pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None, n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
        Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
        Maximum number of iterations per seed point before the clustering
        operation terminates (for that seed point), if it has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
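# Illustrative sketch (added commentary, not part of the original module):
# with bin_size=1, the points [[0.1], [0.2], [2.1]] fall into bins 0 and 2, so
# get_bin_seeds returns the seeds [0.] and [2.] (scaled back by bin_size); the
# order of the returned seeds depends on dict iteration order.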
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the per-seed mean shift runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will tend
towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
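# --- Editor's note: illustrative usage sketch, not part of the original module ---
# Exercises the public MeanShift API defined above on two hypothetical toy
# blobs; bandwidth=2.0 is an assumed value chosen for this synthetic data.
def _example_mean_shift_usage():
    """Illustrative only: fit two blobs, then predict labels for new points."""
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + [5.0, 5.0]])
    ms = MeanShift(bandwidth=2.0, bin_seeding=True)
    ms.fit(X)
    new_labels = ms.predict(np.array([[0.0, 0.0], [5.0, 5.0]]))
    return ms.cluster_centers_, ms.labels_, new_labels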
|
bsd-3-clause
|
Urinx/Machine_Learning
|
SVM/svm.py
|
1
|
8600
|
#! /usr/bin/env python
# coding:utf-8
#########################################
# SVM #
#########################################
from numpy import *
import numpy as np
from random import random
from matplotlib.pyplot import *
from pylab import *
from scipy.optimize import fmin_bfgs
from scipy.optimize import fmin_cg
from scipy.io import loadmat
class ML():
def __init__(self,x=[],y=[]):
self.X=x
self.Y=y
self.Theta=[]
self.Alpha=0.01
self.Iterations=50
self.Lambda=1
def load(self,fname,d=','):
data=loadtxt(fname,delimiter=d)
self.X=data[:,:-1]
self.Y=data[:,-1:]
def loadMat(self,fname):
return loadmat(fname)
def initXY(self,data):
m=data.shape[0]
x=hstack((ones((m,1)),data))
return x,self.Y,m
# Feature Normalize
def Normalization(self,data):
mu=mean(data,0)
sigma=std(data,0)
data_Norm=(data-mu)/sigma
return data_Norm,mu,sigma
def sigmoid(self,z):
return 1/(1+exp(-z))
def sigmoidGradient(self,z):
return self.sigmoid(z)*(1-self.sigmoid(z))
def J(self):
pass
def predict(self,x):
return array([1]+x).dot(self.Theta)
def evaluate(self):
pass
# x,x^2,x^3,....x^p
def polyFeatures(self,x,p):
x_poly=zeros((x.shape[0],p))
for i in xrange(p):
x_poly[:,i:i+1]=x**(i+1)
return x_poly
# x1,x2,x1*x2,...
def mapFeature(self,data,k):
x1,x2=data[:,0:1],data[:,1:]
m=x1.shape[0]
x=ones((m,1))
for i in xrange(1,k+1):
for j in xrange(i+1):
x=hstack((x,(x1**(i-j))*(x2**j)))
return x
def addOne(self,x):
m=x.shape[0]
one=ones((m,1))
return hstack((one,x))
def plot(self):
pass
def show(self):
show()
class SVM(ML):
def __init__(self,fname,x=[],y=[]):
self.Lambda=1
self.Theta=[]
mat=self.loadMat(fname)
self.X=mat['X']
self.Y=mat['y']
if 'Xval' in mat:
self.Xval=mat['Xval']
self.Yval=mat['yval']
#self.Xtest=mat['Xtest']
# x1,x2: column vectors
def linearKernel(self,x1,x2):
sim=x1.T.dot(x2)
return sim
# To find non-linear decision boundaries
def gaussianKernel(self,x1,x2,sigma):
sim=e**(-sum((x1-x2)**2)/(2.0*sigma**2))
return sim
def svmTrain(self,X,Y,C,kernelFunction,tol=1e-3,max_passes=5):
# An SVM classifier using a simplified version of the SMO algorithm
# X: matrix of training examples
# Y: column matrix containing 1 for positive examples and 0 for negative ones
# C: standard SVM regularization parameter
# tol: tolerance value used for determining equality of floating point number
# max_passes: control the number of iterations over the dataset
m,n=X.shape
# Map 0 to -1
# The same data works fine in Octave, but loading it in Python gave
# wrong results, and it took some time to track down the cause:
# Y.dtype is uint8, so assigning -1 overflows.
# Therefore cast to int64 before mapping 0 to -1.
Y=Y.astype('int64')
Y[Y==0]=-1
# Variables
alphas=zeros((m,1))
b=0
E=zeros((m,1))
passes=0
eta=0
L=0
H=0
fcn=kernelFunction.func_name
if fcn=='linearKernel':
K=X.dot(X.T)
elif fcn=='gaussianKernel':
X2=sum(X**2,1).reshape((m,1))
K=X2+X2.T-2*(X.dot(X.T))
K=kernelFunction(1,0)**K
else:
K=zeros((m,m))
for i in xrange(m):
for j in xrange(i,m):
K[i,j]=kernelFunction(X[i,:].T,X[j,:].T)
K[j,i]=K[i,j]
# Train
while passes<max_passes:
num_change_alphas=0
for i in xrange(m):
E[i]=b+sum(alphas*Y*K[:,i:i+1])-Y[i]
if (Y[i]*E[i]<-tol and alphas[i]<C) or (Y[i]*E[i]>tol and alphas[i]>0):
# randomly select j
j=int(floor(m*random()))
while j==i:
j=int(floor(m*random()))
E[j]=b+sum(alphas*Y*K[:,j:j+1])-Y[j]
# Save old alphas
# There is a subtle bug if the code is written like this:
# alpha_i_old=alphas[i]
# alpha_i_old would then be a view that changes whenever alphas[i] changes,
# so take an explicit scalar copy instead:
alpha_i_old=float(alphas[i])
alpha_j_old=float(alphas[j])
# Compute L and H
if Y[i]==Y[j]:
L=max(0,float(alphas[j]+alphas[i]-C))
H=min(C,float(alphas[j]+alphas[i]))
else:
L=max(0,float(alphas[j]-alphas[i]))
H=min(C,float(C+alphas[j]-alphas[i]))
if L==H:
continue
# Compute eta
eta=2*K[i,j]-K[i,i]-K[j,j]
if eta>=0:
continue
# Compute and clip new value for alpha[j]
alphas[j]=alphas[j]-(Y[j]*(E[i]-E[j]))/eta
alphas[j]=min(H,float(alphas[j]))
alphas[j]=max(L,float(alphas[j]))
# Check if change in alpha is significant
if abs(alphas[j]-alpha_j_old)<tol:
alphas[j]=alpha_j_old
continue
# Determine value for alpha[i]
alphas[i]=alphas[i]+Y[i]*Y[j]*(alpha_j_old-alphas[j])
# Compute b1 and b2
b1=b-E[i]-Y[i]*(alphas[i]-alpha_i_old)*K[i,j]-Y[j]*(alphas[j]-alpha_j_old)*K[i,j]
b2=b-E[j]-Y[i]*(alphas[i]-alpha_i_old)*K[i,j]-Y[j]*(alphas[j]-alpha_j_old)*K[j,j]
# Compute b
if 0<alphas[i] and alphas[i]<C :
b=b1
elif 0<alphas[j] and alphas[j]<C:
b=b2
else:
b=(b1+b2)/2
num_change_alphas+=1
if num_change_alphas==0:
passes+=1
else:
passes=0
class Model():
def __init__(self):
self.X=array([])
self.Y=array([])
self.kernelFunction=0
self.b=0
self.alphas=0
self.w=0
# Save the model
model=Model()
#idx=alphas>0
#model.X=X[idx,:]
#model.Y=Y[idx,:]
idx=where(alphas>0)[0]
model.X=X[idx]
model.Y=Y[idx]
model.kernelFunction=kernelFunction
model.b=b
model.alphas=alphas[idx]
model.w=((alphas*Y).T.dot(X)).T
return model
# x: m x n matrix, each example is a row
# pred: m x 1 column of prediction of {0,1} values
def svmPredict(self,model,x):
m=x.shape[0]
M=model.X.shape[0]
p=zeros((m,1))
pred=zeros((m,1))
if model.kernelFunction.func_name=='linearKernel':
p=x.dot(model.w)+model.b
elif model.kernelFunction.func_name=='gaussianKernel':
x1=sum(x**2,1).reshape((m,1))
x2=sum(model.X**2,1).reshape((M,1)).T
K=x1+(x2-2*x.dot(model.X.T))
K=model.kernelFunction(1,0)**K
K=model.Y.T*K
K=model.alphas.T*K
p=sum(K,1)
else:
for i in xrange(m):
prediction=0
for j in xrange(M):
prediction+=model.alphas[j]*model.Y[j]*model.kernelFunction(x[i:i+1,:].T,model.X[j:j+1,:].T)
p[i]=prediction+model.b
pred[p>=0]=1
pred[p<0]=0
return pred
# Determine the best C and sigma parameter to use
def selectParams(self,x,y,xval,yval):
C=1
sigma=0.3
m=1
param=[0.01,0.03,0.1,0.3,1,3,10,30]
for s in param:
def gaussianKernel(x1,x2):
return self.gaussianKernel(x1,x2,s)
for c in param:
model=self.svmTrain(x,y,c,gaussianKernel)
prediction=self.svmPredict(model,xval)
tmp=mean(double(prediction!=yval))
if tmp<m:
m=tmp
C=c
sigma=s
return C,sigma
#################
# Plot Function #
#################
def plotData(self):
pos,neg=where(self.Y==1),where(self.Y==0)
plot(self.X[pos,0],self.X[pos,1],'k+',markersize=7,linewidth=1)
plot(self.X[neg,0],self.X[neg,1],'ro',markersize=7,linewidth=1)
return self
# plot a linear decision boundary learned by the SVM
def visualizeBoundaryLinear(self,x,y,model):
w=model.w
b=model.b
xp=array([min(x[:,0]),max(x[:,0])])
yp=-(w[0]*xp+b)/w[1]
self.plotData()
plot(xp,yp,'g-')
return self
def visualizeBoundary(self,x,y,model):
self.plotData()
x1plot=linspace(min(x[:,0]),max(x[:,0]),100)
x2plot=linspace(min(x[:,1]),max(x[:,1]),100)
x1,x2=meshgrid(x1plot,x2plot)
vals=zeros(x1.shape)
for i in xrange(x1.shape[1]):
this_x=hstack((x1[:,i:i+1],x2[:,i:i+1]))
vals[:,i:i+1]=self.svmPredict(model,this_x)
contour(x1,x2,vals,(0,0))
return self
##################
def trainLinearSVM(self):
x,y=self.X,self.Y
C=1
kernel=self.linearKernel
model=self.svmTrain(x,y,C,kernel,1e-3,20)
self.visualizeBoundaryLinear(x,y,model).show()
def trainNonLinearSVM(self):
x,y=self.X,self.Y
C=1
sigma=0.1
def gaussianKernel(x1,x2):
return self.gaussianKernel(x1,x2,sigma)
kernel=gaussianKernel
model=self.svmTrain(x,y,C,kernel)
self.visualizeBoundary(x,y,model).show()
def findBestParams(self):
x,y=self.X,self.Y
xval,yval=self.Xval,self.Yval
C,sigma=self.selectParams(x,y,xval,yval)
def gaussianKernel(x1,x2):
return self.gaussianKernel(x1,x2,sigma)
model=self.svmTrain(x,y,C,gaussianKernel)
self.visualizeBoundary(x,y,model).show()
if __name__=='__main__':
test=SVM('ex6data1.mat')
#test.plotData().show()
#test.trainLinearSVM()
test2=SVM('ex6data2.mat')
#test2.plotData().show()
#test2.trainNonLinearSVM()
test3=SVM('ex6data3.mat')
#test3.plotData().show()
#test3.findBestParams()
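# --- Editor's note: illustrative sketch, not part of the original script ---
# Quick numeric check mirroring SVM.gaussianKernel, which computes
# K(x1, x2) = exp(-||x1 - x2||**2 / (2 * sigma**2)).
# The vectors and sigma below are arbitrary example values.
def _example_gaussian_kernel():
    """Illustrative only: should return roughly 0.3247."""
    x1 = array([1.0, 2.0, 1.0])
    x2 = array([0.0, 4.0, -1.0])
    sigma = 2.0
    return e**(-sum((x1 - x2)**2) / (2.0 * sigma**2))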
|
gpl-2.0
|
johndamen/pyeasyplot
|
easyplot/gui/basewidgets.py
|
1
|
13141
|
from PyQt4 import QtGui, QtCore
from collections import OrderedDict
import numpy as np
import re
from matplotlib import colors, cm
class InvalidColorError(Exception): pass
class SettingWidget(QtGui.QWidget):
value_changed = QtCore.pyqtSignal(object)
def __init__(self, val=None, parent=None, **kwargs):
super().__init__(parent=parent)
self.build(**kwargs)
if val is not None:
self.set_value(val)
def build(self, **kwargs):
pass
def value(self):
pass
def set_value(self, v):
pass
def is_empty(self):
pass
def changed(self, *args):
self.value_changed.emit(self.value())
class Fieldset(SettingWidget):
def __init__(self, ftype, val=(), parent=None, field_width=None, **fkwargs):
self.ftype = ftype
self.fkwargs = fkwargs
val = tuple(val)
self.fieldcount = len(val)
super().__init__(val=val, parent=parent, field_width=field_width)
def build(self, field_width=None):
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(3)
self.fields = []
for i in range(self.fieldcount):
field = self.ftype(val=None, **self.fkwargs)
if field_width is not None:
field.setFixedWidth(field_width)
self.fields.append(field)
field.value_changed.connect(self.changed)
self.layout.addWidget(field)
def value(self):
return tuple(f.value() for f in self.fields)
def set_value(self, v):
if len(v) != self.fieldcount:
raise ValueError('invalid value for fieldset of length {}'.format(self.fieldcount))
for i, f in enumerate(self.fields):
f.set_value(v[i])
def __getitem__(self, item):
return self.fields[item]
class Text(SettingWidget):
def __init__(self, val, fmt='{}', parent=None, **kwargs):
self.fmt = fmt
super().__init__(val, parent=parent, **kwargs)
def build(self, onchange=False):
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.textfield = QtGui.QLineEdit()
if onchange:
self.textfield.textChanged.connect(self.changed)
else:
self.textfield.editingFinished.connect(self.changed)
self.layout.addWidget(self.textfield)
def value(self):
return str(self.textfield.text()) or None
def set_value(self, v, ifempty=False):
if ifempty and not self.is_empty():
return
self.textfield.setText(self._format_value(v))
def is_empty(self):
return str(self.textfield.text()) == ''
def _format_value(self, v):
return self.fmt.format(v)
class TextOrNone(Text):
def __init__(self, val=None, **kwargs):
super().__init__(val, **kwargs)
def value(self):
s = str(self.textfield.text())
if s == '':
return None
return self._cast(s)
def _cast(self, v):
return str(v)
class Int(TextOrNone):
def _cast(self, v):
return int(v)
class Float(TextOrNone):
def _cast(self, v):
return float(v)
class MinMax(SettingWidget):
def __init__(self, *args, fmt='{}', parent=None):
if len(args) == 1:
vmin, vmax = args[0]
elif len(args) > 2:
raise ValueError('invalid number of values')
else:
vmin, vmax = args
self.vmin = vmin
self.vmax = vmax
self.fmt = fmt
super().__init__(parent=parent)
def build(self):
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.vmin_field = Float(self.vmin)
self.vmin_field.value_changed.connect(self.changed)
self.layout.addWidget(self.vmin_field)
self.vmax_field = Float(self.vmax)
self.vmax_field.value_changed.connect(self.changed)
self.layout.addWidget(self.vmax_field)
def value(self):
return self.vmin_field.value(), self.vmax_field.value()
def setValue(self, *args, ifempty=False):
if ifempty and not self.is_empty():
return
if len(args) == 1:
vmin, vmax = args[0]
elif len(args) > 2:
raise ValueError('invalid number of values')
else:
vmin, vmax = args
self.vmin_field.set_value(vmin)
self.vmax_field.set_value(vmax)
def isempty(self):
return self.vmin_field.is_empty() and self.vmax_field.is_empty()
class ListedSlider(SettingWidget):
def __init__(self, values, default=None, fmt='{:.2f}', parent=None):
self.values = values
if default is None:
default = self.values.size // 2
self.i = int(default)
self.fmt = fmt
super().__init__(parent=parent)
def build(self):
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.slider = QtGui.QSlider()
self.slider.setTickPosition(QtGui.QSlider.TicksBelow)
self.slider.setOrientation(QtCore.Qt.Horizontal)
self.slider.setMinimum(0)
self.slider.setMaximum(self.values.size-1)
self.slider.setSingleStep(1)
self.slider.setTickInterval(1)
self.slider.setSliderPosition(self.i)
self.slider.valueChanged.connect(self.changed)
self.slider.setFixedWidth(100)
self.layout.addWidget(self.slider)
self.label = QtGui.QLabel(self.format_value())
self.label.setFixedWidth(60)
self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.layout.addWidget(self.label)
def changed(self, *args):
self.set_from_slider(*args)
super().changed(*args)
def slider2value(self, v):
return self.vmin + v * self.step
def value2slider(self, v):
return np.floor(1e-5+(v - self.vmin) / self.step)
def set_from_slider(self, i):
self.i = int(i)
self.label.setText(self.format_value())
def value(self):
return self.values[self.i]
def format_value(self):
return self.fmt.format(self.value())
class RangeSlider(ListedSlider):
def __init__(self, vmin, vmax, step, default=None, decimals=None, parent=None):
values = np.arange(vmin, vmax+step, step)
if decimals is None:
decimals = int(max(0, np.ceil(-np.log10(step))))
fmt = '{:.'+str(decimals)+'f}'
super().__init__(values, default=default, fmt=fmt, parent=parent)
class Dropdown(SettingWidget):
def __init__(self, opts, default_index=0, parent=None):
if isinstance(opts, list):
if opts and isinstance(opts[0], str):
opts = [(o, o) for o in opts]
opts = OrderedDict(opts)
elif not isinstance(opts, OrderedDict):
raise TypeError('invalid options')
self.opts = opts
self.default_index = default_index
super().__init__(parent=parent)
def build(self):
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.dd = QtGui.QComboBox()
self.dd.addItem('default')
self.dd.addItems(list(self.opts.keys()))
self.dd.setCurrentIndex(self.default_index)
self.dd.currentIndexChanged.connect(self.changed)
self.layout.addWidget(self.dd)
def value(self):
i = self.dd.currentIndex()
if i == 0:
return None
else:
return list(self.opts.values())[i-1]
class Color(SettingWidget):
def __init__(self, color=None, **kwargs):
self.colortuple_patterns = [
(self.parse_int, re.compile(r'^\(?([0-9]+),\s*([0-9]+),\s*([0-9]+)\)?\s*$')),
(self.parse_float, re.compile(r'^\(?([0-9\.]+),\s*([0-9\.]+),\s*([0-9\.]+)\)?\s*$'))]
self.color = color
super().__init__(**kwargs)
def scale_colorvalue(self, v):
scaled_v = float(v)/255
return max(min(scaled_v, 1), 0)
def build(self):
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.textfield = QtGui.QLineEdit(self.format_color(self.color))
self.textfield.editingFinished.connect(self.changed)
self.textfield.setFixedWidth(120)
self.layout.addWidget(self.textfield)
self.pick_button = QtGui.QPushButton('...')
self.pick_button.clicked.connect(self.pick)
self.layout.addWidget(self.pick_button)
def parse_int(self, v):
scaled_v = float(v)/255
return self.parse_float(scaled_v)
def parse_float(self, v):
v = float(v)
if v > 1 or v < 0:
raise ValueError(str(v))
return v
def parse_color_tuple(self, t):
if len(t) != 3:
raise InvalidColorError(str(t))
if all(isinstance(v, int) for v in t):
val_parser = self.parse_int
elif all(isinstance(v, float) for v in t):
val_parser = self.parse_float
else:
raise InvalidColorError(str(t))
return tuple(val_parser(v) for v in t)
def parse_color_str(self, s):
for t, p in self.colortuple_patterns:
m = p.match(s)
if not m:
continue
try:
return tuple(t(item) for item in m.groups())
except ValueError:
raise InvalidColorError(s)
if colors.is_color_like(s):
return s
else:
raise InvalidColorError(s)
def parse_color(self, v):
if v is None:
return v
try:
if isinstance(v, str):
return self.parse_color_str(v)
elif isinstance(v, tuple):
return self.parse_color_tuple(v)
except InvalidColorError as e:
QtGui.QMessageBox.warning(self, 'invalid color', 'invalid color value: {}'.format(e))
self.textfield.setText(self.format_color(self.color))
def format_color(self, c):
if c is None:
return ''
if isinstance(c, tuple) and len(c) == 3:
if all(isinstance(v, float) for v in c):
return '({:.2f}, {:.2f}, {:.2f})'.format(*c)
else:
return '({}, {}, {})'.format(*c)
return str(c)
@property
def color(self):
return self._color
@color.setter
def color(self, v):
self._color = self.parse_color(v)
def pick(self):
color = QtGui.QColorDialog.getColor()
rgb = (self.scale_colorvalue(color.red()),
self.scale_colorvalue(color.green()),
self.scale_colorvalue(color.blue()))
self.color = rgb
self.textfield.setText(self.format_color(rgb))
self.changed()
def value(self):
s = str(self.textfield.text())
if s == '':
return None
else:
self.color = s
return self.color
class Colormap(Dropdown):
def __init__(self, default, parent=None):
opts = OrderedDict(self.list_colormaps())
if isinstance(default, str):
default_cmap = cm.get_cmap(default)
elif isinstance(default, colors.Colormap):
default, default_cmap = default.name, default
opts[default] = default_cmap
default_index = list(opts.keys()).index(default)
super().__init__(opts, default_index=default_index, parent=parent)
@classmethod
def list_colormaps(cls):
return [(name, c) for name, c in cm.__dict__.items() if isinstance(c, colors.Colormap)]
class Checkbox(SettingWidget):
def __init__(self, checked):
super().__init__(checked)
def build(self):
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.cb = QtGui.QCheckBox()
self.cb.setChecked(False)
self.cb.toggled.connect(self.changed)
self.layout.addWidget(self.cb)
def set_value(self, b):
self.cb.setChecked(bool(b))
def value(self):
return self.cb.isChecked()
class ClickableLabel(QtGui.QLabel):
clicked = QtCore.pyqtSignal()
def mousePressEvent(self, QMouseEvent):
self.clicked.emit()
class ToggleLabel(ClickableLabel):
toggled = QtCore.pyqtSignal(bool)
def __init__(self, *args, selected=False, toggle_internal=True, **kwargs):
super().__init__(*args, **kwargs)
self.set_selected(selected)
self.toggle_internal = toggle_internal
self.clicked.connect(self.toggle)
def toggle(self):
print('toggled', self)
if self.toggle_internal:
self.set_selected(not self.selected)
self.toggled.emit(self.selected)
def set_selected(self, b):
self.selected = b
if b:
self.setStyleSheet('''text-decoration: underline''')
else:
self.setStyleSheet('''''')
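# --- Editor's note: illustrative usage sketch, not part of the original module ---
# Requires a running Qt event loop; the widget choice and values below are
# arbitrary example assumptions.
def _example_range_slider():
    """Illustrative only: show a RangeSlider from 0 to 1 in steps of 0.05."""
    import sys
    app = QtGui.QApplication(sys.argv)
    slider = RangeSlider(0.0, 1.0, 0.05, default=10)
    slider.value_changed.connect(print)  # prints the new value on change
    slider.show()
    return app.exec_()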
|
gpl-3.0
|
plaidml/plaidml
|
networks/keras/examples/mnist_swwae.py
|
1
|
7875
|
'''Trains a stacked what-where autoencoder built on residual blocks on the
MNIST dataset. It exemplifies two influential methods that have been developed
in the past few years.
The first is the idea of properly 'unpooling.' During any max pool, the
exact location (the 'where') of the maximal value in a pooled receptive field
is lost; however, it can be very useful in the overall reconstruction of an
input image. Therefore, if the 'where' is handed from the encoder
to the corresponding decoder layer, features being decoded can be 'placed' in
the right location, allowing for reconstructions of much higher fidelity.
References:
[1]
'Visualizing and Understanding Convolutional Networks'
Matthew D Zeiler, Rob Fergus
https://arxiv.org/abs/1311.2901v3
[2]
'Stacked What-Where Auto-encoders'
Junbo Zhao, Michael Mathieu, Ross Goroshin, Yann LeCun
https://arxiv.org/abs/1506.02351v8
The second idea exploited here is that of residual learning. Residual blocks
ease the training process by allowing skip connections that give the network
the ability to be as linear (or non-linear) as the data sees fit. This allows
for much deeper networks to be easily trained. The residual element seems to
be advantageous in the context of this example as it allows a nice symmetry
between the encoder and decoder. Normally, in the decoder, the final
projection to the space where the image is reconstructed is linear, however
this does not have to be the case for a residual block as the degree to which
its output is linear or non-linear is determined by the data it is fed.
However, in order to cap the reconstruction in this example, a hard sigmoid is
applied as a bias because we know the MNIST digits are mapped to [0,1].
References:
[3]
'Deep Residual Learning for Image Recognition'
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
https://arxiv.org/abs/1512.03385v1
[4]
'Identity Mappings in Deep Residual Networks'
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
https://arxiv.org/abs/1603.05027v3
'''
from __future__ import print_function
import numpy as np
import keras.callbacks
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Activation
from keras.layers import UpSampling2D, Conv2D, MaxPooling2D
from keras.layers import Input, BatchNormalization, ELU
# import matplotlib.pyplot as plt
import keras.backend as K
from keras import layers
from example_correctness_test_utils import TrainingHistory, StopwatchManager
def convresblock(x, nfeats=8, ksize=3, nskipped=2, elu=True):
"""The proposed residual block from [4].
Running with elu=True will use ELU nonlinearity and running with
elu=False will use BatchNorm + RELU nonlinearity. While ELU's are fast
due to the fact they do not suffer from BatchNorm overhead, they may
overfit because they do not offer the stochastic element of the batch
formation process of BatchNorm, which acts as a good regularizer.
# Arguments
x: 4D tensor, the tensor to feed through the block
nfeats: Integer, number of feature maps for conv layers.
ksize: Integer, width and height of conv kernels in first convolution.
nskipped: Integer, number of conv layers for the residual function.
elu: Boolean, whether to use ELU or BN+RELU.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)`
# Output shape
4D tensor with shape:
`(batch, filters, rows, cols)`
"""
y0 = Conv2D(nfeats, ksize, padding='same')(x)
y = y0
for i in range(nskipped):
if elu:
y = ELU()(y)
else:
y = BatchNormalization(axis=1)(y)
y = Activation('relu')(y)
y = Conv2D(nfeats, 1, padding='same')(y)
return layers.add([y0, y])
def getwhere(x):
''' Calculate the 'where' mask that contains switches indicating which
index contained the max value when MaxPool2D was applied. Using the
gradient of the sum is a nice trick to keep everything high level.'''
y_prepool, y_postpool = x
return K.gradients(K.sum(y_postpool), y_prepool)
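# Editor's note (illustrative): for a 1D toy case, if y_prepool = [1, 3, 2, 0]
# and max pooling with size 2 gives y_postpool = [3, 2], then
# d(sum(y_postpool)) / d(y_prepool) = [0, 1, 1, 0], i.e. exactly the 'where'
# mask marking which pre-pool entries were the pooled maxima.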
if K.backend() == 'tensorflow':
raise RuntimeError('This example can only run with the '
'Theano backend for the time being, '
'because it requires taking the gradient '
'of a gradient, which isn\'t '
'supported for all TF ops.')
# This example assume 'channels_first' data format.
K.set_image_data_format('channels_first')
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# The size of the kernel used for the MaxPooling2D
pool_size = 2
# The total number of feature maps at each layer
nfeats = [8, 16, 32, 64, 128]
# The sizes of the pooling kernel at each layer
pool_sizes = np.array([1, 1, 1, 1, 1]) * pool_size
# The convolution kernel size
ksize = 3
# Number of epochs to train for
epochs = 1
# Batch size during training
batch_size = 128
if pool_size == 2:
# if using a 5 layer net of pool_size = 2
x_train = np.pad(x_train, [[0, 0], [0, 0], [2, 2], [2, 2]], mode='constant')
x_test = np.pad(x_test, [[0, 0], [0, 0], [2, 2], [2, 2]], mode='constant')
nlayers = 5
elif pool_size == 3:
# if using a 3 layer net of pool_size = 3
x_train = x_train[:, :, :-1, :-1]
x_test = x_test[:, :, :-1, :-1]
nlayers = 3
else:
import sys
sys.exit('Script supports pool_size of 2 and 3.')
# Shape of input to train on (note that model is fully convolutional however)
input_shape = x_train.shape[1:]
# The final list of the size of axis=1 for all layers, including input
nfeats_all = [input_shape[0]] + nfeats
# First build the encoder, all the while keeping track of the 'where' masks
img_input = Input(shape=input_shape)
# We push the 'where' masks to the following list
wheres = [None] * nlayers
y = img_input
for i in range(nlayers):
y_prepool = convresblock(y, nfeats=nfeats_all[i + 1], ksize=ksize)
y = MaxPooling2D(pool_size=(pool_sizes[i], pool_sizes[i]))(y_prepool)
wheres[i] = layers.Lambda(getwhere, output_shape=lambda x: x[0])([y_prepool, y])
# Now build the decoder, and use the stored 'where' masks to place the features
for i in range(nlayers):
ind = nlayers - 1 - i
y = UpSampling2D(size=(pool_sizes[ind], pool_sizes[ind]))(y)
y = layers.multiply([y, wheres[ind]])
y = convresblock(y, nfeats=nfeats_all[ind], ksize=ksize)
# Use hard_sigmoid to clip the range of the reconstruction
y = Activation('hard_sigmoid')(y)
# Define the model and its mean squared error loss, and compile it with Adam
model = Model(img_input, y)
model.compile('adam', 'mse')
history = TrainingHistory()
sw_manager = StopwatchManager(stop_watch, compile_stop_watch)
# Fit the model
model.fit(x_train,
x_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, x_test),
callbacks=[history, sw_manager])
output.contents = np.array([history.acc, history.loss, history.val_acc, history.val_loss])
# Plot
"""
x_recon = model.predict(x_test[:25])
x_plot = np.concatenate((x_test[:25], x_recon), axis=1)
x_plot = x_plot.reshape((5, 10, input_shape[-2], input_shape[-1]))
x_plot = np.vstack([np.hstack(x) for x in x_plot])
plt.figure()
plt.axis('off')
plt.title('Test Samples: Originals/Reconstructions')
plt.imshow(x_plot, interpolation='none', cmap='gray')
plt.savefig('reconstructions.png')
"""
|
apache-2.0
|
ntvis/tushare
|
tushare/datayes/IV.py
|
10
|
3423
|
# -*- coding:utf-8 -*-
"""
DataYes (通联数据)
Created on 2015/10/12
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class IV():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def DerIv(self, beginDate='', endDate='', optID='', SecID='', field=''):
"""
Raw implied volatility data, including option price, cumulative volume, open interest, implied volatility, etc.
"""
code, result = self.client.getData(vs.DERIV%(beginDate, endDate, optID, SecID, field))
return _ret_data(code, result)
def DerIvHv(self, beginDate='', endDate='', SecID='', period='', field=''):
"""
Historical volatility: close-to-close historical volatility over various time periods.
"""
code, result = self.client.getData(vs.DERIVHV%(beginDate, endDate, SecID, period, field))
return _ret_data(code, result)
def DerIvIndex(self, beginDate='', endDate='', SecID='', period='', field=''):
"""
Implied volatility index: the primary measure of the average volatility of at-the-money options expiring in 30 to 1080 days.
"""
code, result = self.client.getData(vs.DERIVINDEX%(beginDate, endDate, SecID, period, field))
return _ret_data(code, result)
def DerIvIvpDelta(self, beginDate='', endDate='', SecID='', delta='', period='', field=''):
"""
Implied volatility surface (based on parametric smoothing curves), standardized by delta (0.1 to 0.9 in steps of 0.05) and expiry (1 month to 3 years).
"""
code, result = self.client.getData(vs.DERIVIVPDELTA%(beginDate, endDate, SecID, delta, period, field))
return _ret_data(code, result)
def DerIvParam(self, beginDate='', endDate='', SecID='', expDate='', field=''):
"""
Parameterized implied volatility surface: volatility curves fitted with a quadratic equation and smoothed at each expiry (a, b, c curve coefficients).
"""
code, result = self.client.getData(vs.DERIVPARAM%(beginDate, endDate, SecID, expDate, field))
return _ret_data(code, result)
def DerIvRawDelta(self, beginDate='', endDate='', SecID='', delta='', period='', field=''):
"""
Implied volatility surface (based on raw implied volatility), standardized by delta (0.1 to 0.9 in steps of 0.05) and expiry (1 month to 3 years).
"""
code, result = self.client.getData(vs.DERIVRAWDELTA%(beginDate, endDate, SecID, delta, period, field))
return _ret_data(code, result)
def DerIvSurface(self, beginDate='', endDate='', SecID='', contractType='', field=''):
"""
Implied volatility surface (by moneyness), standardized by moneyness. Strike range is -60% to +60% in steps of 5%; expiry range is 1 month to 3 years.
"""
code, result = self.client.getData(vs.DERIVSURFACE%(beginDate, endDate, SecID, contractType, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
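# --- Editor's note: illustrative usage sketch, not part of the original module ---
# Assumes a valid DataYes token has already been stored via tushare.util.upass;
# the date format and the SecID '510050.XSHG' below are hypothetical examples.
def _example_iv_usage():
    """Illustrative only: fetch raw implied volatility for one security."""
    iv = IV()
    return iv.DerIv(beginDate='20150101', endDate='20150301',
                    SecID='510050.XSHG')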
|
bsd-3-clause
|
saketkc/statsmodels
|
statsmodels/datasets/utils.py
|
25
|
10983
|
from statsmodels.compat.python import (range, StringIO, urlopen,
HTTPError, URLError, lrange,
cPickle, urljoin, BytesIO)
import sys
import shutil
from os import environ
from os import makedirs
from os.path import expanduser
from os.path import exists
from os.path import join
import numpy as np
from numpy import array
from pandas import read_csv, DataFrame, Index
def webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True):
"""
Download and return an example dataset from Stata.
Parameters
----------
data : str
Name of dataset to fetch.
baseurl : str
The base URL to the stata datasets.
as_df : bool
If True, returns a `pandas.DataFrame`
Returns
-------
dta : Record Array
A record array containing the Stata dataset.
Examples
--------
>>> dta = webuse('auto')
Notes
-----
Make sure baseurl has a trailing forward slash. Does not do any
error checking on the response URLs.
"""
# lazy imports
from statsmodels.iolib import genfromdta
url = urljoin(baseurl, data+'.dta')
dta = urlopen(url)
dta = BytesIO(dta.read()) # make it truly file-like
if as_df: # could make this faster if we don't process dta twice?
return DataFrame.from_records(genfromdta(dta))
else:
return genfromdta(dta)
class Dataset(dict):
def __init__(self, **kw):
# define some default attributes, so pylint can find them
self.endog = None
self.exog = None
self.data = None
self.names = None
dict.__init__(self, kw)
self.__dict__ = self
# Some datasets have string variables. If you want a raw_data
# attribute you must create this in the dataset's load function.
try: # some datasets have string variables
self.raw_data = self.data.view((float, len(self.names)))
except:
pass
def __repr__(self):
return str(self.__class__)
def process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):
names = list(data.dtype.names)
if isinstance(endog_idx, int):
endog = array(data[names[endog_idx]], dtype=dtype)
endog_name = names[endog_idx]
endog_idx = [endog_idx]
else:
endog_name = [names[i] for i in endog_idx]
if stack:
endog = np.column_stack(data[field] for field in endog_name)
else:
endog = data[endog_name]
if exog_idx is None:
exog_name = [names[i] for i in range(len(names))
if i not in endog_idx]
else:
exog_name = [names[i] for i in exog_idx]
if stack:
exog = np.column_stack(data[field] for field in exog_name)
else:
exog = data[exog_name]
if dtype:
endog = endog.astype(dtype)
exog = exog.astype(dtype)
dataset = Dataset(data=data, names=names, endog=endog, exog=exog,
endog_name=endog_name, exog_name=exog_name)
return dataset
def process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None,
index_idx=None):
data = DataFrame(data, dtype=dtype)
names = data.columns
if isinstance(endog_idx, int):
endog_name = names[endog_idx]
endog = data[endog_name]
if exog_idx is None:
exog = data.drop([endog_name], axis=1)
else:
exog = data.filter(names[exog_idx])
else:
endog = data.ix[:, endog_idx]
endog_name = list(endog.columns)
if exog_idx is None:
exog = data.drop(endog_name, axis=1)
elif isinstance(exog_idx, int):
exog = data.filter([names[exog_idx]])
else:
exog = data.filter(names[exog_idx])
if index_idx is not None: # NOTE: will have to be improved for dates
endog.index = Index(data.ix[:, index_idx])
exog.index = Index(data.ix[:, index_idx])
data = data.set_index(names[index_idx])
exog_name = list(exog.columns)
dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog,
endog_name=endog_name, exog_name=exog_name)
return dataset
def _maybe_reset_index(data):
"""
All the Rdatasets have the integer row.labels from R if there is no
real index. Strip this for a zero-based index
"""
if data.index.equals(Index(lrange(1, len(data) + 1))):
data = data.reset_index(drop=True)
return data
def _get_cache(cache):
if cache is False:
# do not do any caching or load from cache
cache = None
elif cache is True: # use default dir for cache
cache = get_data_home(None)
else:
cache = get_data_home(cache)
return cache
def _cache_it(data, cache_path):
if sys.version_info[0] >= 3:
# for some reason encode("zip") won't work for me in Python 3?
import zlib
# use protocol 2 so can open with python 2.x if cached in 3.x
open(cache_path, "wb").write(zlib.compress(cPickle.dumps(data,
protocol=2)))
else:
open(cache_path, "wb").write(cPickle.dumps(data).encode("zip"))
def _open_cache(cache_path):
if sys.version_info[0] >= 3:
# NOTE: don't know why but decode('zip') doesn't work on my
# Python 3 build
import zlib
data = zlib.decompress(open(cache_path, 'rb').read())
# return as bytes object encoded in utf-8 for cross-compat of cached
data = cPickle.loads(data).encode('utf-8')
else:
data = open(cache_path, 'rb').read().decode('zip')
data = cPickle.loads(data)
return data
def _urlopen_cached(url, cache):
"""
Tries to load data from cache location otherwise downloads it. If it
downloads the data and cache is not None then it will put the downloaded
data in the cache path.
"""
from_cache = False
if cache is not None:
cache_path = join(cache,
url.split("://")[-1].replace('/', ',') + ".zip")
try:
data = _open_cache(cache_path)
from_cache = True
except:
pass
# not using the cache or didn't find it in cache
if not from_cache:
data = urlopen(url).read()
if cache is not None: # then put it in the cache
_cache_it(data, cache_path)
return data, from_cache
def _get_data(base_url, dataname, cache, extension="csv"):
url = base_url + (dataname + ".%s") % extension
try:
data, from_cache = _urlopen_cached(url, cache)
except HTTPError as err:
if '404' in str(err):
raise ValueError("Dataset %s was not found." % dataname)
else:
raise err
data = data.decode('utf-8', 'strict')
return StringIO(data), from_cache
def _get_dataset_meta(dataname, package, cache):
# get the index, you'll probably want this cached because you have
# to download info about all the data to get info about any of the data...
index_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/master/"
"datasets.csv")
data, _ = _urlopen_cached(index_url, cache)
# Python 3
if sys.version[0] == '3': # pragma: no cover
data = data.decode('utf-8', 'strict')
index = read_csv(StringIO(data))
idx = np.logical_and(index.Item == dataname, index.Package == package)
dataset_meta = index.ix[idx]
return dataset_meta["Title"].item()
def get_rdataset(dataname, package="datasets", cache=False):
"""download and return R dataset
Parameters
----------
dataname : str
The name of the dataset you want to download
package : str
The package in which the dataset is found. The default is the core
'datasets' package.
cache : bool or str
If True, will download this data into the STATSMODELS_DATA folder.
The default location is a folder called statsmodels_data in the
user home folder. Otherwise, you can specify a path to a folder to
use for caching the data. If False, the data will not be cached.
Returns
-------
dataset : Dataset instance
A `statsmodels.data.utils.Dataset` instance. This objects has
attributes::
* data - A pandas DataFrame containing the data
* title - The dataset title
* package - The package from which the data came
* from_cache - Whether not cached data was retrieved
* __doc__ - The verbatim R documentation.
Notes
-----
If the R dataset has an integer index, it is reset to be zero-based.
Otherwise the index is preserved. The caching facilities are dumb. That
is, no download dates, e-tags, or otherwise identifying information
is checked to see if the data should be downloaded again or not. If the
dataset is in the cache, it's used.
"""
# NOTE: use raw github bc html site might not be most up to date
data_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
"master/csv/"+package+"/")
docs_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
"master/doc/"+package+"/rst/")
cache = _get_cache(cache)
data, from_cache = _get_data(data_base_url, dataname, cache)
data = read_csv(data, index_col=0)
data = _maybe_reset_index(data)
title = _get_dataset_meta(dataname, package, cache)
doc, _ = _get_data(docs_base_url, dataname, cache, "rst")
return Dataset(data=data, __doc__=doc.read(), package=package, title=title,
from_cache=from_cache)
# The below function were taken from sklearn
def get_data_home(data_home=None):
"""Return the path of the statsmodels data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'statsmodels_data'
in the user home folder.
Alternatively, it can be set by the 'STATSMODELS_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('STATSMODELS_DATA',
join('~', 'statsmodels_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def check_internet():
"""Check if internet is available"""
try:
urlopen("https://github.com")
except URLError as err:
return False
return True
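# --- Editor's note: illustrative usage sketch, not part of the original module ---
# Uses the documented Rdatasets example; downloading requires internet access
# (see check_internet above) and, with cache=True, writes to the data home dir.
def _example_get_rdataset():
    """Illustrative only: fetch the 'Duncan' dataset from the R 'car' package."""
    duncan = get_rdataset("Duncan", package="car", cache=True)
    return duncan.title, duncan.data.head()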
|
bsd-3-clause
|
treverhines/PyGeoNS
|
pygeons/clean/clean.py
|
1
|
4945
|
'''
Defines cleaning functions which are called by the PyGeoNS executable.
'''
from __future__ import division
import numpy as np
import logging
import matplotlib.pyplot as plt
from pygeons.io.convert import dict_from_hdf5,hdf5_from_dict
from pygeons.mjd import mjd,mjd_inv
from pygeons.basemap import make_basemap
from pygeons.clean.iclean import InteractiveCleaner
from pygeons.units import unit_conversion
from pygeons.plot.plot import (_unit_string,
_setup_map_ax,
_setup_ts_ax)
logger = logging.getLogger(__name__)
def _remove_extension(f):
'''remove file extension if one exists'''
if '.' not in f:
return f
else:
return '.'.join(f.split('.')[:-1])
def pygeons_clean(input_file,resolution='i',
input_edits_file=None,
break_lons=None,break_lats=None,
break_conn=None,no_display=False,
output_stem=None,**kwargs):
'''
runs the PyGeoNS Interactive Cleaner
Parameters
----------
data : dict
data dictionary
resolution : str
basemap resolution
input_edits_file : str
Name of the file containing edits which will automatically be
applied before opening up the interactive viewer.
output_edits_file : str
Name of the file where all edits will be recorded.
**kwargs :
gets passed to pygeons.clean.clean
Returns
-------
out : dict
output data dictionary
'''
logger.info('Running pygeons clean ...')
data = dict_from_hdf5(input_file)
out = dict((k,np.copy(v)) for k,v in data.iteritems())
ts_fig,ts_ax = plt.subplots(3,1,sharex=True,num='Time Series View',facecolor='white')
_setup_ts_ax(ts_ax)
map_fig,map_ax = plt.subplots(num='Map View',facecolor='white')
bm = make_basemap(data['longitude'],data['latitude'],resolution=resolution)
_setup_map_ax(bm,map_ax)
x,y = bm(data['longitude'],data['latitude'])
pos = np.array([x,y]).T
t = data['time']
dates = [mjd_inv(ti,'%Y-%m-%d') for ti in t]
units = _unit_string(data['space_exponent'],data['time_exponent'])
conv = 1.0/unit_conversion(units,time='day',space='m')
u = conv*data['east']
v = conv*data['north']
z = conv*data['vertical']
su = conv*data['east_std_dev']
sv = conv*data['north_std_dev']
sz = conv*data['vertical_std_dev']
ic = InteractiveCleaner(
t,pos,u=u,v=v,z=z,su=su,sv=sv,sz=sz,
map_ax=map_ax,ts_ax=ts_ax,
time_labels=dates,
units=units,
station_labels=data['id'],
**kwargs)
# make edits to the data set prior to displaying it
if input_edits_file is not None:
with open(input_edits_file,'r') as fin:
for line in fin:
# ignore blank lines
if line.isspace():
continue
type,sta,a,b = line.strip().split()
# set the current station in *ic* to the station for this edit
xidx, = (data['id'] == sta).nonzero()
if len(xidx) == 0:
# continue because the station does not exist in this
# dataset
continue
ic.xidx = xidx[0]
if type == 'outliers':
start_time = mjd(a,'%Y-%m-%d')
stop_time = mjd(b,'%Y-%m-%d')
ic.remove_outliers(start_time,stop_time)
elif type == 'jump':
jump_time = mjd(a,'%Y-%m-%d')
delta = int(b)
ic.remove_jump(jump_time,delta)
else:
raise ValueError('edit type must be either "outliers" or "jump"')
if not no_display:
ic.update()
ic.connect()
# set output file name
if output_stem is None:
output_stem = _remove_extension(input_file) + '.clean'
output_file = output_stem + '.h5'
output_edits_file = output_stem + '.txt'
with open(output_edits_file,'w') as fout:
for i in ic.log:
type,xidx,a,b = i
if type == 'outliers':
station = data['id'][xidx]
start_date = mjd_inv(a,'%Y-%m-%d')
stop_date = mjd_inv(b,'%Y-%m-%d')
fout.write('outliers %s %s %s\n' % (station,start_date,stop_date))
elif type == 'jump':
station = data['id'][xidx]
jump_date = mjd_inv(a,'%Y-%m-%d')
fout.write('jump %s %s %s\n' % (station,jump_date,b))
else:
raise ValueError('edit type must be either "outliers" or "jump"')
logger.info('Edits saved to %s' % output_edits_file)
clean_data = ic.get_data()
out['east'] = clean_data[0]/conv
out['north'] = clean_data[1]/conv
out['vertical'] = clean_data[2]/conv
out['east_std_dev'] = clean_data[3]/conv
out['north_std_dev'] = clean_data[4]/conv
out['vertical_std_dev'] = clean_data[5]/conv
hdf5_from_dict(output_file,out)
logger.info('Cleaned data written to %s' % output_file)
logger.info('Edits written to %s' % output_edits_file)
return
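# --- Editor's note: illustrative, not part of the original module ---
# The edits file read above (input_edits_file) is plain text with four
# whitespace-separated fields per line, matching the parser in pygeons_clean.
# Hypothetical example (station names are made up):
#
#   outliers P123 2015-01-01 2015-02-01
#   jump P456 2015-03-15 1
#
# For 'jump' lines the last field is the integer passed on to
# InteractiveCleaner.remove_jump.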
|
mit
|
RPGOne/Skynet
|
scikit-learn-0.18.1/sklearn/tests/test_discriminant_analysis.py
|
7
|
13097
|
import sys
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import SkipTest
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# import reload
version = sys.version_info
if version[0] == 3:
# Python 3+ import for reload. Builtin in Python2
if version[1] == 3:
reload = None
else:
from importlib import reload
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see if
# failure)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1,
# Also tests whether the explained_variance_ratio_ formed by the
# eigen solver is the same as the explained_variance_ratio_ formed
# by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_eigen.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_svd.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
if reload is None:
raise SkipTest("Can't reload module on Python3.3")
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert isinstance(lda.LDA(), LinearDiscriminantAnalysis)
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert isinstance(qda.QDA(), QuadraticDiscriminantAnalysis)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
|
bsd-3-clause
|
Garrett-R/scikit-learn
|
examples/applications/face_recognition.py
|
42
|
5390
|
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Gerhard_Schroeder 0.91 0.75 0.82 28
Donald_Rumsfeld 0.84 0.82 0.83 33
Tony_Blair 0.65 0.82 0.73 34
Colin_Powell 0.78 0.88 0.83 58
George_W_Bush 0.93 0.86 0.90 129
avg / total 0.86 0.84 0.85 282
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (as relative pixel
# position information is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split the data into a training set and a test (evaluation) set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='auto'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
|
bsd-3-clause
|
Smerity/keras
|
examples/kaggle_otto_nn.py
|
7
|
3642
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4. Requires Scikit-Learn and Pandas.
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
- with smaller layers, larger layers
- with more layers, fewer layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
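# A hedged sketch of the "SGD+momentum+decay" alternative suggested in the notes
# above; the hyper-parameter values are illustrative and not tuned for this dataset.
# Passing optimizer=sgd to model.compile(...) further below would swap it in for Adam.
from keras.optimizers import SGD
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)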
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data('train.csv', train=True)
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
print("Building model...")
model = Sequential()
model.add(Dense(512, input_shape=(dims,)))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(512))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(512))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
print("Training model...")
model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
print("Generating submission...")
proba = model.predict_proba(X_test)
make_submission(proba, ids, encoder, fname='keras-otto.csv')
|
mit
|
maximus009/kaggle-galaxies
|
try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048.py
|
7
|
17257
|
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
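# Added sanity check (not part of the original script) to guard the divisibility
# assumption documented above:
assert NUM_CHUNKS % VALIDATE_EVERY == 0, "VALIDATE_EVERY must divide NUM_CHUNKS"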
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=8, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=4, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5)
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE  # x_valid is left over from the list comprehensions above (Python 2 scoping)
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
    # train without normalisation for the first NUM_CHUNKS_NONORM chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
    # make predictions for testset, don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for set, don't forget to cut off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
|
bsd-3-clause
|
gtesei/fast-furious
|
competitions/cdiscount-image-classification-challenge/mynet_reload.py
|
1
|
13669
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
import io
import bson # this is installed with the pymongo package
import matplotlib.pyplot as plt
from skimage.data import imread # or, whatever image library you prefer
import multiprocessing as mp # will come in handy due to the size of the data
import os
from tqdm import *
import struct
from collections import defaultdict
import cv2
from keras import backend as K
import threading
from keras.preprocessing.image import load_img, img_to_array
import tensorflow as tf
from keras.layers import Input, Dense
from keras.models import Model
from keras.preprocessing.image import Iterator
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import concatenate
from skimage.color import rgb2yuv
############################################################################
__GLOBAL_PARAMS__ = {
'MODEL' : "mynet" ,
'DEBUG' : False,
'NORMALIZATION' : True,
'YUV' : True ,
'MULTI_SCALE' : False
}
########
if __GLOBAL_PARAMS__['MULTI_SCALE']:
raise Exception("MULTI_SCALE not supported yet!")
__MODEL__KEY__ = ""
for k in sorted(__GLOBAL_PARAMS__.keys()):
if not k.startswith("_"):
__MODEL__KEY__ += "__" + str(k) + "_" + str(__GLOBAL_PARAMS__[k])
if (__GLOBAL_PARAMS__['DEBUG']):
LOG_FILE = "simple.log"
else:
LOG_FILE = "log" + __MODEL__KEY__ + ".log"
SUB_FILE = "sub_reloaded_" + __MODEL__KEY__ + ".csv.gz"
import logging
logging.basicConfig(format='%(asctime)s %(message)s', filename=LOG_FILE,level=logging.DEBUG)
#logging.debug('This message should go to the log file')
if (__GLOBAL_PARAMS__['DEBUG']):
logging.info('** DEBUG: '+__MODEL__KEY__+' ****************************************************************')
else:
logging.info('** PRODUCTION:'+__MODEL__KEY__+' ****************************************************************')
#logging.warning('And this, too')
########### -------------> FUNC
def preprocess_image(x):
if __GLOBAL_PARAMS__['NORMALIZATION']:
x = (x - 128.0) / 128.0
if __GLOBAL_PARAMS__['YUV']:
x = np.array([rgb2yuv(x.reshape((1,180,180,3)))])
x = x.reshape((180,180,3))
return x
class BSONIterator(Iterator):
def __init__(self, bson_file, images_df, offsets_df, num_class,
image_data_generator, lock, target_size=(180, 180),
with_labels=True, batch_size=32, shuffle=False, seed=None):
self.file = bson_file
self.images_df = images_df
self.offsets_df = offsets_df
self.with_labels = with_labels
self.samples = len(images_df)
self.num_class = num_class
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
self.image_shape = self.target_size + (3,)
print("Found %d images belonging to %d classes." % (self.samples, self.num_class))
super(BSONIterator, self).__init__(self.samples, batch_size, shuffle, seed)
self.lock = lock
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=K.floatx())
if self.with_labels:
batch_y = np.zeros((len(batch_x), self.num_class), dtype=K.floatx())
for i, j in enumerate(index_array):
# Protect file and dataframe access with a lock.
with self.lock:
image_row = self.images_df.iloc[j]
product_id = image_row["product_id"]
offset_row = self.offsets_df.loc[product_id]
# Read this product's data from the BSON file.
self.file.seek(offset_row["offset"])
item_data = self.file.read(offset_row["length"])
# Grab the image from the product.
item = bson.BSON.decode(item_data)
img_idx = image_row["img_idx"]
bson_img = item["imgs"][img_idx]["picture"]
# Load the image.
img = load_img(io.BytesIO(bson_img), target_size=self.target_size)
# Preprocess the image.
x = img_to_array(img)
x = preprocess_image(x)
#x = self.image_data_generator.random_transform(x)
#x = self.image_data_generator.standardize(x)
# Add the image and the label to the batch (one-hot encoded).
batch_x[i] = x
if self.with_labels:
batch_y[i, image_row["category_idx"]] = 1
if self.with_labels:
return batch_x, batch_y
else:
return batch_x
def next(self):
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array[0])
def make_category_tables():
cat2idx = {}
idx2cat = {}
for ir in categories_df.itertuples():
category_id = ir[0]
category_idx = ir[4]
cat2idx[category_id] = category_idx
idx2cat[category_idx] = category_id
return cat2idx, idx2cat
def read_bson(bson_path, num_records, with_categories):
rows = {}
with open(bson_path, "rb") as f, tqdm(total=num_records) as pbar:
offset = 0
while True:
item_length_bytes = f.read(4)
if len(item_length_bytes) == 0:
break
length = struct.unpack("<i", item_length_bytes)[0]
f.seek(offset)
item_data = f.read(length)
assert len(item_data) == length
item = bson.BSON.decode(item_data)
product_id = item["_id"]
num_imgs = len(item["imgs"])
row = [num_imgs, offset, length]
if with_categories:
row += [item["category_id"]]
rows[product_id] = row
offset += length
f.seek(offset)
pbar.update()
columns = ["num_imgs", "offset", "length"]
if with_categories:
columns += ["category_id"]
df = pd.DataFrame.from_dict(rows, orient="index")
df.index.name = "product_id"
df.columns = columns
df.sort_index(inplace=True)
return df
def make_val_set(df, split_percentage=0.2, drop_percentage=0.):
# Find the product_ids for each category.
category_dict = defaultdict(list)
for ir in tqdm(df.itertuples()):
category_dict[ir[4]].append(ir[0])
train_list = []
val_list = []
with tqdm(total=len(df)) as pbar:
for category_id, product_ids in category_dict.items():
category_idx = cat2idx[category_id]
# Randomly remove products to make the dataset smaller.
keep_size = int(len(product_ids) * (1. - drop_percentage))
if keep_size < len(product_ids):
product_ids = np.random.choice(product_ids, keep_size, replace=False)
# Randomly choose the products that become part of the validation set.
val_size = int(len(product_ids) * split_percentage)
if val_size > 0:
val_ids = np.random.choice(product_ids, val_size, replace=False)
else:
val_ids = []
# Create a new row for each image.
for product_id in product_ids:
row = [product_id, category_idx]
for img_idx in range(df.loc[product_id, "num_imgs"]):
if product_id in val_ids:
val_list.append(row + [img_idx])
else:
train_list.append(row + [img_idx])
pbar.update()
columns = ["product_id", "category_idx", "img_idx"]
train_df = pd.DataFrame(train_list, columns=columns)
val_df = pd.DataFrame(val_list, columns=columns)
return train_df, val_df
########### -------------> MAIN
categories_path = os.path.join("data", "category_names.csv")
categories_df = pd.read_csv(categories_path, index_col="category_id")
# Maps the category_id to an integer index. This is what we'll use to
# one-hot encode the labels.
print(">>> Mapping category_id to an integer index ... ")
categories_df["category_idx"] = pd.Series(range(len(categories_df)), index=categories_df.index)
print(categories_df.head())
cat2idx, idx2cat = make_category_tables()
# Test if it works:
print(cat2idx[1000012755], idx2cat[4] , len(cat2idx))
print(">>> Train set ... ")
data_dir = "data"
if (__GLOBAL_PARAMS__['DEBUG']):
print(">>> DEBUG mode ... ")
train_bson_path = os.path.join(data_dir, "train_example.bson")
num_train_products = 82
else:
print(">>> PRODUCTION mode ... ")
train_bson_path = os.path.join(data_dir, "train.bson")
num_train_products = 7069896
test_bson_path = os.path.join(data_dir, "test.bson")
num_test_products = 1768182
print(train_bson_path,num_train_products)
if (not __GLOBAL_PARAMS__['DEBUG']):
if os.path.isfile("train_offsets.csv"):
print(">> reading from file train_offsets ... ")
train_offsets_df = pd.read_csv("train_offsets.csv")
train_offsets_df.set_index( "product_id" , inplace= True)
train_offsets_df.sort_index(inplace=True)
else:
train_offsets_df = read_bson(train_bson_path, num_records=num_train_products, with_categories=True)
train_offsets_df.to_csv("train_offsets.csv")
print(train_offsets_df.head())
if os.path.isfile("train_images.csv"):
print(">> reading from file train_images / val_images ... ")
train_images_df = pd.read_csv("train_images.csv")
train_images_df = train_images_df[['product_id','category_idx','img_idx']]
val_images_df = pd.read_csv("val_images.csv")
val_images_df = val_images_df[['product_id', 'category_idx', 'img_idx']]
else:
train_images_df, val_images_df = make_val_set(train_offsets_df, split_percentage=0.2, drop_percentage=0)
train_images_df.to_csv("train_images.csv")
val_images_df.to_csv("val_images.csv")
print(train_images_df.head())
print(val_images_df.head())
categories_df.to_csv("categories.csv")
else:
train_offsets_df = read_bson(train_bson_path, num_records=num_train_products, with_categories=True)
train_images_df, val_images_df = make_val_set(train_offsets_df, split_percentage=0.2, drop_percentage=0)
print(train_images_df.head())
print(val_images_df.head())
## Generator
print(">>> Generator ... ")
# Tip: use ImageDataGenerator for data augmentation and preprocessing ??
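# A hedged sketch of that tip (the parameter values are illustrative, not tuned):
# configure the generators below with random transforms and re-enable the
# commented-out random_transform/standardize calls in BSONIterator to apply them.
# train_datagen = ImageDataGenerator(rotation_range=15, width_shift_range=0.1,
#                                    height_shift_range=0.1, horizontal_flip=True)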
train_bson_file = open(train_bson_path, "rb")
lock = threading.Lock()
num_classes = len(cat2idx)
num_train_images = len(train_images_df)
num_val_images = len(val_images_df)
batch_size = 128
train_datagen = ImageDataGenerator()
train_gen = BSONIterator(train_bson_file, train_images_df, train_offsets_df,
num_classes, train_datagen, lock,
batch_size=batch_size, shuffle=True)
val_datagen = ImageDataGenerator()
val_gen = BSONIterator(train_bson_file, val_images_df, train_offsets_df,
num_classes, val_datagen, lock,
batch_size=batch_size, shuffle=True)
## Model
print(">>> reloading last model ... ")
print("mod" + __MODEL__KEY__ + '.h5')
from keras.models import load_model
inputs = Input(shape=(180, 180, 3))
x = Conv2D(32, 5, padding="valid", activation="relu")(inputs)
x = BatchNormalization()(x) # hopefully similar to local response normalization
x = MaxPooling2D(pool_size=(3, 3))(x)
#fl1 = Flatten()(x)
x2 = Conv2D(64, 5, padding="valid", activation="relu")(x)
x2 = BatchNormalization()(x2) # hopefully similar to local response normalization
x2 = MaxPooling2D(pool_size=(3, 3))(x2)
fl2 = Flatten()(x2)
#merged = concatenate([fl1, fl2]) # multi scale features
merged = Dropout(0.5)(fl2)
merged = BatchNormalization()(merged)
merged = Dense(2*num_classes, activation='relu')(merged)
merged = Dropout(0.5)(merged)
merged = BatchNormalization()(merged)
predictions = Dense(num_classes, activation='softmax')(merged)
model = Model(inputs=inputs, outputs=predictions)
model.load_weights("mod" + __MODEL__KEY__ + '.h5')
model.summary()
## Predict on Test-set
print(">>> Predicting on test-set ... ")
submission_df = pd.read_csv("data/sample_submission.csv")
print(submission_df.head())
test_datagen = ImageDataGenerator()
data = bson.decode_file_iter(open(test_bson_path, "rb"))
with tqdm(total=num_test_products) as pbar:
for c, d in enumerate(data):
product_id = d["_id"]
num_imgs = len(d["imgs"])
batch_x = np.zeros((num_imgs, 180, 180, 3), dtype=K.floatx())
for i in range(num_imgs):
bson_img = d["imgs"][i]["picture"]
# Load and preprocess the image.
img = load_img(io.BytesIO(bson_img), target_size=(180, 180))
x = img_to_array(img)
x = preprocess_image(x)
            # x = test_datagen.random_transform(x)
            # x = test_datagen.standardize(x)
# Add the image to the batch.
batch_x[i] = x
prediction = model.predict(batch_x, batch_size=num_imgs)
avg_pred = prediction.mean(axis=0)
cat_idx = np.argmax(avg_pred)
submission_df.iloc[c]["category_id"] = idx2cat[cat_idx]
pbar.update()
submission_df.to_csv(SUB_FILE, compression="gzip", index=False)
|
mit
|
mdasifhasan/Experiments_HTN_Planner
|
PyHop/util_plot.py
|
1
|
4996
|
from bokeh.plotting import figure, output_file, show, ColumnDataSource, vplot, hplot, gridplot
from bokeh.layouts import column
from bokeh.charts import Step, Line
from bokeh.models import HoverTool
import numpy as np
def test():
x = range(1, 6)
y = [10, 5, 7, 1, 6]
plot = figure(title='Line example', x_axis_label='x', y_axis_label='y')
plot.line(x, y, legend='Test', line_width=4)
output_file("plots_html/plots.html")
show(plot)
#result format: ('print_a_concept_1', 'Concept B'), ('print_a_concept_2', 'Concept A'),
def plot_plan(plan, state_data, variable_names=[]):
plot_plan = plot_plan_steps(plan)
plot_plan_p = plot_plan_steps_with_params(plan)
    (plot_s, plot_l) = plot_state(state_data, variable_names)
output_file("plots_html/plots.html")
show(gridplot(
children=[plot_plan, plot_plan_p, plot_s, plot_l],
toolbar_location='above',
sizing_mode = 'scale_width',
toolbar_options = dict(logo='grey'),
ncols=2
))
# show(gridplot(
# children=[plot_plan, plot_plan_p, plot_s, plot_l], toolbar_location='right', sizing_mode='scale_width',toolbar_options=dict(logo='grey'),
# ncols=2
# ))
# show(column(children=[plot_plan, plot_plan_p, plot_s, plot_l], sizing_mode='stretch_both', responsive=False))
# show(column(children=[plot_plan, plot_plan_p, plot_s, plot_l], sizing_mode='stretch_both', responsive=False))
# show(vplot(plot_plan_p, plot_s,plot_plan, plot_l))
def plot_plan_steps_with_params(plan):
x = []
y = []
ys = []
i = 0
for (o, v) in plan:
s = str(o) + " - " + str(v)
if s not in ys:
ys.append(s)
for (o, v) in plan:
x.append(i)
i += 1
s = str(o) + " - " + str(v)
y.append((ys.index(s) + 1))
plot = figure(title='plan operators with params', y_range=ys)
# plot.line(x, y, legend='plan', line_width=4, source=source)
plot.line(x, y, line_width=4)
plot.circle(x, y, size=15, fill_color="orange", line_color="green", line_width=3)
return plot
def plot_plan_steps(plan):
x = []
y = []
ys = []
i = 0
for (o, v) in plan:
s = str(o)
if s not in ys:
ys.append(s)
for (o, v) in plan:
x.append(i)
i += 1
s = str(o)
y.append((ys.index(s)+1))
# p = figure()
plot = figure(title='plan operators', y_range=ys)
plot.line(x, y, line_width=4)
plot.circle(x, y, size=15, fill_color="orange", line_color="green", line_width=3)
# s = str(ys)
# d = dict(s = y)
# plot = Step(y, title="plan", legend="top_left", ylabel='operator', palette=["red", "green", "blue", "navy"])
return plot
def plot_state(state_data, variable_names=[]):
# plot = figure(title='State Variables', x_axis_label='step', y_axis_label='level')
# data = []
data = dict()
    for v in variable_names:
data[v] = state_data[v]
# plot.line(range(len(data[v])), data[v], legend=v)
# xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
# xyvalues = np.array(data)
plot = Step(data, title="state variables - step graph", legend="top_left", ylabel='', palette=["red", "green", "blue", "orange"])
plot_line = Line(data, title="state variables - line graph", legend="top_left", ylabel='', palette=["red", "green", "blue", "orange"])
# output_file('line.html')
# show(plot)
return (plot, plot_line)
def plot_plan_bokeh_2(plan):
x = []
y = []
ys = []
i = 0
for (o, v) in plan:
# s = str(o) + " - " + str(v)
s = str(o)
if s not in ys:
ys.append(s)
for (o, v) in plan:
i += 1
x.append(i)
# s = str(o) + " - " + str(v)
s = str(o)
y.append(ys.index(s))
# xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
xyvalues = np.array(y)
line = Line(xyvalues, title="plan", legend="top_left", ylabel='operator')
output_file('plots_html/line.html')
show(line)
def plot_plan_sns(plan):
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
fake = pd.DataFrame({'cat': ['red', 'green', 'blue'], 'val': [1, 2, 3]})
ax = sns.barplot(x='val', y='cat',
data=fake,
color='black')
ax.set(xlabel='common xlabel', ylabel='common ylabel')
plt.show()
def plot_plan_sns(plan):  # note that this second definition overrides the one above
import matplotlib.pyplot as plt
import seaborn as sns
x = []
y = []
ys = []
i = 0
for (o, v) in plan:
s = str(o) + " - " + str(v)
# s = str(o)
if s not in ys:
ys.append(s)
for (o, v) in plan:
x.append(i)
i += 1
s = str(o) + " - " + str(v)
# s = str(o)
y.append(ys.index(s))
sns.set_style("darkgrid")
labels = ys
plt.yticks(y, labels)
plt.plot(x, y)
plt.show()
|
gpl-3.0
|
villalonreina/dipy
|
doc/examples/reconst_dsi.py
|
3
|
3360
|
"""
===========================================
Reconstruct with Diffusion Spectrum Imaging
===========================================
We show how to apply Diffusion Spectrum Imaging [Wedeen08]_ to
diffusion MRI datasets of Cartesian keyhole diffusion gradients.
First import the necessary modules:
"""
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi, get_sphere
from dipy.reconst.dsi import DiffusionSpectrumModel
"""
Download and read the data for this tutorial.
"""
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
"""
img contains a nibabel Nifti1Image object (the data) and gtab contains a
GradientTable object (the gradient information, e.g. b-values). For example,
to read the b-values you can write ``print(gtab.bvals)``.
Load the raw diffusion data and the affine.
"""
data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
data.shape ``(96, 96, 60, 203)``
This dataset has anisotropic voxel sizes; therefore, reslicing is necessary.
"""
affine = img.affine
"""
Read the voxel size from the image header.
"""
voxel_size = img.header.get_zooms()[:3]
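"""
A hedged sketch of how the reslicing mentioned above could be done; the 2 mm
isotropic target voxel size is an illustrative choice and not part of this
example, so the call is only shown here and not executed::

    from dipy.align.reslice import reslice
    data2, affine2 = reslice(data, affine, voxel_size, (2., 2., 2.))
"""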
"""
Instantiate the Model and apply it to the data.
"""
dsmodel = DiffusionSpectrumModel(gtab)
"""
Let's use just one slice of the data.
"""
dataslice = data[:, :, data.shape[2] // 2]  # integer division to pick the middle slice
dsfit = dsmodel.fit(dataslice)
"""
Load an odf reconstruction sphere
"""
sphere = get_sphere('symmetric724')
"""
Calculate the ODFs with this specific sphere
"""
ODF = dsfit.odf(sphere)
print('ODF.shape (%d, %d, %d)' % ODF.shape)
"""
ODF.shape ``(96, 96, 724)``
In a similar fashion it is possible to calculate the PDFs of all voxels
in one call, as follows.
"""
PDF = dsfit.pdf()
print('PDF.shape (%d, %d, %d, %d, %d)' % PDF.shape)
"""
PDF.shape ``(96, 96, 17, 17, 17)``
We see that even for a single slice this PDF array is close to 345 MBytes, so we
really have to be careful with memory usage when using this function on a full
dataset.
The simple solution is to generate/analyze the ODFs/PDFs by iterating through
each voxel and not store them in memory if that is not necessary.
"""
from dipy.core.ndindex import ndindex
for index in ndindex(dataslice.shape[:2]):
pdf = dsmodel.fit(dataslice[index]).pdf()
"""
If you really want to save the PDFs of a full dataset to disk we recommend
using memory maps (``numpy.memmap``), but keep in mind that even then, for a
dataset of volume size ``(96, 96, 60)``, you will need about 2.5 GBytes, which
can take less space when reasonable spheres (with < 1000 vertices) are used.
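The memmap approach could look like the following minimal sketch, which streams
the per-voxel PDFs of the single slice used above to disk instead of holding
them all in memory; the file name and the float32 storage are illustrative
choices, not part of this example.
"""

import numpy as np

pdf_shape = dataslice.shape[:2] + (17, 17, 17)
pdf_map = np.memmap('dsi_pdfs_slice.dat', dtype=np.float32, mode='w+',
                    shape=pdf_shape)
for index in ndindex(dataslice.shape[:2]):
    pdf_map[index] = dsmodel.fit(dataslice[index]).pdf()
pdf_map.flush()

"""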
Let's now calculate a map of Generalized Fractional Anisotropy (GFA) [Tuch04]_
using the DSI ODFs.
"""
from dipy.reconst.odf import gfa
GFA = gfa(ODF)
import matplotlib.pyplot as plt
fig_hist, ax = plt.subplots(1)
ax.set_axis_off()
plt.imshow(GFA.T, cmap='gray', origin='lower')
plt.savefig('dsi_gfa.png', bbox_inches='tight')
"""
.. figure:: dsi_gfa.png
:align: center
See also :ref:`example_reconst_dsi_metrics` for calculating different types
of DSI maps.
.. [Wedeen08] Wedeen et al., Diffusion spectrum magnetic resonance imaging (DSI)
tractography of crossing fibers, Neuroimage, vol 41, no 4,
1267-1277, 2008.
.. [Tuch04] Tuch, D.S, Q-ball imaging, MRM, vol 52, no 6, 1358-1372, 2004.
.. include:: ../links_names.inc
"""
|
bsd-3-clause
|
aerler/GeoPy
|
src/plotting/figure.py
|
1
|
25914
|
'''
Created on Dec 11, 2014
A custom Figure class that provides some specialized functions and uses a custom Axes class.
@author: Andre R. Erler, GPL v3
'''
# external imports
from warnings import warn
from matplotlib.figure import Figure, SubplotBase, subplot_class_factory
import numpy as np
# internal imports
from geodata.misc import isInt, ArgumentError
from plotting.axes import MyAxes, MyLocatableAxes, Axes, MyPolarAxes, TaylorAxes
from plotting.misc import loadStyleSheet, toGGcolors
import matplotlib as mpl
# just for convenience
from matplotlib.pyplot import show, figure
## my new figure class
class MyFigure(Figure):
'''
A custom Figure class that provides some specialized functions and uses a custom Axes class.
This is achieved by overloading add_axes and add_subplot.
(This class does not support built-in projections; use the Basemap functionality instead.)
'''
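  # Illustrative usage (an assumption, not from the original module):
  #   fig = figure(FigureClass=MyFigure, axes_class=MyAxes)
  #   ax = fig.add_subplot(1,1,1) # returns a subplot based on the custom MyAxes class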
# some default parameters
axes_list = None # list of current subplot axes
title_height = 0.05
title_size = 'x-large'
print_settings = None
shared_legend = None
legend_axes = None
shared_colorbar = None
colorbar_axes = None
def __init__(self, *args, **kwargs):
''' constructor that accepts custom axes_class as keyword argument '''
# parse arguments
if 'axes_class' in kwargs:
axes_class = kwargs.pop('axes_class')
if not issubclass(axes_class, Axes): raise TypeError(axes_class)
else: axes_class = MyAxes # default
if 'axes_args' in kwargs:
axes_args = kwargs.pop('axes_args')
if axes_args is not None and not isinstance(axes_args, dict): raise TypeError
else: axes_args = None # default
if 'print_settings' in kwargs:
print_settings = kwargs.pop('print_settings')
else: print_settings = None
# call parent constructor
super(MyFigure,self).__init__(*args, **kwargs)
# save axes class for later
self.axes_class = axes_class
self.axes_args = axes_args
self.axes_list = [] # list of actual subplots
# print options
self.print_settings = dict(dpi=300, transparent=False) # defaults
if print_settings: self.print_settings.update(print_settings)
# N.B.: using the built-in mechanism to choose Axes seems to cause more problems
# from matplotlib.projections import register_projection
# # register custom class with mpl
# register_projection(axes_class)
# def add_axes(self, *args, **kwargs):
# ''' overloading original add_subplot in order to use custom Axes (adapted from parent) '''
# if 'projection' not in kwargs:
# kwargs['projection'] = 'my'
# super(MyFigure,self).__init__(*args, **kwargs)
def add_axes(self, *args, **kwargs):
''' overloading original add_axes in order to use custom Axes (adapted from parent) '''
if not len(args):
return
# shortcut the projection "key" modifications later on, if an axes
# with the exact args/kwargs exists, return it immediately.
key = self._make_key(*args, **kwargs)
ax = self._axstack.get(key)
if ax is not None:
self.sca(ax)
return ax
if isinstance(args[0], Axes): # allow all Axes, if passed explicitly
a = args[0]
assert(a.get_figure() is self)
else:
rect = args[0]
# by registering the new Axes class as a projection, it may be possible
# to use the old axes creation mechanism, but it doesn't work this way...
# from matplotlib.figure import process_projection_requirements
# if 'projection' not in kwargs: kwargs['projection'] = 'my'
# axes_class, kwargs, key = process_projection_requirements(
# self, *args, **kwargs)
axes_class = kwargs.pop('axes_class',None)
if axes_class is None: axes_class = self.axes_class # defaults to my new custom axes (MyAxes)
key = self._make_key(*args, **kwargs)
# check that an axes of this type doesn't already exist, if it
# does, set it as active and return it
ax = self._axstack.get(key)
if ax is not None and isinstance(ax, axes_class):
self.sca(ax)
return ax
# create the new axes using the axes class given
# add default axes arguments
if self.axes_args is not None:
axes_args = self.axes_args.copy()
axes_args.update(kwargs)
else: axes_args = kwargs
a = axes_class(self, rect, **axes_args)
self._axstack.add(key, a)
self.sca(a)
# attach link to figure (self)
a.figure = self
return a
def add_subplot(self, *args, **kwargs):
''' overloading original add_subplot in order to use custom Axes (adapted from parent) '''
if not len(args):
return
if len(args) == 1 and isinstance(args[0], int):
args = tuple([int(c) for c in str(args[0])])
if isinstance(args[0], SubplotBase):
# I'm not sure what this does...
a = args[0]
assert(a.get_figure() is self)
# make a key for the subplot (which includes the axes object id
# in the hash)
key = self._make_key(*args, **kwargs)
else:
# if 'projection' not in kwargs: kwargs['projection'] = 'my'
# axes_class, kwargs, key = process_projection_requirements(
# self, *args, **kwargs)
axes_class = kwargs.pop('axes_class',None)
if axes_class is None: axes_class = self.axes_class # defaults to my new custom axes (MyAxes)
key = self._make_key(*args, **kwargs)
# try to find the axes with this key in the stack
ax = self._axstack.get(key)
if ax is not None:
if isinstance(ax, axes_class):
# the axes already existed, so set it as active & return
self.sca(ax)
return ax
else:
# Undocumented convenience behavior:
# subplot(111); subplot(111, projection='polar')
# will replace the first with the second.
# Without this, add_subplot would be simpler and
# more similar to add_axes.
self._axstack.remove(ax)
# add default axes arguments
if self.axes_args is not None:
axes_args = self.axes_args.copy()
axes_args.update(kwargs)
else: axes_args = kwargs
# generate subplot class and create axes instance
a = subplot_class_factory(axes_class)(self, *args, **axes_args)
self._axstack.add(key, a)
self.sca(a)
# add to list of current subplots
self.axes_list.append(a)
# return axes
return a
# function to adjust subplot parameters
def updateSubplots(self, mode='shift', **kwargs):
''' simple helper function to move (relocate), shift, or scale subplot margins '''
pos = self.subplotpars
margins = dict() # original plot margins
margins['left'] = pos.left; margins['right'] = pos.right
margins['top'] = pos.top; margins['bottom'] = pos.bottom
margins['wspace'] = pos.wspace; margins['hspace'] = pos.hspace
# update subplot margins
if mode.lower() in ('set','move','adjust'): margins.update(kwargs)
else:
for key,val in kwargs.items():
if key in margins:
if mode.lower() == 'shift': margins[key] += val
elif mode.lower() == 'scale': margins[key] *= val
# finally, actually update figure
self.subplots_adjust(**margins)
# and now repair damage: restore axes
for ax in self.axes:
      # Adjusting subplots does not work with LocatableAxes, but that is deprecated...
ax.updateAxes(mode='adjust')
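  # Illustrative usage of updateSubplots (an assumption, not from the original module):
  #   fig.updateSubplots(mode='shift', bottom=0.05) # move the bottom margin up by 0.05
  #   fig.updateSubplots(mode='scale', wspace=1.5)  # increase horizontal subplot spacing by 50%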
# add common/shared legend to a multi-panel plot
def addSharedLegend(self, plots=None, labels=None, fontsize=None, hscl=1., hpad=0.015, location='bottom', loc=None, ncols=None, **kwargs):
''' add a common/shared legend to a multi-panel plot '''
# complete input
if labels is None: labels = [None if plt is None else plt.get_label() for plt in plots ]
elif not isinstance(labels, (list,tuple)): raise TypeError
if plots is None: plots = self.axes[0].plots
elif plots is not None and not isinstance(plots, (list,tuple)): raise TypeError
# figure out fontsize and row numbers
fontsize = fontsize or self.axes[0].get_yaxis().get_label().get_fontsize() # or fig._suptitle.get_fontsize()
nlen = len(plots) if plots else len(labels)
if ncols is None:
if fontsize > 11: ncols = 2 if nlen == 4 else 3
else: ncols = 3 if nlen == 6 else 4
# make room for legend
if location.lower() == 'bottom':
leghgt = ( np.ceil(float(nlen)/float(ncols)) * fontsize/250.) * hscl
self.updateSubplots(mode='shift', bottom=leghgt) # shift bottom upwards (add height pad)
ax = self.add_axes([0, hpad, 1,leghgt], axes_class=MyAxes) # new axes to hold legend, with some attributes
if loc is None: loc = 9
elif location.lower() == 'right':
leghgt = ( ncols * fontsize/40.) * hscl
self.updateSubplots(mode='shift', right=-leghgt-hpad) # shift bottom upwards (add height pad)
ax = self.add_axes([0.99-leghgt-hpad, 0, leghgt+hpad,1-self.title_height-hpad], axes_class=MyAxes) # new axes to hold legend, with some attributes
if loc is None: loc = 2 # upper left
ax.set_frame_on(False); ax.axes.get_yaxis().set_visible(False); ax.axes.get_xaxis().set_visible(False)
# define legend parameters
legargs = dict(loc=loc, ncol=ncols, borderaxespad=0., fontsize=fontsize, frameon=True,
labelspacing=0.1, handlelength=1.3, handletextpad=0.3, fancybox=True)
legargs.update(kwargs)
# create legend and return handle
if plots: legend = ax.legend(plots, labels, **legargs)
else: legend = ax.legend(labels, **legargs)
# store axes handle and legend
self.legend_axes = ax
self.shared_legend = legend
return legend
# add common/shared legend to a multi-panel plot
def addSharedColorbar(self, ax=None, mappable=None, size=None, ipad=None, opad=None, clevs=None,
fmt='{:3.2f}', title=None, scale=1., length=1., lunits=False, location='bottom',
orientation=None, extend='both', **kwargs):
''' add a common/shared colorbar to a multi-panel plot '''
gca = self.gca() if ax is None else ax
if mappable is None: mappable = gca.color_plot # use color plot on current axes
# axes title (defaults to units for vertical axis)
units = gca.cunits if lunits and hasattr(gca,'cunits') else None
# make room for colorbar
if location.lower() == 'bottom':
orientation = orientation or 'horizontal'
if clevs is None: clevs = 5
if ipad is None: ipad = 0.01 * scale
if size is None: size = 0.04 * scale
if opad is None: opad = 0.07 * scale
self.updateSubplots(mode='shift', bottom=ipad+size+opad) # shift bottom upwards (add height pad)
ax = self.add_axes([(1.-length)/2., opad, length,size], axes_class=MyAxes) # new axes to hold legend, with some attributes
elif location.lower() == 'right':
orientation = orientation or 'vertical'
if units and orientation.lower() == 'vertical' and title is None: title = ' [{UNITS}]'
if clevs is None: clevs = 9
if ipad is None: ipad = 0.005 * scale
if size is None: size = 0.03 * scale
if opad is None: opad = 0.075 * scale
self.updateSubplots(mode='shift', right=-(ipad+size+opad)) # shift bottom upwards (add height pad)
height = 1 - self.title_height # effective available height
length = height * length
ax = self.add_axes([1-size-opad, (height-length)/2., size,length], axes_class=MyAxes) # new axes to hold legend, with some attributes
ax.set_frame_on(False); #ax.axes.get_yaxis().set_visible(False); ax.axes.get_xaxis().set_visible(False)
# set title
if title: ax.set_title(title.format(UNITS=units))
# define colorbar parameters
cbargs = dict(orientation=orientation, extend=extend,)
cbargs.update(kwargs)
# create legend and return handle
# cbargs['ticks'] = clevs
cbar = self.colorbar(cax=ax, mappable=mappable, **cbargs)
# add tick labels
if clevs:
cmin,cmax = mappable.get_clim()
if isinstance(clevs, (np.integer,int)): clevs = np.linspace(cmin,cmax,clevs)
elif isinstance(clevs, tuple) and len(clevs) == 3: clevs = np.linspace(*clevs)
elif not isinstance(clevs, (list,tuple,np.ndarray)):
raise TypeError(clevs)
cbar.set_ticks(clevs)
if fmt:
cbar.ax.xaxis.set_tick_params(pad=2)
cbar.ax.yaxis.set_tick_params(pad=4)
if units and 'UNITS' in fmt:
clev_lbls = [fmt.format(clev, UNITS=units) for clev in clevs]
else:
clev_lbls = [fmt.format(clev) for clev in clevs]
cbar.set_ticklabels(clev_lbls)
# store axes handle and legend
self.colorbar_axes = ax
self.shared_colorbar = cbar
return cbar
# add subplot/axes labels
def addLabels(self, labels=None, loc=1, lstroke=False, lalphabet=True, size=None, prop=None, **kwargs):
# expand list
axes = self.axes
n = len(axes)
if not isinstance(labels,(list,tuple)): labels = [labels]*n
if not isinstance(loc,(list,tuple)): loc = [loc]*n
if not isinstance(lstroke,(list,tuple)): lstroke = [lstroke]*n
# settings
if prop is None: prop = dict()
if not size: prop['size'] = 'large'
args = dict(pad=0., borderpad=1.5, frameon=False)
args.update(kwargs)
# cycle over axes
ats = [] # list of texts
for i,ax in enumerate(axes):
# skip shared legend or colorbar
if ax is not self.legend_axes and ax is not self.colorbar_axes:
# default label
label = labels[i]
if label is None:
label = i
if not lalphabet: label += 1
# create label artist
ats.append(ax.addLabel(label, loc=loc[i], lstroke=lstroke[i], lalphabet=lalphabet,
prop=prop.copy(), **args))
return ats
# save figure
def save(self, *args, **kwargs):
''' save figure with some sensible default settings '''
if len(args) == 0: raise ArgumentError
# get option
folder = kwargs.pop('folder', None)
lfeedback = kwargs.pop('lfeedback', None) or kwargs.pop('feedback', None)
    lreplaceSpace = kwargs.pop('lreplaceSpace', True)
filetype = kwargs.pop('filetype', 'pdf')
lword = kwargs.pop('lword', True) # also produce M$ Word compatible filetype (mainly EPS version of PDFs)
# construct filename
basename = ''
for arg in args:
if arg is not None:
if isinstance(arg, (list,tuple)):
for a in arg: basename += str(a)
else: basename += str(arg)
basename += '_'
basename = basename[:-1] # remove last underscore
# replace spaces, if desired
if lreplaceSpace:
basename = basename.replace(' ', '_')
# add filename extension
filename = '{}.{}'.format(basename,filetype)
# update print settings
sf = self.print_settings.copy() # print properties
sf.update(kwargs) # update with kwargs
# save file
if lfeedback: print(("Saving figure as '{:s}'".format(filename)))
if folder:
filename = '{:s}/{:s}'.format(folder,filename)
if lfeedback: print(("('{:s}')".format(folder)))
self.savefig(filename, **sf) # save figure to pdf
# save M$ Word compatible file version
if lword:
# determine alternative filetype
alttype = None
if filetype.lower() in ('pdf','eps','svg'): alttype = 'png'
# save alternative format
if alttype:
altname = '{}.{}'.format(basename,alttype)
if lfeedback: print(("(Saving alternate format as '{:s}')".format(altname)))
if folder: altname = '{:s}/{:s}'.format(folder,altname)
self.savefig(altname, **sf) # save figure to pdf
## convenience function to return a figure and an array of ImageGrid axes
def getFigAx(subplot, name=None, title=None, title_font='x-large', title_height=None, figsize=None,
variable_plotargs=None, dataset_plotargs=None, plot_labels=None, yright=False, xtop=False,
sharex=None, sharey=None, lAxesGrid=False, ngrids=None, direction='row',
lPolarAxes=False, lTaylor = False, ledge_ticks=False,
axes_pad = None, add_all=True, share_all=None, aspect=False, margins=None,
label_mode='L', cbar_mode=None, cbar_location='right', lreduce=True,
cbar_pad=None, cbar_size='5%', axes_class=None, axes_args=None, stylesheet=None,
lpresentation=False, lpublication=False, figure_class=None, **figure_args):
# load stylesheet
if stylesheet is not None:
loadStyleSheet(stylesheet, lpresentation=lpresentation, lpublication=lpublication)
if stylesheet in ('myggplot','ggplot'):
warn("Rewriting built-in color definitions to GG-plot defaults.")
if dataset_plotargs is not None: dataset_plotargs = toGGcolors(dataset_plotargs) # modifies in-place!
# default figure class
if figure_class is None: figure_class = MyFigure
elif not issubclass(figure_class, Figure): raise TypeError
# figure out subplots
if isinstance(subplot,(np.integer,int)):
if subplot == 1: subplot = (1,1)
elif subplot == 2: subplot = (1,2)
elif subplot == 3: subplot = (1,3)
elif subplot == 4: subplot = (2,2)
elif subplot == 6: subplot = (2,3)
elif subplot == 9: subplot = (3,3)
else: raise NotImplementedError
  elif not (isinstance(subplot,(tuple,list)) and len(subplot) == 2 and all(isInt(sp) for sp in subplot)): raise TypeError
# create figure
if figsize is None:
if lpublication:
if subplot == (1,1): figsize = (3.75,3.75)
elif subplot == (1,2) or subplot == (1,3): figsize = (6.25,3.75)
elif subplot == (2,1) or subplot == (3,1): figsize = (3.75,7)
else: figsize = (6.25,6.25)
elif lpresentation:
if subplot == (1,2) or subplot == (1,3): figsize = (5,3)
elif subplot == (2,1) or subplot == (3,1): figsize = (3,5)
else: figsize = (5,5)
else:
if subplot == (1,1): figsize = (5,5)
elif subplot == (1,2) or subplot == (1,3): figsize = (9,5)
elif subplot == (2,1) or subplot == (3,1): figsize = (5,9)
else: figsize = (9,9)
# figure out margins
if margins is None:
# N.B.: the rectangle definition is presumably left, bottom, width, height
if subplot == (1,1): margins = (0.1,0.1,0.85,0.85)
elif subplot == (1,2) or subplot == (1,3): margins = (0.06,0.1,0.92,0.87)
elif subplot == (2,1) or subplot == (3,1): margins = (0.09,0.11,0.88,0.82)
elif subplot == (2,2) or subplot == (3,3): margins = (0.09,0.1,0.9,0.88)
else: margins = (0.09,0.11,0.88,0.82)
if title_height is None: title_height = getattr(figure_class, 'title_height', 0.05) # use default from figure
if title is not None: margins = margins[:3]+(margins[3]-title_height,) # make room for title
# # some style sheets have different label sizes
# if stylesheet.lower() in ('myggplot','ggplot'):
# margins = list(margins)
# margins[0] += 0.015; margins[1] -= 0.01 # left, bottom
# margins[2] += 0.02; margins[3] += 0.02 # width, height
# handle special TaylorPlot axes
if lTaylor:
if not lPolarAxes: lPolarAxes = True
if not axes_class: axes_class = TaylorAxes
# handle mixed Polar/Axes
if isinstance(axes_class, (list,tuple,np.ndarray)):
for i,axcls in enumerate(axes_class):
if axcls is None:
if lTaylor: axes_class[i] = TaylorAxes
elif lPolarAxes: axes_class[i] = MyPolarAxes
else: axes_class[i] = MyAxes
elif axcls.lower() == 'taylor': axes_class[i] = TaylorAxes
elif axcls.lower() == 'polar': axes_class[i] = MyPolarAxes
elif axcls.lower() in ('regular','default'): axes_class[i] = MyAxes
if not issubclass(axcls, Axes): raise TypeError(axcls)
# create axes
if lAxesGrid:
if share_all is None: share_all = True
if axes_pad is None: axes_pad = 0.05
# adjust margins for ignored label pads
margins = list(margins)
margins[0] += 0.005; margins[1] -= 0.02 # left, bottom
margins[2] -= 0.005; margins[3] -= 0.00 # width, height
# create axes using the Axes Grid package
if axes_class is None: axes_class=MyLocatableAxes
fig = mpl.pylab.figure(facecolor='white', figsize=figsize, axes_class=axes_class,
FigureClass=figure_class, **figure_args)
if axes_args is None: axes_class = (axes_class,{})
elif isinstance(axes_args,dict): axes_class = (axes_class,axes_args)
else: raise TypeError
from mpl_toolkits.axes_grid1 import ImageGrid
# AxesGrid: http://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html
grid = ImageGrid(fig, margins, nrows_ncols = subplot, ngrids=ngrids, direction=direction,
axes_pad=axes_pad, add_all=add_all, share_all=share_all, aspect=aspect,
label_mode=label_mode, cbar_mode=cbar_mode, cbar_location=cbar_location,
cbar_pad=cbar_pad, cbar_size=cbar_size, axes_class=axes_class)
# return figure and axes
axes = np.asarray(grid).reshape(subplot) # don't want flattened array
#axes = tuple([ax for ax in grid]) # this is already flattened
elif isinstance(axes_class, (list,tuple,np.ndarray)):
# PolarAxes can't share axes and by default don't have labels
if figure_args is None: figure_args = dict()
fig = figure(facecolor='white', figsize=figsize, FigureClass=figure_class, **figure_args)
# now create list of axes
if axes_args is None: axes_args = dict()
axes = np.empty(subplot, dtype=object); n = 0
for i in range(subplot[0]):
for j in range(subplot[1]):
n += 1
axes[i,j] = fig.add_subplot(subplot[0], subplot[1], n, axes_class=axes_class[n-1],
aspect=aspect, **axes_args)
# just adjust margins
if axes_pad is None: axes_pad = 0.03
wspace = hspace = 0.1
margin_dict = dict(left=margins[0], bottom=margins[1], right=margins[0]+margins[2],
top=margins[1]+margins[3], wspace=wspace, hspace=hspace)
fig.subplots_adjust(**margin_dict)
else:
# select default axes based on other arguments
if axes_class is None:
if lPolarAxes:
axes_class = MyPolarAxes
share_all = sharex = sharey = False
# N.B.: PolarAxes does not support sharing of axes, and
# default behavior is to hide labels
else:
axes_class = MyAxes
# create axes using normal subplot routine
if axes_pad is None: axes_pad = 0.03
wspace = hspace = axes_pad
if share_all:
sharex='all'; sharey='all'
if sharex is True or sharex is None: sharex = 'col' # default
if sharey is True or sharey is None: sharey = 'row'
if sharex: hspace -= 0.015
if sharey: wspace -= 0.015
# other axes arguments
if axes_args is not None and not isinstance(axes_args,dict): raise TypeError
# create figure
from matplotlib.pyplot import subplots
# GridSpec: http://matplotlib.org/users/gridspec.html
fig, axes = subplots(subplot[0], subplot[1], sharex=sharex, sharey=sharey,squeeze=lreduce,
facecolor='white', figsize=figsize, FigureClass=figure_class,
subplot_kw=axes_args, axes_class=axes_class, **figure_args)
# there is also a subplot_kw=dict() and fig_kw=dict()
# just adjust margins
margin_dict = dict(left=margins[0], bottom=margins[1], right=margins[0]+margins[2],
top=margins[1]+margins[3], wspace=wspace, hspace=hspace)
fig.subplots_adjust(**margin_dict)
# apply reduction
if lreduce:
if isinstance(axes,np.ndarray):
axes = axes.squeeze() # remove singleton dimensions
if axes.ndim == 0: axes = axes.item()
if isinstance(axes,(list,tuple)) and len(axes) == 1:
axes = axes[0] # return a bare axes instance, if there is only one axes
## set label positions
if not lPolarAxes:
# X-/Y-labels and -ticks
yright = not sharey and subplot[0]==2 if yright is None else yright
xtop = not sharex and subplot[1]==2 if xtop is None else xtop
if isinstance(axes, Axes):
axes.yright = yright
axes.xtop = xtop
else:
if axes.ndim == 1:
if subplot[1] == 2: axes[-1].yright = yright # right panel
if subplot[0] == 2: axes[0].xtop = xtop # top panel
elif axes.ndim == 2:
for ax in axes[:,-1]: ax.yright = yright # right column
for ax in axes[0,:]: ax.xtop = xtop # top row
else: raise ValueError
# add figure title
if name is None: name = title
if name is not None: fig.canvas.set_window_title(name) # window title
if title is not None:
y = 1. - ( title_height / ( 5. if 'x' in title_font else 8. ) ) # smaller title closer to the top
if isinstance(title_font,str): title_font = dict(fontsize=title_font, y=y)
fig.suptitle(title, **title_font) # title on figure (printable)
fig.title_height = title_height # save value
# add default line styles for variables and datasets to axes (figure doesn't need to know)
if isinstance(axes, np.ndarray):
for ax in axes.ravel():
ax.variable_plotargs = variable_plotargs
ax.dataset_plotargs = dataset_plotargs
ax.plot_labels = plot_labels
ax.edge_ticks = ledge_ticks
else:
axes.variable_plotargs = variable_plotargs
axes.dataset_plotargs = dataset_plotargs
axes.plot_labels = plot_labels
# return Figure/ImageGrid and tuple of axes
#if AxesGrid: fig = grid # return ImageGrid instead of figure
return fig, axes
if __name__ == '__main__':
pass
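# Illustrative sketch (hypothetical, not part of the original module): a minimal
# use of getFigAx and MyFigure.save as defined above. The panel layout, dummy
# data, file name and output folder are assumptions for illustration only.
def _example_getFigAx_usage(folder='/tmp'):
  import numpy as np
  fig, axes = getFigAx(4, title='Example Panels', sharex=True, sharey=True)
  data = np.random.rand(10, 10)
  for ax in axes.ravel():
    ax.imshow(data) # plot the same dummy image in every panel
  fig.save('example_panels', folder=folder, lfeedback=True) # uses the save() method defined above
  return fig, axes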
|
gpl-3.0
|
gdementen/xlwings
|
xlwings/tests/udfs/udf_tests.py
|
1
|
16102
|
from datetime import datetime, date
import sys
if sys.version_info >= (2, 7):
from nose.tools import assert_dict_equal
import xlwings as xw
try:
import numpy as np
from numpy.testing import assert_array_equal
def nparray_equal(a, b):
try:
assert_array_equal(a, b)
except AssertionError:
return False
return True
except ImportError:
np = None
try:
import pandas as pd
from pandas import DataFrame, Series
from pandas.util.testing import assert_frame_equal, assert_series_equal
def frame_equal(a, b):
try:
assert_frame_equal(a, b)
except AssertionError:
return False
return True
def series_equal(a, b):
try:
assert_series_equal(a, b)
except AssertionError:
return False
return True
except ImportError:
pd = None
def dict_equal(a, b):
try:
assert_dict_equal(a, b)
except AssertionError:
return False
return True
# Defaults
@xw.func
def read_float(x):
return x == 2.
@xw.func
def write_float():
return 2.
@xw.func
def read_string(x):
return x == 'xlwings'
@xw.func
def write_string():
return 'xlwings'
@xw.func
def read_empty(x):
return x is None
@xw.func
def read_date(x):
return x == datetime(2015, 1, 15)
@xw.func
def write_date():
return datetime(1969, 12, 31)
@xw.func
def read_datetime(x):
return x == datetime(1976, 2, 15, 13, 6, 22)
@xw.func
def write_datetime():
return datetime(1976, 2, 15, 13, 6, 23)
@xw.func
def read_horizontal_list(x):
return x == [1., 2.]
@xw.func
def write_horizontal_list():
return [1., 2.]
@xw.func
def read_vertical_list(x):
return x == [1., 2.]
@xw.func
def write_vertical_list():
return [[1.], [2.]]
@xw.func
def read_2dlist(x):
return x == [[1., 2.], [3., 4.]]
@xw.func
def write_2dlist():
return [[1., 2.], [3., 4.]]
# Keyword args on default converters
@xw.func
@xw.arg('x', ndim=1)
def read_ndim1(x):
return x == [2.]
@xw.func
@xw.arg('x', ndim=2)
def read_ndim2(x):
return x == [[2.]]
@xw.func
@xw.arg('x', transpose=True)
def read_transpose(x):
return x == [[1., 3.], [2., 4.]]
@xw.func
@xw.ret(transpose=True)
def write_transpose():
return [[1., 2.], [3., 4.]]
@xw.func
@xw.arg('x', dates=date)
def read_dates_as1(x):
return x == [[1., date(2015, 1, 13)], [date(2000, 12, 1), 4.]]
@xw.func
@xw.arg('x', dates=date)
def read_dates_as2(x):
return x == date(2005, 1, 15)
@xw.func
@xw.arg('x', dates=datetime)
def read_dates_as3(x):
return x == [[1., datetime(2015, 1, 13)], [datetime(2000, 12, 1), 4.]]
@xw.func
@xw.arg('x', empty='empty')
def read_empty_as(x):
return x == [[1., 'empty'], ['empty', 4.]]
if sys.version_info >= (2, 7):
# assert_dict_equal isn't available on nose for PY 2.6
# Dicts
@xw.func
@xw.arg('x', dict)
def read_dict(x):
return dict_equal(x, {'a': 1., 'b': 'c'})
@xw.func
@xw.arg('x', dict, transpose=True)
def read_dict_transpose(x):
return dict_equal(x, {1.0: 'c', 'a': 'b'})
@xw.func
def write_dict():
return {'a': 1., 'b': 'c'}
# Numpy Array
if np:
@xw.func
@xw.arg('x', np.array)
def read_scalar_nparray(x):
return nparray_equal(x, np.array(1.))
@xw.func
@xw.arg('x', np.array)
def read_empty_nparray(x):
return nparray_equal(x, np.array(np.nan))
@xw.func
@xw.arg('x', np.array)
def read_horizontal_nparray(x):
return nparray_equal(x, np.array([1., 2.]))
@xw.func
@xw.arg('x', np.array)
def read_vertical_nparray(x):
return nparray_equal(x, np.array([1., 2.]))
@xw.func
@xw.arg('x', np.array)
def read_date_nparray(x):
return nparray_equal(x, np.array(datetime(2000, 12, 20)))
# Keyword args on Numpy arrays
@xw.func
@xw.arg('x', np.array, ndim=1)
def read_ndim1_nparray(x):
return nparray_equal(x, np.array([2.]))
@xw.func
@xw.arg('x', np.array, ndim=2)
def read_ndim2_nparray(x):
return nparray_equal(x, np.array([[2.]]))
@xw.func
@xw.arg('x', np.array, transpose=True)
def read_transpose_nparray(x):
return nparray_equal(x, np.array([[1., 3.], [2., 4.]]))
@xw.func
@xw.ret(transpose=True)
def write_transpose_nparray():
return np.array([[1., 2.], [3., 4.]])
@xw.func
@xw.arg('x', np.array, dates=date)
def read_dates_as_nparray(x):
return nparray_equal(x, np.array(date(2000, 12, 20)))
@xw.func
@xw.arg('x', np.array, empty='empty')
def read_empty_as_nparray(x):
return nparray_equal(x, np.array('empty'))
@xw.func
def write_np_scalar():
return np.float64(2)
# Pandas Series
if pd:
@xw.func
@xw.arg('x', pd.Series, header=False, index=False)
def read_series_noheader_noindex(x):
return series_equal(x, pd.Series([1., 2.]))
@xw.func
@xw.arg('x', pd.Series, header=False, index=True)
def read_series_noheader_index(x):
return series_equal(x, pd.Series([1., 2.], index=[10., 20.]))
@xw.func
@xw.arg('x', pd.Series, header=True, index=False)
def read_series_header_noindex(x):
return series_equal(x, pd.Series([1., 2.], name='name'))
@xw.func
@xw.arg('x', pd.Series, header=True, index=True)
def read_series_header_named_index(x):
return series_equal(x, pd.Series([1., 2.], name='name', index=pd.Index([10., 20.], name='ix')))
@xw.func
@xw.arg('x', pd.Series, header=True, index=True)
def read_series_header_nameless_index(x):
return series_equal(x, pd.Series([1., 2.], name='name', index=[10., 20.]))
@xw.func
@xw.arg('x', pd.Series, header=True, index=2)
def read_series_header_nameless_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return series_equal(x, pd.Series([1., 2.], name='name', index=ix))
@xw.func
@xw.arg('x', pd.Series, header=True, index=2)
def read_series_header_named_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]], names=['ix1', 'ix2'])
return series_equal(x, pd.Series([1., 2.], name='name', index=ix))
@xw.func
@xw.arg('x', pd.Series, header=False, index=2)
def read_series_noheader_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return series_equal(x, pd.Series([1., 2.], index=ix))
@xw.func
@xw.ret(pd.Series, index=False)
def write_series_noheader_noindex():
return pd.Series([1., 2.])
@xw.func
@xw.ret(pd.Series, index=True)
def write_series_noheader_index():
return pd.Series([1., 2.], index=[10., 20.])
@xw.func
@xw.ret(pd.Series, index=False)
def write_series_header_noindex():
return pd.Series([1., 2.], name='name')
@xw.func
def write_series_header_named_index():
return pd.Series([1., 2.], name='name', index=pd.Index([10., 20.], name='ix'))
@xw.func
@xw.ret(pd.Series, index=True, header=True)
def write_series_header_nameless_index():
return pd.Series([1., 2.], name='name', index=[10., 20.])
@xw.func
@xw.ret(pd.Series, header=True, index=2)
def write_series_header_nameless_2index():
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return pd.Series([1., 2.], name='name', index=ix)
@xw.func
@xw.ret(pd.Series, header=True, index=2)
def write_series_header_named_2index():
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]], names=['ix1', 'ix2'])
return pd.Series([1., 2.], name='name', index=ix)
@xw.func
@xw.ret(pd.Series, header=False, index=2)
def write_series_noheader_2index():
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return pd.Series([1., 2.], index=ix)
@xw.func
@xw.arg('x', pd.Series)
def read_timeseries(x):
return series_equal(x, pd.Series([1.5, 2.5], name='ts', index=[datetime(2000, 12, 20), datetime(2000, 12, 21)]))
@xw.func
@xw.ret(pd.Series)
def write_timeseries():
return pd.Series([1.5, 2.5], name='ts', index=[datetime(2000, 12, 20), datetime(2000, 12, 21)])
@xw.func
@xw.ret(pd.Series, index=False)
def write_series_nan():
return pd.Series([1, np.nan, 3])
# Pandas DataFrame
if pd:
@xw.func
@xw.arg('x', pd.DataFrame, index=False, header=False)
def read_df_0header_0index(x):
return frame_equal(x, pd.DataFrame([[1., 2.], [3., 4.]]))
@xw.func
@xw.ret(pd.DataFrame, index=False, header=False)
def write_df_0header_0index():
return pd.DataFrame([[1., 2.], [3., 4.]])
@xw.func
@xw.arg('x', pd.DataFrame, index=False, header=True)
def read_df_1header_0index(x):
return frame_equal(x, pd.DataFrame([[1., 2.], [3., 4.]], columns=['a', 'b']))
@xw.func
@xw.ret(pd.DataFrame, index=False, header=True)
def write_df_1header_0index():
return pd.DataFrame([[1., 2.], [3., 4.]], columns=['a', 'b'])
@xw.func
@xw.arg('x', pd.DataFrame, index=True, header=False)
def read_df_0header_1index(x):
return frame_equal(x, pd.DataFrame([[1., 2.], [3., 4.]], index=[10., 20.]))
@xw.func
@xw.ret(pd.DataFrame, index=True, header=False)
def write_df_0header_1index():
return pd.DataFrame([[1., 2.], [3., 4.]], index=[10, 20])
@xw.func
@xw.arg('x', pd.DataFrame, index=2, header=False)
def read_df_0header_2index(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]]))
return frame_equal(x, df)
@xw.func
@xw.ret(pd.DataFrame, index=2, header=False)
def write_df_0header_2index():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]]))
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=1)
def read_df_1header_1namedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=['c', 'd', 'c'])
df.index.name = 'ix1'
return frame_equal(x, df)
@xw.func
def write_df_1header_1namedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=['c', 'd', 'c'])
df.index.name = 'ix1'
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=1)
def read_df_1header_1unnamedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=['c', 'd', 'c'])
return frame_equal(x, df)
@xw.func
def write_df_1header_1unnamedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=['c', 'd', 'c'])
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=False, header=2)
def read_df_2header_0index(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return frame_equal(x, df)
@xw.func
@xw.ret(pd.DataFrame, index=False, header=2)
def write_df_2header_0index():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=2)
def read_df_2header_1namedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
df.index.name = 'ix1'
return frame_equal(x, df)
@xw.func
def write_df_2header_1namedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
df.index.name = 'ix1'
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=2)
def read_df_2header_1unnamedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return frame_equal(x, df)
@xw.func
def write_df_2header_1unnamedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=2, header=2)
def read_df_2header_2namedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]], names=['x1', 'x2']),
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return frame_equal(x, df)
@xw.func
def write_df_2header_2namedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]], names=['x1', 'x2']),
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=2, header=2)
def read_df_2header_2unnamedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]]),
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return frame_equal(x, df)
@xw.func
def write_df_2header_2unnamedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]]),
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=2, header=1)
def read_df_1header_2namedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]], names=['x1', 'x2']),
columns=['a', 'd', 'c'])
return frame_equal(x, df)
@xw.func
def write_df_1header_2namedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]], names=['x1', 'x2']),
columns=['a', 'd', 'c'])
return df
@xw.func
@xw.arg('x', pd.DataFrame)
def read_df_date_index(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[datetime(1999,12,13), datetime(1999,12,14)],
columns=['c', 'd', 'c'])
return frame_equal(x, df)
@xw.func
def write_df_date_index():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[datetime(1999,12,13), datetime(1999,12,14)],
columns=['c', 'd', 'c'])
return df
@xw.func
def read_workbook_caller():
wb = xw.Book.caller()
return xw.Range('E277').value == 1.
@xw.func
def default_args(x, y="hello", z=20):
return 2 * x + 3 * len(y) + 7 * z
@xw.func
def variable_args(x, *z):
return 2 * x + 3 * len(z) + 7 * z[0]
@xw.func
def optional_args(x, y=None):
if y is None:
y = 10
return x * y
@xw.func
def write_none():
return None
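# Illustrative sketch (hypothetical, not one of the test UDFs above): a minimal
# UDF combining the argument and return converters exercised in this module --
# the input range is read as a 2d list and echoed back transposed.
@xw.func
@xw.arg('x', ndim=2)
@xw.ret(transpose=True)
def echo_transposed(x):
    return x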
if __name__ == "__main__":
xw.serve()
|
bsd-3-clause
|
balazssimon/ml-playground
|
udemy/lazyprogrammer/deep-reinforcement-learning-python/mountaincar/pg_tf_random.py
|
1
|
6813
|
import gym
import os
import sys
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from gym import wrappers
from datetime import datetime
from q_learning import plot_running_avg, FeatureTransformer
# so you can test different architectures
class HiddenLayer:
def __init__(self, M1, M2, f=tf.nn.tanh, use_bias=True, zeros=False):
if zeros:
W = np.zeros((M1, M2)).astype(np.float32)
self.W = tf.Variable(W)
else:
self.W = tf.Variable(tf.random_normal(shape=(M1, M2)))
self.params = [self.W]
self.use_bias = use_bias
if use_bias:
self.b = tf.Variable(np.zeros(M2).astype(np.float32))
self.params.append(self.b)
self.f = f
def forward(self, X):
if self.use_bias:
a = tf.matmul(X, self.W) + self.b
else:
a = tf.matmul(X, self.W)
return self.f(a)
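# Illustrative sketch (hypothetical, not used below): chaining two HiddenLayer
# instances into a tiny MLP forward pass, mirroring how PolicyModel.get_output
# stacks layers; the input dimension and layer sizes are arbitrary examples.
def _hidden_layer_stack_example():
  X = tf.placeholder(tf.float32, shape=(None, 4), name='X_example')
  h1 = HiddenLayer(4, 8)                                  # tanh hidden layer
  h2 = HiddenLayer(8, 1, f=lambda x: x, use_bias=False)   # linear output layer
  Z = h2.forward(h1.forward(X))                           # (None, 4) -> (None, 8) -> (None, 1)
  return X, Z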
# approximates pi(a | s)
class PolicyModel:
def __init__(self, ft, D, hidden_layer_sizes_mean=[], hidden_layer_sizes_var=[]):
# save inputs for copy
self.ft = ft
self.D = D
self.hidden_layer_sizes_mean = hidden_layer_sizes_mean
self.hidden_layer_sizes_var = hidden_layer_sizes_var
##### model the mean #####
self.mean_layers = []
M1 = D
for M2 in hidden_layer_sizes_mean:
layer = HiddenLayer(M1, M2)
self.mean_layers.append(layer)
M1 = M2
# final layer
layer = HiddenLayer(M1, 1, lambda x: x, use_bias=False, zeros=True)
self.mean_layers.append(layer)
##### model the variance #####
self.var_layers = []
M1 = D
for M2 in hidden_layer_sizes_var:
layer = HiddenLayer(M1, M2)
self.var_layers.append(layer)
M1 = M2
# final layer
layer = HiddenLayer(M1, 1, tf.nn.softplus, use_bias=False, zeros=False)
self.var_layers.append(layer)
# gather params
self.params = []
for layer in (self.mean_layers + self.var_layers):
self.params += layer.params
# inputs and targets
self.X = tf.placeholder(tf.float32, shape=(None, D), name='X')
self.actions = tf.placeholder(tf.float32, shape=(None,), name='actions')
self.advantages = tf.placeholder(tf.float32, shape=(None,), name='advantages')
def get_output(layers):
Z = self.X
for layer in layers:
Z = layer.forward(Z)
return tf.reshape(Z, [-1])
# calculate output and cost
mean = get_output(self.mean_layers)
std = get_output(self.var_layers) + 1e-4 # smoothing
# note: the 'variance' is actually standard deviation
norm = tf.contrib.distributions.Normal(mean, std)
self.predict_op = tf.clip_by_value(norm.sample(), -1, 1)
def set_session(self, session):
self.session = session
def init_vars(self):
init_op = tf.variables_initializer(self.params)
self.session.run(init_op)
# def partial_fit(self, X, actions, advantages):
# X = np.atleast_2d(X)
# X = self.ft.transform(X)
# actions = np.atleast_1d(actions)
# advantages = np.atleast_1d(advantages)
# self.session.run(
# self.train_op,
# feed_dict={
# self.X: X,
# self.actions: actions,
# self.advantages: advantages,
# }
# )
def predict(self, X):
X = np.atleast_2d(X)
X = self.ft.transform(X)
return self.session.run(self.predict_op, feed_dict={self.X: X})
def sample_action(self, X):
p = self.predict(X)[0]
# print("action:", p)
return p
def copy(self):
    clone = PolicyModel(self.ft, self.D, self.hidden_layer_sizes_mean, self.hidden_layer_sizes_var)
clone.set_session(self.session)
clone.init_vars() # tf will complain if we don't do this
clone.copy_from(self)
return clone
def copy_from(self, other):
# collect all the ops
ops = []
my_params = self.params
other_params = other.params
for p, q in zip(my_params, other_params):
actual = self.session.run(q)
op = p.assign(actual)
ops.append(op)
# now run them all
self.session.run(ops)
def perturb_params(self):
ops = []
for p in self.params:
v = self.session.run(p)
noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0
if np.random.random() < 0.1:
# with probability 0.1 start completely from scratch
op = p.assign(noise)
else:
op = p.assign(v + noise)
ops.append(op)
self.session.run(ops)
def play_one(env, pmodel, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
while not done and iters < 2000:
# if we reach 2000, just quit, don't want this going forever
# the 200 limit seems a bit early
action = pmodel.sample_action(observation)
# oddly, the mountain car environment requires the action to be in
# an object where the actual action is stored in object[0]
observation, reward, done, info = env.step([action])
totalreward += reward
iters += 1
return totalreward
def play_multiple_episodes(env, T, pmodel, gamma, print_iters=False):
totalrewards = np.empty(T)
for i in range(T):
totalrewards[i] = play_one(env, pmodel, gamma)
if print_iters:
print(i, "avg so far:", totalrewards[:(i+1)].mean())
avg_totalrewards = totalrewards.mean()
print("avg totalrewards:", avg_totalrewards)
return avg_totalrewards
def random_search(env, pmodel, gamma):
totalrewards = []
best_avg_totalreward = float('-inf')
best_pmodel = pmodel
num_episodes_per_param_test = 3
for t in range(100):
tmp_pmodel = best_pmodel.copy()
tmp_pmodel.perturb_params()
avg_totalrewards = play_multiple_episodes(
env,
num_episodes_per_param_test,
tmp_pmodel,
gamma
)
totalrewards.append(avg_totalrewards)
if avg_totalrewards > best_avg_totalreward:
best_pmodel = tmp_pmodel
best_avg_totalreward = avg_totalrewards
return totalrewards, best_pmodel
def main():
env = gym.make('MountainCarContinuous-v0')
ft = FeatureTransformer(env, n_components=100)
D = ft.dimensions
pmodel = PolicyModel(ft, D, [], [])
# init = tf.global_variables_initializer()
session = tf.InteractiveSession()
# session.run(init)
pmodel.set_session(session)
pmodel.init_vars()
gamma = 0.99
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
totalrewards, pmodel = random_search(env, pmodel, gamma)
print("max reward:", np.max(totalrewards))
# play 100 episodes and check the average
avg_totalrewards = play_multiple_episodes(env, 100, pmodel, gamma, print_iters=True)
print("avg reward over 100 episodes with best models:", avg_totalrewards)
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
if __name__ == '__main__':
main()
|
apache-2.0
|
ClimbsRocks/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
110
|
3768
|
# Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
|
bsd-3-clause
|
Macemann/Georgetown-Capstone
|
app/Graph_stats.py
|
1
|
5030
|
'''
Script to create Graph Statistics
Statistics are dumped to disk for app ease
__author__ = Ryan Stephany
__purpose__ Georgetown Data Analytics
'''
import os
import pickle
import networkx as nx
import community
import matplotlib.pyplot as plt
from operator import itemgetter
MODELS_PATH = os.path.join(os.path.abspath(os.path.dirname(os.path.abspath(__file__))),'models')
with open(os.path.join(MODELS_PATH,'hidden_names.pkl'),'rb') as handler:
HIDDEN = pickle.load(handler)
NICKNAMES = [v[1] for v in HIDDEN['HIDDEN'].values()]
# Taken from Benjamin Bengfort and modified
def nbest_centrality(graph, metric, n=10, attribute="centrality", **kwargs):
centrality = metric(graph, **kwargs)
nx.set_node_attributes(graph, attribute, centrality)
degrees = sorted(centrality.items(), key=itemgetter(1), reverse=True)
output = []
for idx, item in enumerate(degrees[0:n]):
item = list(item)
if graph.has_node(item[0]):
node = graph.node[item[0]]
try:
item[0] = node['screen_name']
except:
pass
item = tuple(item)
item = (idx+1,) + item
print "%i. %s: %0.4f" % item
output.append(item)
return output
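# Illustrative sketch (hypothetical, not used below): ranking the most central
# nodes of a small random graph with nbest_centrality; the graph size, edge
# probability and seed are arbitrary example values.
def _nbest_centrality_example():
    G = nx.erdos_renyi_graph(30, 0.2, seed=42)
    return nbest_centrality(G, nx.degree_centrality, n=5)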
def gen_graph_stats (graph):
G = nx.read_graphml(graph)
stats = {}
edges, nodes = 0,0
for e in G.edges_iter(): edges += 1
for n in G.nodes_iter(): nodes += 1
stats['Edges'] = (edges,'The number of edges within the Graph')
stats['Nodes'] = (nodes, 'The number of nodes within the Graph')
print "%i edges, %i nodes" % (edges, nodes)
# Accessing the highest degree node
center, degree = sorted(G.degree().items(), key=itemgetter(1), reverse=True)[0]
stats['Center Node'] = ('%s: %0.5f' % (center,degree),'The center most node in the graph. Which has the highest degree')
hairball = nx.subgraph(G, [x for x in nx.connected_components(G)][0])
print "Average shortest path: %0.4f" % nx.average_shortest_path_length(hairball)
stats['Average Shortest Path Length'] = (nx.average_shortest_path_length(hairball), '')
# print "Center: %s" % G[center]
# print "Shortest Path to Center: %s" % p
print "Degree: %0.5f" % degree
stats['Degree'] = (degree,'The node degree is the number of edges adjacent to that node.')
print "Order: %i" % G.number_of_nodes()
stats['Order'] = (G.number_of_nodes(),'The number of nodes in the graph.')
print "Size: %i" % G.number_of_edges()
stats['Size'] = (G.number_of_edges(),'The number of edges in the graph.')
print "Clustering: %0.5f" % nx.average_clustering(G)
stats['Average Clustering'] = (nx.average_clustering(G),'The average clustering coefficient for the graph.')
print "Transitivity: %0.5f" % nx.transitivity(G)
stats['Transitivity'] = (nx.transitivity(G),'The fraction of all possible triangles present in the graph.')
part = community.best_partition(G)
# values = [part.get(node) for node in G.nodes()]
# nx.draw_spring(G, cmap = plt.get_cmap('jet'), node_color = values, node_size=30, with_labels=False)
# plt.show()
mod = community.modularity(part,G)
print "modularity: %0.5f" % mod
stats['Modularity'] = (mod,'The modularity of a partition of a graph.')
knn = nx.k_nearest_neighbors(G)
print knn
stats['K Nearest Neighbors'] = (knn,'the average degree connectivity of graph.\nThe average degree connectivity is the average nearest neighbor degree of nodes with degree k. For weighted graphs, an analogous measure can be computed using the weighted average neighbors degre')
return G, stats
if __name__ == '__main__':
graphs = {}
for nn in NICKNAMES:
G, g_stats = gen_graph_stats(os.path.join(MODELS_PATH,'%s.graphml' % nn))
graphs[nn] = {}
graphs[nn]['stats'] = g_stats
dc = nbest_centrality(G, nx.degree_centrality, n=15)
graphs[nn]['degree_centrality'] = dc
bc = nbest_centrality(G, nx.betweenness_centrality, n=15)
graphs[nn]['betweenness_centrality'] = bc
cc = nbest_centrality(G, nx.closeness_centrality, n=15)
graphs[nn]['closeness_centrality'] = cc
ec = nbest_centrality(G, nx.eigenvector_centrality_numpy, n=15)
graphs[nn]['eigenvector_centrality'] = ec
G, g_stats = gen_graph_stats(os.path.join(MODELS_PATH,'all.graphml'))
graphs['all'] = {}
graphs['all']['stats'] = g_stats
dc = nbest_centrality(G, nx.degree_centrality, n=15)
graphs['all']['degree_centrality'] = dc
bc = nbest_centrality(G, nx.betweenness_centrality, n=15)
graphs['all']['betweenness_centrality'] = bc
cc = nbest_centrality(G, nx.closeness_centrality, n=15)
graphs['all']['closeness_centrality'] = cc
ec = nbest_centrality(G, nx.eigenvector_centrality_numpy, n=15)
graphs['all']['eigenvector_centrality'] = ec
kc = nbest_centrality(G, nx.katz_centrality_numpy,n=15)
graphs['all']['katz_centrality'] = kc
partition = community.best_partition(G)
size = float(len(set(partition.values())))
count = 0
for com in set(partition.values()):
count+=1
graphs['all']['Communities'] = count
with open(os.path.join(MODELS_PATH,'graph_measures.pkl'),'wb') as handler:
pickle.dump(graphs,handler)
|
mit
|
aflaxman/pymc-cod-correct
|
src/tests.py
|
1
|
3462
|
""" Tests """
# matplotlib will open windows during testing unless you do the following
import matplotlib
matplotlib.use("AGG")
import pylab as pl
import pymc as mc
import models
reload(models)
import data
reload(data)
import graphics
reload(graphics)
class TestClass:
def setUp(self):
self.X = data.sim_data(10)
def test_sim_data(self):
sim_data = data.sim_data(10)
assert sim_data.shape == (10,2,3), 'Should be 10x2x3 matrix of data (%s found)' % str(sim_data.shape)
sim_data = data.sim_data(10, [[.1, .4, .5]], [.1, .1, .1])
assert sim_data.shape == (10,1,3), 'Should be 10x1x3 matrix of data (%s found)' % str(sim_data.shape)
def test_sim_data_2(self):
sims = 10000
return # skip for now
test1 = pl.zeros(3, dtype='f').view(pl.recarray)
for i in range(sims):
temp = data.sim_data(1, [0.1,0.1,0.8], [0.01,0.01,0.01])
test1 = pl.vstack((test1, temp))
test1 = test1[1:,]
test2 = data.sim_data(sims, [0.1,0.1,0.8], [0.01, 0.01, 0.01])
diff = (test1.mean(0) - test2.mean(0))/test1.mean(0)
assert pl.allclose(diff, 0, atol=0.01), 'should be close to zero, (%s found)' % str(diff)
def test_get_cod_data(self):
return # skip for now
cf = data.get_cod_data(level=1)
assert len(cf.cause) == 3 and cf.cause.dtype == 'S1'
assert len(cf.est) == 3 and cf.est.dtype == 'float32'
assert len(cf.lower) == 3 and cf.lower.dtype == 'float32'
assert len(cf.upper) == 3 and cf.upper.dtype == 'float32'
# this only tests that level 1 causes work; the function takes awhile to run at higher levels, so it may not be feasible to repeatedly test this at higher levels.
def test_sim_cod_data(self):
return # skip for now
cf = data.get_cod_data(level=1)
X = data.sim_cod_data(10, cf)
assert pl.shape(X) == (10, 3)
def test_sim_data_for_validation(self):
return # skip for now
sim_data = data.sim_data_for_validation(10, [0.5, 0.5], [0.1, 0.1])
assert sim_data.shape == (10,2), 'Should be 10x2 matrix of data (%s found)' % str(sim_data.shape)
sim_data = data.sim_data_for_validation(10, [.1, .4, .5], [.1, .1, .1])
assert sim_data.shape == (10,3), 'Should be 10x3 matrix of data (%s found)' % str(sim_data.shape)
def test_plot_sim_data(self):
return # skip for now
X = data.sim_data(10, [.1, .4, .5], [.1, .1, .1])
graphics.plot_sim_data(X)
assert list(pl.axis()) == [0., 1., 0., 1.], 'plot limits should be unit square, (%s found)' % str(pl.axis())
graphics.plot_all_sim_data(X)
def test_bad_model(self):
X = data.sim_data(10)
Y = models.bad_model(X)
assert pl.allclose(Y.sum(axis=2), 1), 'should be all ones, (%s found)' % str(Y.sum(axis=2))
# test again for 10x2x3 dataset
X = data.sim_data(10, [[.1, .4, .5]], [.1, .1, .1])
Y = models.bad_model(X)
assert pl.allclose(Y.sum(axis=2), 1), 'should be all ones, (%s found)' % str(Y.sum(axis=2))
def test_good_model(self):
vars = models.latent_simplex(self.X)
assert pl.all(pl.sum(vars['pi'].value, 1) <= 1.0), 'pi values should sum to at most 1, (%s found)' % pl.sum(vars['pi'].value, 1)
m = mc.MCMC(vars)
m.sample(10)
if __name__ == '__main__':
import nose
nose.run()
|
gpl-3.0
|
bmazin/ARCONS-pipeline
|
examples/Pal2012-sdss/ConvolutionWithTemp.py
|
1
|
3536
|
import numpy as np
import matplotlib.pyplot as plt
from util import utils
import mpfit
import scipy.optimize as optimize
#This program slides a template across the data to try to improve the data quality. This convultion was not very successful. The program bins the data if neccessary so the template and data have the same point/time ratio. "Trunc" means the template does not cover the entire period, just the area around the eclipse. The number in the template filenames indicate the number of bins per period. dataTemplate.py makes a template by binning and averaging the data. fitTemplate.py makes a template by fitting a skewed gaussian to the "data template" that results from templateMaker.py
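# Illustrative sketch (hypothetical, not used below): the template-sliding idea
# on synthetic data -- cross-correlating a short eclipse-like template with a
# noisy light curve via np.correlate(..., mode='same'), as done further down.
def _template_matching_example():
    rng = np.random.RandomState(0)
    t = np.linspace(0., 1., 500)
    curve = 1. - 0.5*np.exp(-((t - 0.5)/0.02)**2) + 0.05*rng.randn(t.size)  # noisy curve with a dip
    ttmp = np.linspace(-0.1, 0.1, 50)
    template = 1. - 0.5*np.exp(-(ttmp/0.02)**2)                             # short template of the dip
    filtered = np.correlate(template, curve, mode='same')[::-1]             # slide template across curve
    return filtered/np.average(filtered)                                    # normalized, same length as curve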
FileName = '/Scratch/dataProcessing/SDSS_J0926/AllData/Dec8fitpsfBlueUpdated.npz'
NumFrames = 1700
IntTime = 3
TimeCutOff = 1300
template = np.load('/Scratch/dataProcessing/SDSS_J0926/AllData/Dec8BlueTruncTemplateFitted560.npz')
template3 = np.load('/Scratch/dataProcessing/SDSS_J0926/AllData/Dec8BlueTruncTemplateUpdated560.npz')
tempValues = template['template']
tempjd = template['jd']
# N.B.: no 'template2' file is loaded in this version of the script; the
#       corresponding (unused) template and cross-correlation are disabled below.
#tempValues2 = template2['template']
#tempjd2 = template2['jd']
tempValues3 = template3['template']
tempjd3 = template3['jd']
t = np.load(FileName)
params = t['params']
jd = t['jd']
period = 0.01966127
amps = params[:,1]
widths = params[:,4]
xpos = params[:,2]
ypos = params[:,3]
NumFrames = TimeCutOff
jd = jd[:TimeCutOff]
amps = amps[:TimeCutOff]
widths = widths[:TimeCutOff]
xpos = xpos[:TimeCutOff]
ypos = ypos[:TimeCutOff]
fig = plt.figure()
ax = fig.add_subplot(311)
curve1 = amps*widths**2
curve = curve1/(np.average(curve1))
#curve = np.append(curve1[0:411]/np.average(curve1[0:411]), curve1[411:971]/np.average(curve1[411:971]))
#curve = np.append(curve, curve1[971:]/np.average(curve1[971:]))
numbins = 155 #per eclipse period!
jdbin = period/float(numbins)
Totalnumbins = int((jd[NumFrames-1]-jd[0])/jdbin)
#to bin the DATA
average_array = []
for i in range(Totalnumbins):
out_values = np.where(np.logical_and(jd >= i*jdbin+jd[0],jd < (i+1)*jdbin+jd[0]))[0]
iCurve = curve[out_values]
iCurve = iCurve[iCurve != 0]
# iCurve = iCurve[iCurve < 700]
bin_average = np.mean(iCurve)
average_array.append(bin_average)
#jd=np.arange(0,1,binwidth)*period
jd3 = np.arange(jd[0]+jdbin/2.,jd[NumFrames-1]-jdbin/2.,jdbin)
print len(average_array),len(jd3),Totalnumbins
ax.plot(jd,curve,'g')
#ax.plot(jd3,average_array,'r')
ax.set_title(FileName)
#plt.show()
ax2 = fig.add_subplot(312)
MinIndex= np.argmin(tempValues)
tempValues4 = tempValues3[MinIndex-20:MinIndex+20]
tempjd4 = tempjd3[MinIndex-20:MinIndex+20]
filtered = np.correlate(tempValues,curve,mode='same')[::-1]
#filtered2 = np.correlate(tempValues2,curve,mode='same')[::-1] # disabled: template2 is not loaded above
filtered3 = np.correlate(tempValues3,curve,mode='same')[::-1]
filtered4 = np.correlate(tempValues4,curve,mode='same')[::-1]
ax.plot(jd,filtered/np.average(filtered),'r')
ax.plot(jd,filtered4/np.average(filtered4),'k')
ax.plot(jd,filtered3/np.average(filtered3),'b')
#ax2.plot(tempjd2,tempValues2,'.r')
ax2.plot(jd,filtered/np.average(filtered),'r')
ax2.plot(jd,filtered4/np.average(filtered4)+.1,'k')
ax2.plot(jd,filtered3/np.average(filtered3)+.2,'b')
ax3 = fig.add_subplot(313)
ax3.plot(tempjd,tempValues,'.r',label="skew-gauss fitted")
ax3.plot(tempjd4,tempValues4,'ko', label="short not fitted")
ax3.plot(tempjd3,tempValues3,'.b', label="not fitted")
#ax.plot(jd[0]+tempjd2,tempValues2,'.k')
#ax.plot(jd[0]+.01+tempjd,tempValues,'.r')
plt.legend(loc=4)
plt.show()
|
gpl-2.0
|
yzl0083/orange
|
Orange/OrangeWidgets/OWDlgs.py
|
6
|
13862
|
import os
from OWBaseWidget import *
import OWGUI
_have_qwt = True
try:
from PyQt4.Qwt5 import *
except ImportError:
_have_qwt = False
_have_gl = True
try:
from PyQt4.QtOpenGL import QGLWidget
except ImportError:
_have_gl = False
from PyQt4.QtGui import QGraphicsScene, QGraphicsView
from PyQt4.QtSvg import *
from ColorPalette import *
import OWQCanvasFuncts
class OWChooseImageSizeDlg(OWBaseWidget):
settingsList = ["selectedSize", "customX", "customY", "lastSaveDirName", "penWidthFactor"]
def __init__(self, graph, extraButtons = [], defaultName="graph", parent=None, saveMatplotlib=None):
OWBaseWidget.__init__(self, parent, None, "Image settings", modal = TRUE, resizingEnabled = 0)
self.graph = graph
self.selectedSize = 0
self.customX = 400
self.customY = 400
self.saveAllSizes = 0
self.penWidthFactor = 1
self.lastSaveDirName = "./"
self.defaultName = defaultName
self.loadSettings()
self.setLayout(QVBoxLayout(self))
self.space = OWGUI.widgetBox(self)
self.layout().setMargin(8)
#self.layout().addWidget(self.space)
box = OWGUI.widgetBox(self.space, "Image Size")
global _have_qwt
if _have_qwt and isinstance(graph, QwtPlot):
size = OWGUI.radioButtonsInBox(box, self, "selectedSize", ["Current size", "400 x 400", "600 x 600", "800 x 800", "Custom:"], callback = self.updateGUI)
self.customXEdit = OWGUI.lineEdit(OWGUI.indentedBox(box), self, "customX", "Width: ", orientation = "horizontal", valueType = int)
self.customYEdit = OWGUI.lineEdit(OWGUI.indentedBox(box), self, "customY", "Height:", orientation = "horizontal", valueType = int)
OWGUI.comboBoxWithCaption(self.space, self, "penWidthFactor", label = 'Factor: ', box = " Pen width multiplication factor ", tooltip = "Set the pen width factor for all curves in the plot\n(Useful for example when the lines in the plot look to thin)\nDefault: 1", sendSelectedValue = 1, valueType = int, items = range(1,20))
elif isinstance(graph, QGraphicsScene) or isinstance(graph, QGraphicsView) or (_have_gl and isinstance(graph, QGLWidget)):
OWGUI.widgetLabel(box, "Image size will be set automatically.")
box = OWGUI.widgetBox(self.space, 1)
#self.printButton = OWGUI.button(self.space, self, "Print", callback = self.printPic)
self.saveImageButton = OWGUI.button(box, self, "Save Image", callback = self.saveImage)
# If None we try to determine if save can succeed automatically
if saveMatplotlib is None:
saveMatplotlib = self.canSaveToMatplotlib(graph)
if saveMatplotlib and not (_have_gl and isinstance(graph, QGLWidget)):
self.saveMatplotlibButton = OWGUI.button(box, self, "Save Graph as matplotlib Script", callback = self.saveToMatplotlib)
for (text, funct) in extraButtons:
butt = OWGUI.button(box, self, text, callback = funct)
self.connect(butt, SIGNAL("clicked()"), self.accept) # also connect the button to accept so that we close the dialog
OWGUI.button(box, self, "Cancel", callback = self.reject)
self.resize(250,300)
self.updateGUI()
def saveImage(self, filename = None, size = None, closeDialog = 1):
if not filename:
filename = self.getFileName(self.defaultName, "Portable Network Graphics (*.PNG);;Windows Bitmap (*.BMP);;Graphics Interchange Format (*.GIF);;Scalable Vector Graphics (*.SVG)", ".png")
if not filename: return
(fil,ext) = os.path.splitext(filename)
if ext.lower() not in [".bmp", ".gif", ".png", ".svg"] :
ext = ".png" # if no format was specified, we choose png
filename = fil + ext
if _have_gl and isinstance(self.graph, QGLWidget):
img = self.graph.grabFrameBuffer()
if size != None:
img = img.scaled(size)
img.save(filename)
if closeDialog:
QDialog.accept(self)
return
real_graph = self.graph if isinstance(self.graph, QGraphicsView) else None
if real_graph:
self.graph = self.graph.scene()
if isinstance(self.graph, QGraphicsScene):
source = self.getSceneBoundingRect().adjusted(-15, -15, 15, 15)
size = source.size()
elif isinstance(self.graph, QGraphicsView):
source = self.graph.sceneRect()
size = source.size()
elif not size:
size = self.getSize()
painter = QPainter()
if filename.lower().endswith(".svg"):
buffer = QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QSize(int(size.width()), int(size.height())))
else:
buffer = QPixmap(int(size.width()), int(size.height()))
painter.begin(buffer)
painter.setRenderHint(QPainter.Antialiasing)
if not filename.lower().endswith(".svg"):
if isinstance(self.graph, QGraphicsScene) or isinstance(self.graph, QGraphicsView):
# make background same color as the widget's background
brush = self.graph.backgroundBrush()
if brush.style() == Qt.NoBrush:
brush = QBrush(self.graph.palette().color(QPalette.Base))
painter.fillRect(buffer.rect(), brush)
else:
painter.fillRect(buffer.rect(), QBrush(Qt.white))
# qwt plot
global _have_qwt
if _have_qwt and isinstance(self.graph, QwtPlot):
if self.penWidthFactor != 1:
for curve in self.graph.itemList():
pen = curve.pen(); pen.setWidth(self.penWidthFactor*pen.width()); curve.setPen(pen)
self.graph.print_(painter, QRect(0,0,size.width(), size.height()))
if self.penWidthFactor != 1:
for curve in self.graph.itemList():
pen = curve.pen(); pen.setWidth(pen.width()/self.penWidthFactor); curve.setPen(pen)
# QGraphicsScene
elif isinstance(self.graph, QGraphicsScene) or isinstance(self.graph, QGraphicsView):
target = QRectF(0,0, source.width(), source.height())
self.graph.render(painter, target, source)
if not filename.lower().endswith(".svg"):
buffer.save(filename)
if closeDialog:
QDialog.accept(self)
def getSceneBoundingRect(self):
source = QRectF()
for item in self.graph.items():
if item.isVisible():
source = source.united(item.boundingRect().translated(item.pos()))
return source
def saveToMatplotlib(self):
filename = self.getFileName(self.defaultName, "Python Script (*.py)", ".py")
if filename:
global _have_qwt
if _have_qwt and isinstance(self.graph, QwtPlot):
self.graph.saveToMatplotlib(filename, self.getSize())
else:
rect = self.getSceneBoundingRect()
minx, maxx, miny, maxy = rect.x(), rect.x()+rect.width(), rect.y(), rect.y()+rect.height()
f = open(filename, "wt")
f.write("# This Python file uses the following encoding: utf-8\n")
f.write("from pylab import *\nfrom matplotlib.patches import Rectangle\n\n#constants\nx1 = %f; x2 = %f\ny1 = 0.0; y2 = %f\ndpi = 80\nxsize = %d\nysize = %d\nedgeOffset = 0.01\n\nfigure(facecolor = 'w', figsize = (xsize/float(dpi), ysize/float(dpi)), dpi = dpi)\na = gca()\nhold(True)\n" % (minx, maxx, maxy, maxx-minx, maxy-miny))
if isinstance(self.graph, QGraphicsView):
scene = self.graph.scene()
else:
scene = self.graph
sortedList = [(item.zValue(), item) for item in scene.items()]
sortedList.sort() # sort items by z value
for (z, item) in sortedList:
# a little compatibility for QT 3.3 (on Mac at least)
if hasattr(item, "isVisible"):
if not item.isVisible(): continue
elif not item.visible(): continue
if item.__class__ in [QGraphicsRectItem, QGraphicsLineItem]:
penc, penAlpha = self._getColorFromObject(item.pen())
penWidth = item.pen().width()
if isinstance(item, QGraphicsRectItem):
x,y,w,h = item.rect().x(), maxy-item.rect().y()-item.rect().height(), item.rect().width(), item.rect().height()
brushc, brushAlpha = self._getColorFromObject(item.brush())
f.write("a.add_patch(Rectangle((%d, %d), %d, %d, edgecolor=%s, facecolor = %s, linewidth = %d, fill = %d))\n" % (x,y,w,h, penc, brushc, penWidth, type(brushc) == tuple))
elif isinstance(item, QGraphicsLineItem):
x1,y1, x2,y2 = item.line().x1(), maxy-item.line().y1(), item.line().x2(), maxy-item.line().y2()
f.write("plot(%s, %s, marker = 'None', linestyle = '-', color = %s, linewidth = %d, alpha = %.3f)\n" % ([x1,x2], [y1,y2], penc, penWidth, penAlpha))
elif item.__class__ in [QGraphicsTextItem, OWQCanvasFuncts.OWCanvasText]:
if item.__class__ == QGraphicsTextItem:
xalign, yalign = "left", "top"
x, y = item.x(), item.y()
else:
align = item.alignment
#xalign = (align & Qt.AlignLeft and "right") or (align & Qt.AlignRight and "left") or (align & Qt.AlignHCenter and "center")
#yalign = (align & Qt.AlignBottom and "top") or (align & Qt.AlignTop and "bottom") or (align & Qt.AlignVCenter and "center")
xalign = (align & Qt.AlignLeft and "left") or (align & Qt.AlignRight and "right") or (align & Qt.AlignHCenter and "center")
yalign = (align & Qt.AlignBottom and "bottom") or (align & Qt.AlignTop and "top") or (align & Qt.AlignVCenter and "center")
x, y = item.x, item.y
vertAlign = (yalign and ", verticalalignment = '%s'" % yalign) or ""
horAlign = (xalign and ", horizontalalignment = '%s'" % xalign) or ""
color = tuple([item.defaultTextColor().red()/255., item.defaultTextColor().green()/255., item.defaultTextColor().blue()/255.])
weight = item.font().bold() and "bold" or "normal"
f.write("text(%f, %f, '%s'%s%s, color = %s, name = '%s', weight = '%s', alpha = %.3f)\n" % (item.x, maxy-item.y, unicode(item.toPlainText()).encode("utf-8"), vertAlign, horAlign, color, str(item.font().family()), weight, item.defaultTextColor().alpha()/float(255)))
f.write("# disable grid\ngrid(False)\n\n")
f.write("#hide axis\naxis('off')\naxis([x1, x2, y1, y2])\ngca().set_position([edgeOffset, edgeOffset, 1 - 2*edgeOffset, 1 - 2*edgeOffset])\n")
f.write("show()")
f.close()
try:
import matplotlib
except:
QMessageBox.information(self,'Matplotlib missing',"File was saved, but you will not be able to run it because you don't have matplotlib installed.\nYou can download matplotlib for free at matplotlib.sourceforge.net.", QMessageBox.Ok)
QDialog.accept(self)
def canSaveToMatplotlib(self, graph):
if _have_qwt and isinstance(graph, QwtPlot):
# TODO: check all curve items.
return True
elif isinstance(graph, QGraphicsScene):
items = graph.items()
supported = set([QGraphicsRectItem, QGraphicsLineItem,
QGraphicsTextItem, OWQCanvasFuncts.OWCanvasText])
return all(type(item) in supported for item in items)
else:
return False
# ############################################################
# EXTRA FUNCTIONS ############################################
def getFileName(self, defaultName, mask, extension):
fileName = unicode(QFileDialog.getSaveFileName(self, "Save to..", os.path.join(self.lastSaveDirName, defaultName), mask))
if not fileName: return None
if not os.path.splitext(fileName)[1][1:]: fileName = fileName + extension
self.lastSaveDirName = os.path.split(fileName)[0] + "/"
self.saveSettings()
return fileName
def getSize(self):
if isinstance(self.graph, QGraphicsScene):
size = self.getSceneBoundingRect().size()
elif self.selectedSize == 0: size = self.graph.size()
elif self.selectedSize == 4: size = QSize(self.customX, self.customY)
else: size = QSize(200 + self.selectedSize*200, 200 + self.selectedSize*200)
return size
def updateGUI(self):
global _have_qwt
if _have_qwt and isinstance(self.graph, QwtPlot):
self.customXEdit.setEnabled(self.selectedSize == 4)
self.customYEdit.setEnabled(self.selectedSize == 4)
def _getColorFromObject(self, obj):
if isinstance(obj, QBrush) and obj.style() == Qt.NoBrush: return "'none'", 1
if isinstance(obj, QPen) and obj.style() == Qt.NoPen: return "'none'", 1
col = [obj.color().red(), obj.color().green(), obj.color().blue()];
col = tuple([v/float(255) for v in col])
return col, obj.color().alpha()/float(255)
|
gpl-3.0
|
spallavolu/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * 50 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
moonbury/pythonanywhere
|
MasteringMLWithScikit-learn/8365OS_07_Codes/adult-data-plot.py
|
3
|
1277
|
import matplotlib
matplotlib.use('Qt4Agg')
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
df = pd.read_csv('data/adult.data', header=None)
y = df[14]
X = df[range(0, 14)]
def one_hot_dataframe(data, cols, replace=False):
""" Takes a dataframe and a list of columns that need to be encoded.
Returns a 3-tuple comprising the data, the vectorized data,
        and the fitted vectorizer.
"""
vec = DictVectorizer()
mkdict = lambda row: dict((col, row[col]) for col in cols)
vecData = pd.DataFrame(vec.fit_transform(data[cols].to_dict(outtype='records')).toarray())
vecData.columns = vec.get_feature_names()
vecData.index = data.index
if replace is True:
data = data.drop(cols, axis=1)
data = data.join(vecData)
return data, vecData, vec
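# DictVectorizer turns every categorical column c with observed string value v into
# a binary indicator column named "c=v", while numeric columns pass through as-is,
# so the 14 raw adult-census columns expand into a wide, fully numeric frame below.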
X, _, _ = one_hot_dataframe(X, range(0, 14), replace=True)
X.fillna(-1, inplace=True)
labels = []
for i in y:
if i == ' <=50K':
labels.append(0)
else:
labels.append(1)
pca = PCA(n_components=2)
print 'fitting pca'
X = pca.fit_transform(X)
print 'plotting'
plt.scatter(X[:, 0], X[:, 1], c=labels)
plt.show()
|
gpl-3.0
|
yyjiang/scikit-learn
|
sklearn/neighbors/graph.py
|
208
|
7031
|
"""Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of the edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of the edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
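
# Illustrative, self-contained demo (not part of the upstream module API); it runs
# only when this file is executed directly and simply exercises the two helpers
# above on the toy data from their docstrings.
if __name__ == "__main__":
    X = [[0], [3], [1]]
    # k-NN graph: each sample linked to its 2 nearest neighbors (self included,
    # matching the current default behavior warned about in _query_include_self).
    print(kneighbors_graph(X, 2, include_self=True).toarray())
    # radius graph: samples linked to every neighbor within distance 1.5.
    print(radius_neighbors_graph(X, 1.5, include_self=True).toarray())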
|
bsd-3-clause
|
xzh86/scikit-learn
|
examples/manifold/plot_swissroll.py
|
330
|
1446
|
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
|
bsd-3-clause
|
victorbergelin/scikit-learn
|
sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py
|
221
|
5517
|
"""
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
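    # In closed form this is mean(log(1 + exp(-2 * y_ * pred))) with y_ = 2*y - 1
    # in {-1, +1}; the matching negative gradient checked below is
    # y_ / (1 + exp(2 * y_ * pred)).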
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative definition (from ESLII)
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
|
bsd-3-clause
|
poryfly/scikit-learn
|
examples/ensemble/plot_gradient_boosting_regularization.py
|
355
|
2843
|
"""
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
|
bsd-3-clause
|
HazyResearch/metal
|
metal/contrib/visualization/analysis.py
|
1
|
4929
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.sparse as sparse
from metal.utils import convert_labels
############################################################
# Label Matrix Plotting
############################################################
def view_label_matrix(L, colorbar=True):
"""Display an [n, m] matrix of labels"""
L = L.todense() if sparse.issparse(L) else L
plt.imshow(L, aspect="auto")
plt.title("Label Matrix")
if colorbar:
labels = sorted(np.unique(np.asarray(L).reshape(-1, 1).squeeze()))
boundaries = np.array(labels + [max(labels) + 1]) - 0.5
plt.colorbar(boundaries=boundaries, ticks=labels)
plt.show()
def view_overlaps(L, self_overlaps=False, normalize=True, colorbar=True):
"""Display an [m, m] matrix of overlaps"""
L = L.todense() if sparse.issparse(L) else L
G = _get_overlaps_matrix(L, normalize=normalize)
if not self_overlaps:
np.fill_diagonal(G, 0) # Zero out self-overlaps
plt.imshow(G, aspect="auto")
plt.title("Overlaps")
if colorbar:
plt.colorbar()
plt.show()
def view_conflicts(L, normalize=True, colorbar=True):
"""Display an [m, m] matrix of conflicts"""
L = L.todense() if sparse.issparse(L) else L
C = _get_conflicts_matrix(L, normalize=normalize)
plt.imshow(C, aspect="auto")
plt.title("Conflicts")
if colorbar:
plt.colorbar()
plt.show()
def _get_overlaps_matrix(L, normalize=True):
n, m = L.shape
X = np.where(L != 0, 1, 0).T
G = X @ X.T
if normalize:
G = G / n
return G
def _get_conflicts_matrix(L, normalize=True):
n, m = L.shape
C = np.zeros((m, m))
# Iterate over the pairs of LFs
for i in range(m):
for j in range(m):
# Get the overlapping non-zero indices
overlaps = list(
set(np.where(L[:, i] != 0)[0]).intersection(np.where(L[:, j] != 0)[0])
)
C[i, j] = np.where(L[overlaps, i] != L[overlaps, j], 1, 0).sum()
if normalize:
C = C / n
return C
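# Worked micro-example (illustrative only) for the two helpers above, using a tiny
# label matrix with n=3 data points and m=2 labeling functions:
#
#     L = np.array([[1, 2],
#                   [0, 2],
#                   [1, 1]])
#     _get_overlaps_matrix(L, normalize=False)   # [[2, 2], [2, 3]]
#     _get_conflicts_matrix(L, normalize=False)  # [[0, 1], [1, 0]]
#
# The two LFs both vote on rows 0 and 2 (overlap count 2) but disagree only on
# row 0 (conflict count 1); with normalize=True both matrices are divided by n.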
############################################################
# Classifier Diagnostics
############################################################
def plot_probabilities_histogram(Y_probs, title=None):
"""Plot a histogram from a numpy array of probabilities
Args:
Y_probs: An [n] or [n, 1] np.ndarray of probabilities (floats in [0,1])
"""
if Y_probs.ndim > 1:
print("Plotting probabilities from the first column of Y_probs")
Y_probs = Y_probs[:, 0]
plt.hist(Y_probs, bins=20)
plt.xlim((0, 1.025))
plt.xlabel("Probability")
plt.ylabel("# Predictions")
if isinstance(title, str):
plt.title(title)
plt.show()
def plot_predictions_histogram(Y_preds, Y_gold, title=None):
"""Plot a histogram comparing int predictions vs true labels by class
Args:
Y_gold: An [n] or [n, 1] np.ndarray of gold labels
Y_preds: An [n] or [n, 1] np.ndarray of predicted int labels
"""
labels = list(set(Y_gold).union(set(Y_preds)))
edges = [x - 0.5 for x in range(min(labels), max(labels) + 2)]
plt.hist([Y_preds, Y_gold], bins=edges, label=["Predicted", "Gold"])
ax = plt.gca()
ax.set_xticks(labels)
plt.xlabel("Label")
plt.ylabel("# Predictions")
plt.legend(loc="upper right")
if isinstance(title, str):
plt.title(title)
plt.show()
def plot_calibration_plot(Y_probs, Y_gold, bins=20, title=None):
"""Plot a histogram of the accuracy for predictions with varying confidences
Args:
Y_probs: An [n] or [n, 1] np.ndarray of probabilities (floats in [0,1])
Y_gold: An [n] or [n, 1] np.ndarray of gold labels
For a well-behaved classifier, the plot should be a U-shape.
"""
# For now, we only tackle binary classification with categorical labels
assert all(Y_gold > 0)
assert all(Y_gold <= 2)
if Y_probs.ndim > 1:
print("Plotting probabilities from the first column of Y_probs")
Y_probs = Y_probs[:, 0]
Y_preds = convert_labels((Y_probs > 0.5).astype(np.int64), "onezero", "categorical")
correct_idxs = Y_preds == Y_gold
centers = []
accuracies = []
interval = 1 / bins
for i in range(bins + 1):
if i == bins:
bin_idxs = (interval * i <= Y_probs) * (Y_probs <= 1)
else:
bin_idxs = (interval * i <= Y_probs) * (Y_probs < interval * (i + 1))
bin_accuracy = sum(bin_idxs * correct_idxs) / sum(bin_idxs)
centers.append(interval * (i + 0.5))
accuracies.append(bin_accuracy)
# print("Accuracy: ", len(correct_idx) / (1.0 * len(Y_probs)))
# Y_p_correct = Y_probs[correct_idx]
plt.plot(centers, accuracies)
plt.xlim((0, 1.025))
plt.xlabel("Probability")
plt.ylabel("Accuracy")
    if isinstance(title, str):
        plt.title(title)
    plt.show()
|
apache-2.0
|
geoscixyz/em_examples
|
em_examples/LinearInversion.py
|
1
|
17361
|
import numpy as np
from SimPEG import Mesh
from SimPEG import Problem
from SimPEG import Survey
from SimPEG import DataMisfit
from SimPEG import Directives
from SimPEG import Optimization
from SimPEG import Regularization
from SimPEG import InvProblem
from SimPEG import Inversion
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# from pymatsolver import Pardiso
import matplotlib
from ipywidgets import (
interact, FloatSlider, ToggleButtons, IntSlider, FloatText, IntText, SelectMultiple
)
import ipywidgets as widgets
class LinearInversionApp(object):
"""docstring for LinearInversionApp"""
# Parameters for sensitivity matrix, G
N=None
M=None
j_start=None
j_end=None
p=None
q=None
seed=None
# Parameters for Model
m_background= None
m1=None
m2=None
m1_center=None
dm1 =None
m2_center=None
dm2 =None
sigma =None
m_min =None
m_max =None
data=None
save=None
def __init__(self):
super(LinearInversionApp, self).__init__()
@property
def G(self):
return self._G
@property
def jk(self):
return self._jk
@property
def mesh(self):
return self._mesh
def set_G(
self,
N=20,
M=100,
p=-0.25,
q=0.25,
j1=1,
jn=60,
):
"""
Parameters
----------
N: # of data
M: # of model parameters
...
"""
self.N=N
self.M=M
self._mesh=Mesh.TensorMesh([M])
jk=np.linspace(j1, jn, N)
self._G=np.zeros((N, self.mesh.nC), dtype=float, order='C')
def g(k):
return (
np.exp(p*jk[k]*self.mesh.vectorCCx) *
np.cos(np.pi*q*jk[k]*self.mesh.vectorCCx)
)
for i in range(N):
self._G[i, :] = g(i) * self.mesh.hx
self._jk = jk
def plot_G(
self,
N=20,
M=100,
p=-0.25,
q=0.25,
j1=1,
jn=60,
scale='log',
fixed=False,
ymin=-0.001,
ymax=0.011
):
self.set_G(
N=N,
M=M,
p=p,
q=q,
j1=j1,
jn=jn,
)
_, s, _ = np.linalg.svd(self.G, full_matrices=False)
matplotlib.rcParams['font.size']=14
fig=plt.figure(figsize=(10, 4))
gs1 = gridspec.GridSpec(1, 4)
ax1 = plt.subplot(gs1[0, :3])
ax2 = plt.subplot(gs1[0, 3:])
ax1.plot(self.mesh.vectorCCx, self.G.T)
if fixed:
ax1.set_ylim(ymin, ymax)
ax1.set_xlabel("x")
ax1.set_ylabel("g(x)")
ax2.plot(np.arange(self.N)+1, s, 'ro')
ax2.set_xlabel("")
ax2.set_title("singular values", fontsize=12)
ax2.set_xscale(scale)
ax2.set_yscale(scale)
ax2.xaxis.set_major_locator(plt.NullLocator())
ax2.xaxis.set_minor_locator(plt.NullLocator())
ax2.xaxis.set_major_formatter(plt.NullFormatter())
ax2.xaxis.set_minor_formatter(plt.NullFormatter())
plt.tight_layout()
plt.show()
def set_model(
self,
m_background=0.,
m1=1.,
m2=-1.,
m1_center=0.2,
dm1=0.2,
m2_center=0.5,
sigma_2=1.,
):
m=np.zeros(self.mesh.nC) + m_background
m1_inds=np.logical_and(self.mesh.vectorCCx > m1_center-dm1/2., self.mesh.vectorCCx < m1_center+dm1/2.)
m[m1_inds]=m1
def gaussian(x,x0,sigma):
return np.exp(-np.power((x - x0)/sigma, 2.)/2.)
m += gaussian(self.mesh.vectorCCx, m2_center, sigma_2) * m2
return m
def plot_model(
self,
m_background=0.,
m1=1.,
m1_center=0.2,
dm1=0.2,
m2=-1.,
m2_center=0.5,
sigma_2=1.,
option="model",
add_noise=True,
percentage =10,
floor=1e-1,
):
m=self.set_model(
m_background=m_background,
m1=m1,
m2=m2,
m1_center=m1_center,
dm1=dm1,
m2_center=m2_center,
sigma_2=sigma_2,
)
np.random.seed(1)
if add_noise:
survey, _=self.get_problem_survey()
data=survey.dpred(m)
noise=abs(data)*percentage * 0.01 *np.random.randn(self.N) + np.random.randn(self.N)*floor
else:
survey, _=self.get_problem_survey()
data=survey.dpred(m)
noise=np.zeros(self.N, float)
data += noise
self.data=data.copy()
self.m=m.copy()
self.uncertainty=abs(self.data) * percentage* 0.01 + floor
self.percentage = percentage
self.floor = floor
option_bools = [False, False, False]
for item in option:
if item == 'kernel':
option_bools[0] = True
elif item == 'model':
option_bools[1] = True
elif item == 'data':
option_bools[2] = True
fig, axes = plt.subplots(1, 3, figsize=(12*1.2, 3*1.2))
for i, ax in enumerate(axes):
if option_bools[i]:
if i == 0:
ax.plot(self.mesh.vectorCCx, self.G.T)
ax.set_title('Rows of matrix G')
ax.set_xlabel("x")
ax.set_ylabel("g(x)")
elif i == 1:
ax.plot(self.mesh.vectorCCx, m)
ax.set_ylim([-2.5, 2.5])
ax.set_title('Model')
ax.set_xlabel("x")
ax.set_ylabel("m(x)")
ax.set_ylabel("$d_j$")
elif i == 2:
if add_noise:
# this is just for visualization of uncertainty
ax.errorbar(
x=self.jk, y=self.data,
yerr=self.uncertainty,
color='k', lw=1
)
ax.plot(self.jk, self.data, 'ko')
else:
ax.plot(self.jk, self.data, 'ko-')
ax.set_title('Data')
ax.set_xlabel("$k_j$")
for i, ax in enumerate(axes):
if not option_bools[i]:
ax.axis('off')
# ax.xaxis.set_minor_locator(plt.NullLocator())
# ax.xaxis.set_major_formatter(plt.NullFormatter())
# ax.xaxis.set_minor_formatter(plt.NullFormatter())
# ax.yaxis.set_major_locator(plt.NullLocator())
# ax.yaxis.set_minor_locator(plt.NullLocator())
# ax.yaxis.set_major_formatter(plt.NullFormatter())
# ax.yaxis.set_minor_formatter(plt.NullFormatter())
plt.tight_layout()
def get_problem_survey(self):
prob=Problem.LinearProblem(self.mesh, G=self.G)
survey=Survey.LinearSurvey()
survey.pair(prob)
return survey, prob
def run_inversion(
self,
maxIter=60,
m0=0.,
mref=0.,
percentage=5,
floor=0.1,
chifact=1,
beta0_ratio=1.,
coolingFactor=1,
coolingRate=1,
alpha_s=1.,
alpha_x=1.,
use_target=False
):
survey, prob=self.get_problem_survey()
survey.eps=percentage
survey.std=floor
survey.dobs=self.data.copy()
self.uncertainty = percentage*abs(survey.dobs)*0.01 + floor
m0=np.ones(self.M) * m0
mref=np.ones(self.M) * mref
reg=Regularization.Tikhonov(
self.mesh,
alpha_s=alpha_s,
alpha_x=alpha_x,
mref=mref
)
dmis=DataMisfit.l2_DataMisfit(survey)
dmis.W=1./self.uncertainty
opt=Optimization.InexactGaussNewton(
maxIter=maxIter,
maxIterCG=20
)
opt.remember('xc')
opt.tolG=1e-10
opt.eps=1e-10
invProb=InvProblem.BaseInvProblem(dmis, reg, opt)
save=Directives.SaveOutputEveryIteration()
beta_schedule=Directives.BetaSchedule(
coolingFactor=coolingFactor,
coolingRate=coolingRate
)
target=Directives.TargetMisfit(chifact=chifact)
if use_target:
directives=[
Directives.BetaEstimate_ByEig(beta0_ratio=beta0_ratio),
beta_schedule,
target,
save
]
else:
directives=[
Directives.BetaEstimate_ByEig(beta0_ratio=beta0_ratio),
beta_schedule,
save
]
inv=Inversion.BaseInversion(invProb, directiveList=directives)
mopt=inv.run(m0)
model = opt.recall('xc')
model.append(mopt)
pred = []
for m in model:
pred.append(survey.dpred(m))
return model, pred, save
def plot_inversion(
self,
maxIter=60,
m0=0.,
mref=0.,
percentage=5,
floor=0.1,
chifact=1,
beta0_ratio=1.,
coolingFactor=1,
coolingRate=1,
alpha_s=1.,
alpha_x=1.,
use_target=False,
run=True,
option ='model',
i_iteration=1,
):
if run:
self.model, self.pred, self.save=self.run_inversion(
maxIter=maxIter,
m0=m0,
mref=mref,
percentage=percentage,
floor=floor,
chifact=chifact,
beta0_ratio=beta0_ratio,
coolingFactor=coolingFactor,
coolingRate=coolingRate,
alpha_s=alpha_s,
alpha_x=alpha_x,
use_target=use_target,
)
if len(self.model) == 2:
fig, axes=plt.subplots(1, 2, figsize=(14*1.2 *2/3, 3*1.2))
i_plot = -1
else:
self.save.load_results()
if self.save.i_target is None:
i_plot = -1
else:
i_plot = self.save.i_target + 1
fig, axes=plt.subplots(1, 3, figsize=(14*1.2, 3*1.2))
axes[0].plot(self.mesh.vectorCCx, self.m)
if run:
axes[0].plot(self.mesh.vectorCCx, self.model[i_plot])
axes[0].set_ylim([-2.5, 2.5])
axes[1].errorbar(
x=self.jk, y=self.data,
yerr=self.uncertainty,
color='k', lw=1
)
axes[1].plot(self.jk, self.data, 'ko')
if run:
axes[1].plot(self.jk, self.pred[i_plot], 'bx')
axes[1].legend(("Observed", "Predicted"))
axes[0].legend(("True", "Pred"))
axes[0].set_title('Model')
axes[0].set_xlabel("x")
axes[0].set_ylabel("m(x)")
axes[1].set_title('Data')
axes[1].set_xlabel("$k_j$")
axes[1].set_ylabel("$d_j$")
if len(self.model) > 2:
max_iteration = len(self.model)-1
if i_iteration > max_iteration:
print ((">> Warning: input iteration (%i) is greater than maximum iteration (%i)") % (i_iteration, len(self.model)-1))
i_iteration = max_iteration
if option == 'misfit':
if not run:
axes[0].plot(self.mesh.vectorCCx, self.model[i_iteration])
axes[1].plot(self.jk, self.pred[i_iteration], 'bx')
# axes[0].legend(("True", "Pred", ("%ith")%(i_iteration)))
# axes[1].legend(("Observed", "Predicted", ("%ith")%(i_iteration)))
axes[1].legend(("Observed", "Predicted"))
if i_iteration == 0:
i_iteration = 1
axes[2].plot(np.arange(len(self.save.phi_d))[i_iteration-1]+1, self.save.phi_d[i_iteration-1]*2, 'go', ms=10)
ax_1 = axes[2].twinx()
axes[2].semilogy(np.arange(len(self.save.phi_d))+1, self.save.phi_d*2, 'k-', lw=2)
if self.save.i_target is not None:
axes[2].plot(np.arange(len(self.save.phi_d))[self.save.i_target]+1, self.save.phi_d[self.save.i_target]*2, 'k*', ms=10)
axes[2].plot(np.r_[axes[2].get_xlim()[0], axes[2].get_xlim()[1]], np.ones(2)*self.save.target_misfit*2, 'k:')
ax_1.semilogy(np.arange(len(self.save.phi_d))+1, self.save.phi_m, 'r', lw=2)
axes[2].set_xlabel("Iteration")
axes[2].set_ylabel("$\phi_d$")
ax_1.set_ylabel("$\phi_m$", color='r')
for tl in ax_1.get_yticklabels():
tl.set_color('r')
axes[2].set_title('Misfit curves')
elif option == 'tikhonov':
if not run:
axes[0].plot(self.mesh.vectorCCx, self.model[i_iteration])
axes[1].plot(self.jk, self.pred[i_iteration], 'bx')
# axes[0].legend(("True", "Pred", ("%ith")%(i_iteration)))
# axes[1].legend(("Observed", "Predicted", ("%ith")%(i_iteration)))
axes[0].legend(("True", "Pred"))
axes[1].legend(("Observed", "Predicted"))
if i_iteration == 0:
i_iteration = 1
axes[2].plot(self.save.phi_m[i_iteration-1], self.save.phi_d[i_iteration-1]*2, 'go', ms=10)
axes[2].plot(self.save.phi_m, self.save.phi_d*2, 'k-', lw=2)
axes[2].set_xlim(np.hstack(self.save.phi_m).min(), np.hstack(self.save.phi_m).max())
axes[2].set_xlabel("$\phi_m$", fontsize=14)
axes[2].set_ylabel("$\phi_d$", fontsize=14)
if self.save.i_target is not None:
axes[2].plot(self.save.phi_m[self.save.i_target], self.save.phi_d[self.save.i_target]*2., 'k*', ms=10)
axes[2].set_title('Tikhonov curve')
plt.tight_layout()
def interact_plot_G(self):
Q=interact(
self.plot_G,
N=IntSlider(min=1, max=100, step=1, value=20, continuous_update=False),
M=IntSlider(min=1, max=100, step=1, value=100, continuous_update=False),
p =FloatSlider(min=-1, max=0, step=0.05, value=-0.15, continuous_update=False),
q=FloatSlider(min=0, max=1, step=0.05, value=0.25, continuous_update=False),
j1 =FloatText(value=1.),
jn=FloatText(value=19.),
scale=ToggleButtons(
options=["linear", "log"], value="log"
),
fixed=False,
ymin=FloatText(value=-0.005),
ymax=FloatText(value=0.011),
)
return Q
def interact_plot_model(self):
Q=interact(
self.plot_model,
m_background=FloatSlider(
min=-2, max=2, step=0.05, value=0., continuous_update=False, description="m$_{background}$",
),
m1=FloatSlider(
min=-2, max=2, step=0.05, value=1., continuous_update=False, description="m1",
),
m2=FloatSlider(
min=-2, max=2, step=0.05, value=2., continuous_update=False, description="m2",
),
m1_center=FloatSlider(
min=-2, max=2, step=0.05, value=0.2, continuous_update=False, description="m1$_{center}$",
),
dm1 =FloatSlider(
min=0, max=0.5, step=0.05, value=0.2, continuous_update=False, description="m1$_{width}$",
),
m2_center=FloatSlider(
min=-2, max=2, step=0.05, value=0.75, continuous_update=False, description="m2$_{center}$",
),
sigma_2=FloatSlider(
min=0.01, max=0.1, step=0.01, value=0.07, continuous_update=False, description="m2$_{sigma}$",
),
option=SelectMultiple(
options=["kernel", "model", "data"],
value=["model"],
description='option'
),
percentage=FloatText(value=5),
floor=FloatText(value=0.02),
)
return Q
def interact_plot_inversion(self, maxIter=30):
Q = interact(
self.plot_inversion,
maxIter=IntText(value=maxIter),
m0=FloatSlider(min=-2, max=2, step=0.05, value=0., continuous_update=False),
mref=FloatSlider(min=-2, max=2, step=0.05, value=0., continuous_update=False),
percentage=FloatText(value=self.percentage),
floor=FloatText(value=self.floor),
chifact=FloatText(value=1.),
beta0_ratio=FloatText(value=100),
coolingFactor=FloatSlider(min=0.1, max=10, step=1, value=2, continuous_update=False),
coolingRate=IntSlider(min=1, max=10, step=1, value=1, continuous_update=False),
alpha_s=FloatText(value=1e-10),
alpha_x=FloatText(value=0),
run = True,
target = False,
option=ToggleButtons(
options=["misfit", "tikhonov"], value="misfit"
),
i_iteration=IntSlider(min=0, max=maxIter, step=1, value=0, continuous_update=False)
)
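# Minimal usage sketch (assumed notebook workflow; not part of the original class):
#
#     app = LinearInversionApp()
#     app.interact_plot_G()          # pick the kernel parameters (builds app.G)
#     app.interact_plot_model()      # build a model and simulate noisy data
#     app.interact_plot_inversion()  # run and inspect the Tikhonov inversion
#
# interact_plot_model must be run before interact_plot_inversion, because the
# latter reads app.percentage and app.floor that are set when data are simulated.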
|
mit
|
ryfeus/lambda-packs
|
LightGBM_sklearn_scipy_numpy/source/sklearn/neighbors/base.py
|
19
|
30908
|
"""Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist : ndarray
The input distances
weights : {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr : array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
elif callable(metric) or metric in VALID_METRICS['ball_tree']:
alg_check = 'ball_tree'
else:
alg_check = 'brute'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
elif (callable(self.effective_metric_) or
self.effective_metric_ in VALID_METRICS['ball_tree']):
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
            A[i, j] is assigned the weight of the edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=self.n_jobs, squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
n_jobs=self.n_jobs,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
            A[i, j] is assigned the weight of the edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
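# The concrete estimators in this package are assembled from the pieces above;
# roughly (illustrative only), KNeighborsClassifier mixes NeighborsBase,
# KNeighborsMixin and SupervisedIntegerMixin, so fit() comes from the supervised
# mixin, kneighbors()/kneighbors_graph() from KNeighborsMixin, and all of them
# share the index built by NeighborsBase._fit().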
|
mit
|
AlanZatarain/pysal
|
pysal/contrib/viz/mapping.py
|
5
|
24991
|
"""
Choropleth mapping using PySAL and Matplotlib
"""
__author__ = "Sergio Rey <[email protected]>", "Dani Arribas-Bel <[email protected]"
import pysal as ps
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors as clrs
import matplotlib as mpl
from matplotlib.pyplot import fill, text
from matplotlib import cm
from matplotlib.patches import Polygon
from matplotlib.collections import PolyCollection, PathCollection, PatchCollection
from mpl_toolkits.basemap import Basemap
from ogr import osr
def transCRS(xy, src_prj, trt_prj):
'''
Re-project a 2D array of xy coordinates from one prj file to another
...
Arguments
---------
xy : ndarray
nx2 array with coordinates to be reprojected. First column
is X axis, second is Y axis
src_prj : str
Path to .prj file of the source Coordinate Reference System
(CRS) of `xy`
trt_prj : str
Path to .prj file of the target Coordinate Reference System
(CRS) to reproject `xy`
Returns
-------
xyp : ndarray
nx2 array with reprojected coordinates. First column
is X axis, second is Y axis
'''
orig = osr.SpatialReference()
orig.ImportFromWkt(open(src_prj).read())
target = osr.SpatialReference()
target.ImportFromWkt(open(trt_prj).read())
trCRS = osr.CoordinateTransformation(orig, target)
return np.array(trCRS.TransformPoints(xy))[:, :2]
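# Illustrative usage sketch for transCRS (not part of the original module).
# The .prj paths are hypothetical placeholders; any pair of valid ESRI .prj
# files describing the source and target CRS would do.
def _transCRS_example():
    pts = np.array([[-105.0, 40.0], [-104.5, 39.5]])  # lon/lat pairs
    src_prj = '/path/to/source.prj'  # hypothetical
    trt_prj = '/path/to/target.prj'  # hypothetical
    return transCRS(pts, src_prj, trt_prj)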
def map_poly_shp(shp_link, which='all'):
'''
Create a map object from a shapefile
...
Arguments
---------
shp_link : str
Path to shapefile
    which           : str/list
                      'all' (default) to include every shape in the shapefile,
                      or a boolean sequence flagging which shapes to draw
Returns
-------
map : PatchCollection
Map object with the polygons from the shapefile
'''
shp = ps.open(shp_link)
if which == 'all':
db = ps.open(shp_link.replace('.shp', '.dbf'))
n = len(db.by_col(db.header[0]))
db.close()
which = [True] * n
patches = []
for inwhich, shape in zip(which, shp):
if inwhich:
for ring in shape.parts:
xy = np.array(ring)
patches.append(xy)
return PolyCollection(patches)
def map_poly_shp_lonlat(shp_link, projection='merc'):
'''
Create a map object from a shapefile in lon/lat CRS using Basemap
...
Arguments
---------
shp_link : str
Path to shapefile
projection : str
Basemap projection. See [1]_ for a list. Defaults to
'merc'
Returns
-------
map : PatchCollection
Map object with the polygons from the shapefile
Links
-----
.. [1] <http://matplotlib.org/basemap/api/basemap_api.html#module-mpl_toolkits.basemap>
'''
shp = ps.open(shp_link)
shps = list(shp)
left, bottom, right, top = shp.bbox
m = Basemap(resolution = 'i', projection=projection,
llcrnrlat=bottom, urcrnrlat=top,
llcrnrlon=left, urcrnrlon=right,
lat_ts=(bottom+top)/2,
lon_0=(right-left)/2, lat_0=(top-bottom)/2)
bounding_box = [m.llcrnrx, m.llcrnry,m.urcrnrx,m.urcrnry]
patches = []
for shape in shps:
parts = []
for ring in shape.parts:
xy = np.array(ring)
x,y = m(xy[:,0], xy[:,1])
x = x / bounding_box[2]
y = y / bounding_box[3]
n = len(x)
x.shape = (n,1)
y.shape = (n,1)
xy = np.hstack((x,y))
polygon = Polygon(xy, True)
patches.append(polygon)
return PatchCollection(patches)
def setup_ax(polyCos_list, ax=None):
'''
Generate an Axes object for a list of collections
...
Arguments
---------
polyCos_list: list
List of Matplotlib collections (e.g. an object from
map_poly_shp)
ax : AxesSubplot
(Optional) Pre-existing axes to which append the collections
and setup
Returns
-------
ax : AxesSubplot
Rescaled axes object with the collection and without frame
or X/Yaxis
'''
if not ax:
ax = plt.axes()
for polyCo in polyCos_list:
ax.add_collection(polyCo)
ax.autoscale_view()
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
return ax
def plot_poly_lines(shp_link, projection='merc', savein=None, poly_col='none'):
'''
Quick plotting of shapefiles
...
Arguments
---------
shp_link : str
Path to shapefile
projection : str
Basemap projection. See [1]_ for a list. Defaults to
'merc'
savein : str
Path to png file where to dump the plot. Optional,
defaults to None
poly_col : str
Face color of polygons
'''
fig = plt.figure()
ax = fig.add_subplot(111)
patchco = map_poly_shp_lonlat(shp_link, projection=projection)
patchco.set_facecolor('none')
ax.add_collection(patchco)
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
if savein:
plt.savefig(savein)
else:
plt.show()
return None
def plot_choropleth(shp_link, values, type, k=5, cmap='hot_r', \
projection='merc', sample_fisher=True, title='', \
savein=None, figsize=None):
'''
Wrapper to quickly create and plot from a lat/lon shapefile
...
Arguments
---------
shp_link : str
Path to shapefile
values : array
Numpy array with values to map
type : str
Type of choropleth. Supported methods:
* 'classless'
* 'unique_values'
                      * 'quantiles'
* 'fisher_jenks'
* 'equal_interval'
k : int
Number of bins to classify values in and assign a color
to
cmap : str
Matplotlib coloring scheme
projection : str
Basemap projection. See [1]_ for a list. Defaults to
'merc'
sample_fisher : Boolean
Defaults to True, controls whether Fisher-Jenks
classification uses a sample (faster) or the entire
array of values. Ignored if 'classification'!='fisher_jenks'
title : str
Optional string for the title
savein : str
Path to png file where to dump the plot. Optional,
defaults to None
figsize : tuple
Figure dimensions
Returns
-------
map : PatchCollection
Map object with the polygons from the shapefile and
unique value coloring
Links
-----
.. [1] <http://matplotlib.org/basemap/api/basemap_api.html#module-mpl_toolkits.basemap>
'''
if type == 'classless':
map_obj = base_choropleth_classless(shp_link, values, cmap=cmap, \
projection=projection)
if type == 'unique_values':
map_obj = base_choropleth_unique(shp_link, values, cmap=cmap, \
projection=projection)
    if type == 'quantiles':
        map_obj = base_choropleth_classif(shp_link, values, k=k, \
                classification='quantiles', cmap=cmap, projection=projection)
    if type == 'fisher_jenks':
        map_obj = base_choropleth_classif(shp_link, values, k=k, \
                classification='fisher_jenks', cmap=cmap, \
                projection=projection, sample_fisher=sample_fisher)
    if type == 'equal_interval':
        map_obj = base_choropleth_classif(shp_link, values, k=k, \
                classification='equal_interval', cmap=cmap, projection=projection)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ax.add_collection(map_obj)
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
if title:
ax.set_title(title)
if type=='quantiles' or type=='fisher_jenks' or type=='equal_interval':
cmap = map_obj.get_cmap()
norm = map_obj.norm
boundaries = np.round(map_obj.norm.boundaries, decimals=3)
plt.colorbar(map_obj, cmap=cmap, norm=norm, boundaries=boundaries, \
ticks=boundaries, orientation='horizontal')
if savein:
plt.savefig(savein)
else:
plt.show()
return None
def base_choropleth_classless(shp_link, values, cmap='hot_r', projection='merc'):
'''
Create a map object with classless coloring from a shapefile in lon/lat CRS
...
Arguments
---------
shp_link : str
Path to shapefile
values : array
Numpy array with values to map
cmap : str
Matplotlib coloring scheme
projection : str
Basemap projection. See [1]_ for a list. Defaults to
'merc'
Returns
-------
map : PatchCollection
Map object with the polygons from the shapefile and
classless coloring
Links
-----
.. [1] <http://matplotlib.org/basemap/api/basemap_api.html#module-mpl_toolkits.basemap>
'''
cmap = cm.get_cmap(cmap)
map_obj = map_poly_shp_lonlat(shp_link, projection=projection)
map_obj.set_cmap(cmap)
map_obj.set_array(values)
return map_obj
def base_choropleth_unique(shp_link, values, cmap='hot_r', projection='merc'):
'''
Create a map object with coloring based on unique values from a shapefile in lon/lat CRS
...
Arguments
---------
shp_link : str
Path to shapefile
values : array
Numpy array with values to map
cmap : str
Matplotlib coloring scheme
projection : str
Basemap projection. See [1]_ for a list. Defaults to
'merc'
Returns
-------
map : PatchCollection
Map object with the polygons from the shapefile and
unique value coloring
Links
-----
.. [1] <http://matplotlib.org/basemap/api/basemap_api.html#module-mpl_toolkits.basemap>
'''
uvals = np.unique(values)
colormap = plt.cm.Set1
colors = [colormap(i) for i in np.linspace(0, 0.9, len(uvals))]
colors = np.random.permutation(colors)
colormatch = {val: col for val, col in zip(uvals, colors)}
map_obj = map_poly_shp_lonlat(shp_link, projection=projection)
map_obj.set_color([colormatch[i] for i in values])
map_obj.set_edgecolor('k')
return map_obj
def base_choropleth_classif(shp_link, values, classification='quantiles', \
k=5, cmap='hot_r', projection='merc', sample_fisher=True):
'''
Create a map object with coloring based on different classification
methods, from a shapefile in lon/lat CRS
...
Arguments
---------
shp_link : str
Path to shapefile
values : array
Numpy array with values to map
classification : str
                      Classification method to use. Options supported:
* 'quantiles' (default)
* 'fisher_jenks'
* 'equal_interval'
k : int
Number of bins to classify values in and assign a color
to
cmap : str
Matplotlib coloring scheme
projection : str
Basemap projection. See [1]_ for a list. Defaults to
'merc'
sample_fisher : Boolean
Defaults to True, controls whether Fisher-Jenks
classification uses a sample (faster) or the entire
array of values. Ignored if 'classification'!='fisher_jenks'
Returns
-------
map : PatchCollection
Map object with the polygons from the shapefile and
unique value coloring
Links
-----
.. [1] <http://matplotlib.org/basemap/api/basemap_api.html#module-mpl_toolkits.basemap>
'''
if classification == 'quantiles':
classification = ps.Quantiles(values, k)
boundaries = classification.bins.tolist()
if classification == 'equal_interval':
classification = ps.Equal_Interval(values, k)
boundaries = classification.bins.tolist()
if classification == 'fisher_jenks':
if sample_fisher:
classification = ps.esda.mapclassify.Fisher_Jenks_Sampled(values,k)
else:
classification = ps.Fisher_Jenks(values,k)
boundaries = classification.bins[:]
map_obj = map_poly_shp_lonlat(shp_link, projection=projection)
map_obj.set_alpha(0.4)
cmap = cm.get_cmap(cmap, k+1)
map_obj.set_cmap(cmap)
boundaries.insert(0,0)
norm = clrs.BoundaryNorm(boundaries, cmap.N)
map_obj.set_norm(norm)
map_obj.set_array(values)
return map_obj
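# Illustrative usage sketch (not part of the original module), mirroring the
# data used in the __main__ block at the bottom of this file: a quantile
# choropleth of the SIDR74 variable from PySAL's sids2 sample shapefile.
def _plot_choropleth_example():
    shp = ps.examples.get_path('sids2.shp')
    dbf = ps.open(shp.replace('.shp', '.dbf'))
    vals = np.array(dbf.by_col('SIDR74'))
    plot_choropleth(shp, vals, 'quantiles', k=5, cmap='hot_r', title='SIDR74')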
#############################
### Serge's original code ###
#############################
class Map_Projection(object):
"""Map_Projection
Parameters
==========
shapefile: name of shapefile with .shp extension
    projection: Basemap projection name (e.g. 'merc')
Returns
=======
projected: list of lists
projected coordinates for each shape in the shapefile. Each
sublist contains projected coordinates for parts of a shape
"""
def __init__(self, shapefile, projection='merc'):
super(Map_Projection, self).__init__()
self.projection = projection
shp_reader = ps.open(shapefile)
shps = []
for shp in shp_reader:
shps.append(shp)
left = shp_reader.header['BBOX Xmin']
right = shp_reader.header['BBOX Xmax']
bottom = shp_reader.header['BBOX Ymin']
top = shp_reader.header['BBOX Ymax']
        m = Basemap(resolution = 'i', projection=self.projection,
llcrnrlat=bottom, urcrnrlat=top,
llcrnrlon=left, urcrnrlon=right,
lat_ts=(bottom+top)/2)
projected = []
for shp in shps:
parts = []
for ring in shp.parts:
xy = np.array(ring)
x,y = m(xy[:,0], xy[:,1])
parts.append([x,y])
projected.append(parts)
results = {}
self.projected = projected
self.bounding_box = [m.llcrnrx, m.llcrnry,m.urcrnrx,m.urcrnry]
self.shapefile = shapefile
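# Illustrative usage sketch (not part of the original module): project a
# shapefile once with Map_Projection and reuse the result for the map
# functions below. It mirrors the sids2/SIDR74 data used in __main__.
def _map_projection_example():
    shp = ps.examples.get_path('sids2.shp')
    dbf = ps.open(shp.replace('.shp', '.dbf'))
    y = np.array(dbf.by_col('SIDR74'))
    coords = Map_Projection(shp)
    return quantile_map(coords, y, 5, title='SIDR74 quantiles')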
def equal_interval_map(coords, y, k, title='Equal Interval'):
"""
coords: Map_Projection instance
y: array
variable to map
k: int
number of classes
title: string
map title
"""
classification = ps.Equal_Interval(y,k)
fig = plt.figure()
ax = fig.add_subplot(111)
patches = []
colors = []
i = 0
shape_colors = classification.bins[classification.yb]
shape_colors = y
#classification.bins[classification.yb]
for shp in coords.projected:
for ring in shp:
x,y = ring
x = x / coords.bounding_box[2]
y = y / coords.bounding_box[3]
n = len(x)
x.shape = (n,1)
y.shape = (n,1)
xy = np.hstack((x,y))
polygon = Polygon(xy, True)
patches.append(polygon)
colors.append(shape_colors[i])
i += 1
cmap = cm.get_cmap('hot_r', k+1)
boundaries = classification.bins.tolist()
boundaries.insert(0,0)
norm = clrs.BoundaryNorm(boundaries, cmap.N)
p = PatchCollection(patches, cmap=cmap, alpha=0.4, norm=norm)
colors = np.array(colors)
p.set_array(colors)
ax.add_collection(p)
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.set_title(title)
plt.colorbar(p, cmap=cmap, norm = norm, boundaries = boundaries, ticks=
boundaries)
plt.show()
return classification
def fisher_jenks_map(coords, y, k, title='Fisher-Jenks', sampled=False):
"""
coords: Map_Projection instance
y: array
variable to map
k: int
number of classes
title: string
map title
sampled: binary
if True classification bins obtained on a sample of y and then
applied. Useful for large n arrays
"""
if sampled:
classification = ps.esda.mapclassify.Fisher_Jenks_Sampled(y,k)
else:
classification = ps.Fisher_Jenks(y,k)
fig = plt.figure()
ax = fig.add_subplot(111)
patches = []
colors = []
i = 0
shape_colors = y
#classification.bins[classification.yb]
for shp in coords.projected:
for ring in shp:
x,y = ring
x = x / coords.bounding_box[2]
y = y / coords.bounding_box[3]
n = len(x)
x.shape = (n,1)
y.shape = (n,1)
xy = np.hstack((x,y))
polygon = Polygon(xy, True)
patches.append(polygon)
colors.append(shape_colors[i])
i += 1
cmap = cm.get_cmap('hot_r', k+1)
boundaries = classification.bins[:]
#print boundaries
#print min(shape_colors) > 0.0
if min(shape_colors) > 0.0:
boundaries.insert(0,0)
else:
boundaries.insert(0, boundaries[0] - boundaries[1])
#print boundaries
norm = clrs.BoundaryNorm(boundaries, cmap.N)
p = PatchCollection(patches, cmap=cmap, alpha=0.4, norm=norm)
colors = np.array(colors)
p.set_array(colors)
ax.add_collection(p)
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.set_title(title)
plt.colorbar(p, cmap=cmap, norm = norm, boundaries = boundaries, ticks=
boundaries)
plt.show()
return classification
def quantile_map(coords,y,k, title='Quantile'):
"""
Quantile choropleth map
Arguments
=========
coords: Map_Projection instance
y: array
variable to map
k: int
number of classes
title: string
map title
"""
classification = ps.Quantiles(y,k)
fig = plt.figure()
ax = fig.add_subplot(111)
patches = []
colors = []
i = 0
shape_colors = classification.bins[classification.yb]
shape_colors = y
#classification.bins[classification.yb]
for shp in coords.projected:
for ring in shp:
x,y = ring
x = x / coords.bounding_box[2]
y = y / coords.bounding_box[3]
n = len(x)
x.shape = (n,1)
y.shape = (n,1)
xy = np.hstack((x,y))
polygon = Polygon(xy, True)
patches.append(polygon)
colors.append(shape_colors[i])
i += 1
cmap = cm.get_cmap('hot_r', k+1)
boundaries = classification.bins.tolist()
boundaries.insert(0,0)
norm = clrs.BoundaryNorm(boundaries, cmap.N)
p = PatchCollection(patches, cmap=cmap, alpha=0.4, norm=norm)
colors = np.array(colors)
p.set_array(colors)
ax.add_collection(p)
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.set_title(title)
plt.colorbar(p, cmap=cmap, norm = norm, boundaries = boundaries, ticks=
boundaries)
plt.show()
return classification
def classless_map(coords,y, title='Classless'):
"""
Classless choropleth map
Arguments
=========
coords: Map_Projection instance
y: array
variable to map
title: string
map title
"""
fig = plt.figure()
ax = fig.add_subplot(111)
patches = []
colors = []
i = 0
shape_colors = y
for shp in coords.projected:
for ring in shp:
x,y = ring
x = x / coords.bounding_box[2]
y = y / coords.bounding_box[3]
n = len(x)
x.shape = (n,1)
y.shape = (n,1)
xy = np.hstack((x,y))
polygon = Polygon(xy, True)
patches.append(polygon)
colors.append(shape_colors[i])
i += 1
cmap = cm.get_cmap('hot_r')
p = PatchCollection(patches, cmap=cmap, alpha=0.4)
colors = np.array(colors)
p.set_array(colors)
ax.add_collection(p)
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.set_title(title)
plt.colorbar(p)
plt.show()
def lisa_cluster_map(coords, lisa, title='LISA Cluster Map', p = 0.05):
"""
LISA Cluster Map
Arguments
=========
coords: Map_Projection instance
lisa: Moran_Local instance
title: string
map title
p: float
p-value to define clusters
"""
# pysal: 1 HH, 2 LH, 3 LL, 4 HL
c ={}
c[0] = 'white' # non-significant
c[1] = 'darkred'
c[2] = 'lightsalmon'
c[3] = 'darkblue'
c[4] = 'lightblue'
q = lisa.q.copy()
yp = lisa.p_sim.copy()
nsig = yp > p
q[nsig] = 0
fig = plt.figure()
ax = fig.add_subplot(111)
i = 0
for shp in coords.projected:
for ring in shp:
x,y = ring
x = x / coords.bounding_box[2]
y = y / coords.bounding_box[3]
n = len(x)
x.shape = (n,1)
y.shape = (n,1)
ax.fill(x,y,c[q[i]])
i += 1
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.set_title(title)
plt.show()
def unique_values_map(coords,y, title='Unique Value'):
"""
Unique value choropleth
Arguments
=========
coords: Map_Projection instance
y: array
zeros for elements that should not be mapped, 1-4 for elements to
highlight
title: string
map title
Notes
=====
Allows for an unlimited number of categories, but if there are many
categories the colors may be difficult to distinguish.
[Currently designed for use with a Moran_Local Instance for mapping a
subset of the significant LISAs.]
"""
yu = np.unique(y)
colormap = plt.cm.Set1
colors = [colormap(i) for i in np.linspace(0, 0.9, len(yu))]
colors = np.random.permutation(colors)
colormatch = zip(yu, colors)
c = {}
for i in colormatch:
c[i[0]] = i[1]
'''
# pysal: 1 HH, 2 LH, 3 LL, 4 HL
c ={}
c[0] = 'white' # non-significant
c[1] = 'darkred'
c[2] = 'lightsalmon'
c[3] = 'darkblue'
c[4] = 'lightblue'
'''
fig = plt.figure()
ax = fig.add_subplot(111)
i = 0
for shp in coords.projected:
for ring in shp:
x,yc = ring
x = x / coords.bounding_box[2]
yc = yc / coords.bounding_box[3]
n = len(x)
x.shape = (n,1)
yc.shape = (n,1)
ax.fill(x,yc,color=c[y[i]], edgecolor='black')
#ax.fill(x,yc,c[y[i]])
i += 1
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.set_title(title)
plt.show()
if __name__ == '__main__':
shp_link = ps.examples.get_path("sids2.shp")
dbf = ps.open(shp_link.replace('.shp', '.dbf'))
values = np.array(dbf.by_col("SIDR74"))
#values[: values.shape[0]/2] = 1
#values[values.shape[0]/2: ] = 0
#shp_link0 = '/home/dani/Desktop/world/TM_WORLD_BORDERS-0.3.shp'
#shp_link1 = '/home/dani/Desktop/world/world.shp'
'''
which = values > 1.
for shp_link in [shp_link]:
fig = plt.figure()
patchco = map_poly_shp(shp_link)
patchcoB = map_poly_shp(shp_link, which=which)
patchco.set_facecolor('none')
ax = setup_ax([patchco, patchcoB])
fig.add_axes(ax)
plt.show()
break
'''
patchco = map_poly_shp(shp_link)
patchco.set_facecolor('none')
fig = plt.figure()
ax = fig.add_subplot(121)
ax = setup_ax([patchco], ax)
plt.show()
|
bsd-3-clause
|
amozie/amozie
|
testzie/table_test.py
|
1
|
3048
|
import numpy as np
import pandas as pd
lt = 'f:/lt/'
region = pd.read_csv(lt + 'region.csv',sep='\t', index_col=0)
# Exclude Inner Mongolia and Tibet
# prvs = ['北京', '天津', '河北', '山东', '辽宁', '江苏', '上海', '浙江', '福建', '广东', '海南', '吉林',
# '黑龙江', '山西', '河南', '安徽', '江西', '湖北', '湖南', '广西', '重庆', '四川', '贵州', '云南',
# '陕西', '甘肃', '青海', '宁夏', '新疆']
prvs = ['北京', '天津', '河北', '山东', '辽宁', '江苏', '上海', '浙江', '福建', '广东', '广西', '海南',
'吉林', '黑龙江', '山西', '河南', '安徽', '江西', '湖北', '湖南', '重庆', '四川', '贵州', '云南',
'陕西', '甘肃', '青海', '宁夏', '新疆', '内蒙古']
years = ['2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012']
worker = pd.read_csv(lt + 'worker.csv', sep='\t', index_col=0).join(region)
capital = pd.read_csv(lt + 'capital.csv', sep='\t', index_col=0).join(region)
energy = pd.read_csv(lt + 'energy.csv', sep='\t', index_col=0).join(region)
gdp = pd.read_csv(lt + 'gdp.csv', sep='\t', index_col=0).join(region)
co2 = pd.read_csv(lt + 'co2.csv', sep='\t', index_col=0).join(region)
table = {'劳动力': worker, '资本': capital, '能源': energy, 'GDP': gdp, 'CO2': co2}
ll = []
ll_indexs = ['劳动力', '资本', '能源', 'GDP', 'CO2']
# ll_columns = ['整体均值', '整体标准差', '东部均值', '东部标准差', '中部均值', '中部标准差', '西部均值', '西部标准差']
ll_columns = ['均值', '标准差', '最小值', '最大值']
for k, v in table.items():
print(k)
df = v.loc[prvs, :]
    # Overall
val = df.loc[:, years].values.ravel()
avg = val.mean()
std = np.std(val, ddof=1)
mini = val.min()
maxi = val.max()
    # East
val1 = df[df.rgn==1].loc[:, years].values.ravel()
avg1 = val1.mean()
std1 = np.std(val1, ddof=1)
    # Central
val2 = df[df.rgn==2].loc[:, years].values.ravel()
avg2 = val2.mean()
std2 = np.std(val2, ddof=1)
    # West
val3 = df[df.rgn==3].loc[:, years].values.ravel()
avg3 = val3.mean()
std3 = np.std(val3, ddof=1)
    print(f'Overall\nmean {avg:.2f}\nstd {std:.2f}')
    print(f'East\nmean {avg1:.2f}\nstd {std1:.2f}')
    print(f'Central\nmean {avg2:.2f}\nstd {std2:.2f}')
    print(f'West\nmean {avg3:.2f}\nstd {std3:.2f}')
# ll.append([avg, std, avg1, std1, avg2, std2, avg3, std3])
ll.append([avg, std, mini, maxi])
arr = np.array(ll)
df = pd.DataFrame(arr, ll_indexs, ll_columns)
df.to_csv(lt + 'table2_300.csv')
df.to_csv(lt + 'table6_290.csv')
df.to_csv(lt + 'table6_300.csv')
# eviews
eviews = pd.read_csv(lt + 'eviews.csv', sep='\t')
# Exclude Inner Mongolia
eviews = eviews[eviews.prv_id!=5]
# Overall
eviews.shape
des = eviews.describe()
des.to_csv(lt + 'des.csv')
# East
eviews = eviews[eviews.rgn=='东部']
eviews.shape
des = eviews.describe()
des.to_csv(lt + 'des.csv')
# pd.Series.rank()  # leftover scratch call; the unbound method needs a Series instance
|
apache-2.0
|
lorenzo-desantis/mne-python
|
mne/viz/montage.py
|
13
|
1786
|
"""Functions to plot EEG sensor montages or digitizer montages
"""
import numpy as np
def plot_montage(montage, scale_factor=1.5, show_names=False, show=True):
"""Plot a montage
Parameters
----------
montage : instance of Montage
The montage to visualize.
scale_factor : float
Determines the size of the points. Defaults to 1.5.
show_names : bool
Whether to show the channel names. Defaults to False.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure object.
"""
from ..channels.montage import Montage, DigMontage
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
if isinstance(montage, Montage):
pos = montage.pos
ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2])
if show_names:
ch_names = montage.ch_names
for ch_name, x, y, z in zip(ch_names, pos[:, 0],
pos[:, 1], pos[:, 2]):
ax.text(x, y, z, ch_name)
elif isinstance(montage, DigMontage):
pos = np.vstack((montage.hsp, montage.elp))
ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2])
if show_names:
if montage.point_names:
hpi_names = montage.point_names
for hpi_name, x, y, z in zip(hpi_names, montage.elp[:, 0],
montage.elp[:, 1],
montage.elp[:, 2]):
ax.text(x, y, z, hpi_name)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if show:
plt.show()
return fig
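# Illustrative usage sketch (not part of the original module). It assumes a
# `read_montage` helper is available in mne.channels.montage in this version
# of the code base; adapt the import to your installation if it is not.
def _plot_montage_example():
    from ..channels.montage import read_montage  # assumed location
    montage = read_montage('standard_1020')
    return plot_montage(montage, show_names=True, show=False)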
|
bsd-3-clause
|
rxa254/VibrationsAndWaves
|
Simulations/TravelingWave-Detail.py
|
1
|
2009
|
#!/usr/bin/env python
from __future__ import division
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('seaborn-paper')
#plt.style.use('fivethirtyeight')
import numpy as np
mpl.rcParams.update({'text.usetex': True,
'lines.linewidth': 2.5,
'font.size': 16,
'xtick.labelsize': 'small',
'ytick.labelsize': 'small',
'axes.grid': True,
'axes.labelsize': 'medium',
'grid.alpha': 0.73,
'lines.markersize': 12,
'legend.borderpad': 0.2,
'legend.fancybox': True,
'legend.fontsize': 13,
'legend.framealpha': 0.7,
'legend.handletextpad': 0.1,
'legend.labelspacing': 0.2,
'legend.loc': 'best',
'savefig.dpi': 100,
'pdf.compression': 9})
x = np.arange(0.0, 1.0, 0.001)
A = 1
phi0 = 0
f = 9
omega = 2*np.pi*f
tau = 0.5
L = 1
#s = A * np.sin(2*np.pi*f*t + phi0)
#dy = np.exp(-t/tau)
#s *= dy
fig = plt.figure(11122, figsize=(8,4))
ax = fig.add_subplot(111)
n = 4
t = 0
s = A * np.sin(n*np.pi * x / L - omega*t)
ax.plot(x, s, alpha=0.85,
rasterized = True,
label = "t = 0")
t = 0.01
s = A * np.sin(n*np.pi * x / L - omega*t)
ax.plot(x, s, alpha=0.7,
rasterized = True,
label = "t = " + str(t))
ax.annotate(
r'$y(x,t) = y(x + \Delta x, t + \frac{\Delta x}{v})$',
fontsize='x-small',
xy=(-0.031, -0.97), xycoords='data',
xytext=(0.5, -0.6), textcoords='offset points')
ax.set_xlabel('Displacement [L]')
ax.set_ylabel('Amplitude [mm]')
ax.set_title(r'$y = \sin(\frac{2 \pi}{\lambda} (x - v t))$', fontsize=19)
ax.grid(True)
#ax.yaxis.set_ticks(np.arange(-1, 1.1, 0.5))
ax.legend()
plt.savefig("detail-trav.pdf", bbox_inches='tight')
|
mit
|
daniorerio/trackpy
|
doc/sphinxext/plot_generator.py
|
9
|
10081
|
"""
Sphinx plugin to run example scripts and create a gallery page.
Taken from the seaborn project, which in turn was lightly
modified from the mpld3 project.
"""
from __future__ import division
import os
import os.path as op
import re
import glob
import token
import tokenize
import shutil
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import image
RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
.. image:: {img_file}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
position: relative;
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure:hover img {{
-webkit-filter: blur(3px);
-moz-filter: blur(3px);
-o-filter: blur(3px);
-ms-filter: blur(3px);
filter: blur(3px);
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure span {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
background: #000;
color: #fff;
visibility: hidden;
opacity: 0;
z-index: 100;
}}
.figure p {{
position: absolute;
top: 45%;
width: 170px;
font-size: 110%;
}}
.figure:hover span {{
visibility: visible;
opacity: .4;
}}
.caption {{
        position: absolute;
width: 180px;
top: 170px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
def create_thumbnail(infile, thumbfile,
width=300, height=300,
cx=0.5, cy=0.5, border=4):
baseout, extout = op.splitext(thumbfile)
im = image.imread(infile)
rows, cols = im.shape[:2]
x0 = int(cx * cols - .5 * width)
y0 = int(cy * rows - .5 * height)
xslice = slice(x0, x0 + width)
yslice = slice(y0, y0 + height)
thumb = im[yslice, xslice]
thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
dpi = 100
fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
frameon=False, xticks=[], yticks=[])
ax.imshow(thumb, aspect='auto', resample=True,
interpolation='bilinear')
fig.savefig(thumbfile, dpi=dpi)
return fig
def indent(s, N=4):
"""indent a string"""
return s.replace('\n', '\n' + N * ' ')
class ExampleGenerator(object):
"""Tools for generating an example page from a file"""
def __init__(self, filename, target_dir):
self.filename = filename
self.target_dir = target_dir
self.thumbloc = .5, .5
self.extract_docstring()
with open(filename, "r") as fid:
self.filetext = fid.read()
outfilename = op.join(target_dir, self.rstfilename)
# Only actually run it if the output RST file doesn't
# exist or it was modified less recently than the example
if (not op.exists(outfilename)
or (op.getmtime(outfilename) < op.getmtime(filename))):
self.exec_file()
else:
print("skipping {0}".format(self.filename))
@property
def dirname(self):
return op.split(self.filename)[0]
@property
def fname(self):
return op.split(self.filename)[1]
@property
def modulename(self):
return op.splitext(self.fname)[0]
@property
def pyfilename(self):
return self.modulename + '.py'
@property
def rstfilename(self):
return self.modulename + ".rst"
@property
def htmlfilename(self):
return self.modulename + '.html'
@property
def pngfilename(self):
pngfile = self.modulename + '.png'
return "_images/" + pngfile
@property
def thumbfilename(self):
pngfile = self.modulename + '_thumb.png'
return pngfile
@property
def sphinxtag(self):
return self.modulename
@property
def pagetitle(self):
return self.docstring.strip().split('\n')[0].strip()
@property
def plotfunc(self):
match = re.search(r"sns\.(.+plot)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+map)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+Grid)\(", self.filetext)
if match:
return match.group(1)
return ""
def extract_docstring(self):
""" Extract a module-level docstring
"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(lines.__iter__().next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs,
# extract the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')
).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
thumbloc = None
for i, line in enumerate(docstring.split("\n")):
m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
if m:
thumbloc = float(m.group(1)), float(m.group(2))
break
if thumbloc is not None:
self.thumbloc = thumbloc
docstring = "\n".join([l for l in docstring.split("\n")
if not l.startswith("_thumb")])
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row
def exec_file(self):
print("running {0}".format(self.filename))
plt.close('all')
my_globals = {'pl': plt,
'plt': plt}
execfile(self.filename, my_globals)
fig = plt.gcf()
fig.canvas.draw()
pngfile = op.join(self.target_dir, self.pngfilename)
thumbfile = op.join("example_thumbs", self.thumbfilename)
self.html = "<img src=../%s>" % self.pngfilename
fig.savefig(pngfile, dpi=75, bbox_inches="tight")
cx, cy = self.thumbloc
create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)
def toctree_entry(self):
return " ./%s\n\n" % op.splitext(self.htmlfilename)[0]
def contents_entry(self):
return (".. raw:: html\n\n"
" <div class='figure align-center'>\n"
" <a href=./{0}>\n"
" <img src=../_static/{1}>\n"
" <span class='figure-label'>\n"
" <p>{2}</p>\n"
" </span>\n"
" </a>\n"
" </div>\n\n"
"\n\n"
"".format(self.htmlfilename,
self.thumbfilename,
self.plotfunc))
def main(app):
static_dir = op.join(app.builder.srcdir, '_static')
target_dir = op.join(app.builder.srcdir, 'examples')
image_dir = op.join(app.builder.srcdir, 'examples/_images')
thumb_dir = op.join(app.builder.srcdir, "example_thumbs")
source_dir = op.abspath(op.join(app.builder.srcdir,
'..', 'examples'))
if not op.exists(static_dir):
os.makedirs(static_dir)
if not op.exists(target_dir):
os.makedirs(target_dir)
if not op.exists(image_dir):
os.makedirs(image_dir)
if not op.exists(thumb_dir):
os.makedirs(thumb_dir)
if not op.exists(source_dir):
os.makedirs(source_dir)
banner_data = []
toctree = ("\n\n"
".. toctree::\n"
" :hidden:\n\n")
contents = "\n\n"
# Write individual example files
for filename in glob.glob(op.join(source_dir, "*.py")):
ex = ExampleGenerator(filename, target_dir)
banner_data.append({"title": ex.pagetitle,
"url": op.join('examples', ex.htmlfilename),
"thumb": op.join(ex.thumbfilename)})
shutil.copyfile(filename, op.join(target_dir, ex.pyfilename))
output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
docstring=ex.docstring,
end_line=ex.end_line,
fname=ex.pyfilename,
img_file=ex.pngfilename)
with open(op.join(target_dir, ex.rstfilename), 'w') as f:
f.write(output)
toctree += ex.toctree_entry()
contents += ex.contents_entry()
if len(banner_data) < 10:
banner_data = (4 * banner_data)[:10]
# write index file
index_file = op.join(target_dir, 'index.rst')
with open(index_file, 'w') as index:
index.write(INDEX_TEMPLATE.format(sphinx_tag="example_gallery",
toctree=toctree,
contents=contents))
def setup(app):
app.connect('builder-inited', main)
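# Registration sketch (an assumption based on the seaborn layout this plugin
# was taken from, not verified against this repo): the docs' conf.py puts the
# sphinxext directory on sys.path and lists the module as an extension so
# that setup() above runs at build time, e.g.
#
#     sys.path.insert(0, os.path.abspath('sphinxext'))
#     extensions = ['plot_generator', ...]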
|
bsd-3-clause
|
elkingtonmcb/scikit-learn
|
examples/linear_model/plot_logistic.py
|
312
|
1426
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
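# Small illustrative check (not part of the original example): the logistic
# curve crosses 0.5 exactly where the linear score is zero, i.e. at
# x = -intercept_ / coef_, which is the decision boundary of the classifier.
def _decision_boundary_sketch():
    x_star = -clf.intercept_[0] / clf.coef_[0, 0]
    return x_star, model(clf.coef_[0, 0] * x_star + clf.intercept_[0])  # (boundary, 0.5)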
|
bsd-3-clause
|
astroswego/plotypus
|
src/plotypus/plotypus.py
|
1
|
17301
|
import numpy
from numpy import std
from sys import exit, stdin, stdout, stderr
from os import path, listdir
from argparse import ArgumentError, ArgumentParser, SUPPRESS
from pandas import read_table
from sklearn.linear_model import (LassoCV, LassoLarsCV, LassoLarsIC,
LinearRegression, RidgeCV, ElasticNetCV)
from sklearn.grid_search import GridSearchCV
from matplotlib import rc_params_from_file
from functools import partial
from itertools import chain, repeat
import plotypus.lightcurve
from plotypus.lightcurve import (make_predictor, get_lightcurve_from_file,
plot_lightcurve)
from plotypus.periodogram import Lomb_Scargle, conditional_entropy
import plotypus
from plotypus.preprocessing import Fourier
from plotypus.utils import mad, pmap, verbose_print
from plotypus.resources import matplotlibrc
import pkg_resources # part of setuptools
__version__ = pkg_resources.require("plotypus")[0].version
def get_args():
parser = ArgumentParser()
general_group = parser.add_argument_group('General')
param_group = parser.add_argument_group('Star Parameters')
parallel_group = parser.add_argument_group('Parallel')
period_group = parser.add_argument_group('Periodogram')
fourier_group = parser.add_argument_group('Fourier')
outlier_group = parser.add_argument_group('Outlier Detection')
# regression_group = parser.add_argument_group('Regression')
parser.add_argument('--version', action='version',
version='%(prog)s {version}'.format(version=__version__))
general_group.add_argument('-i', '--input', type=str,
default=None,
help='location of stellar observations '
'(default = stdin)')
general_group.add_argument('-o', '--output', type=str,
default=None,
help='location of plots, or nothing if no plots are to be generated '
'(default = None)')
general_group.add_argument('-n', '--star-name', type=str,
default=None,
help='name of star '
'(default = name of input file)')
general_group.add_argument('-f', '--format', type=str,
default='%.5f',
help='format specifier for output table')
general_group.add_argument('--output-sep', type=str,
default='\t',
help='column separator string in output table '
'(default = TAB)')
general_group.add_argument('--no-header', action='store_true',
help='suppress header row in table output')
general_group.add_argument('--sanitize-latex', action='store_true',
help='enable to sanitize star names for LaTeX formatting')
general_group.add_argument('--legend', action='store_true',
help='whether legends should be put on the output plots '
'(default = False)')
general_group.add_argument('--extension', type=str,
default='.dat', metavar='EXT',
help='extension which follows a star\'s name in data filenames '
'(default = ".dat")')
general_group.add_argument('--skiprows', type=int,
default=0,
help='number of rows at the head of each file to skip')
general_group.add_argument('--use-cols', type=int, nargs='+',
default=SUPPRESS,
help='columns to use from data file '
'(default = 0 1 2)')
general_group.add_argument('-s', '--scoring', type=str,
choices=['MSE', 'R2'], default=SUPPRESS,
help='scoring metric to use '
'(default = "R2")')
general_group.add_argument('--scoring-cv', type=int,
default=SUPPRESS, metavar='N',
        help='number of folds in the scoring cross validation '
'(default = 3)')
general_group.add_argument('--shift', type=float,
default=None,
help='phase shift to apply to each light curve, or shift to max '
'light if None given '
'(default = None)')
general_group.add_argument('--phase-points', type=int,
default=100, metavar='N',
help='number of phase points to output '
'(default = 100)')
general_group.add_argument('--min-phase-cover', type=float,
default=SUPPRESS, metavar='COVER',
help='minimum fraction of phases that must have points '
'(default = 0)')
general_group.add_argument('--min-observations', type=int,
default=1, metavar='N',
help='minimum number of observation needed to avoid skipping a star '
'(default = 1)')
general_group.add_argument('--matplotlibrc', type=str,
default=matplotlibrc,
metavar='RC',
help='matplotlibrc file to use for formatting plots '
'(default file is in plotypus.resources.matplotlibrc)')
general_group.add_argument('-v', '--verbosity', type=str, action='append',
default=None, choices=['all', 'coverage', 'outlier', 'period'],
metavar='OPERATION',
help='specifies an operation to print verbose output for, or '
'"all" to print all verbose output '
'(default = None)')
param_group.add_argument('--parameters', type=str,
default=None, metavar='FILE',
help='file containing table of parameters such as period and shift '
'(default = None)')
param_group.add_argument('--param-sep', type=str,
default="\\s+",
help='string or regex to use as column separator when reading '
'parameters file '
'(default = any whitespace)')
param_group.add_argument('--period-label', type=str,
default='Period', metavar='LABEL',
help='title of period column in parameters file '
'(default = Period)')
param_group.add_argument('--shift-label', type=str,
default='Shift', metavar='LABEL',
help='title of shift column in parameters file '
'(default = Shift)')
parallel_group.add_argument('--star-processes', type=int,
default=1, metavar='N',
help='number of stars to process in parallel '
'(default = 1)')
parallel_group.add_argument('--selector-processes', type=int,
default=SUPPRESS, metavar='N',
help='number of processes to use for each selector '
'(default depends on selector used)')
parallel_group.add_argument('--scoring-processes', type=int,
default=SUPPRESS, metavar='N',
help='number of processes to use for scoring, if not done by selector '
'(default = 1)')
parallel_group.add_argument('--period-processes', type=int,
default=1, metavar='N',
help='number of periods to process in parallel '
'(default = 1)')
period_group.add_argument('--period', type=float,
default=None,
help='period to use for all stars '
'(default = None)')
period_group.add_argument('--min-period', type=float,
default=SUPPRESS, metavar='P',
help='minimum period of each star '
'(default = 0.2)')
period_group.add_argument('--max-period', type=float,
default=SUPPRESS, metavar='P',
help='maximum period of each star '
'(default = 32.0)')
period_group.add_argument('--coarse-precision', type=float,
default=SUPPRESS,
help='level of granularity on first pass '
'(default = 0.00001)')
period_group.add_argument('--fine-precision', type=float,
default=SUPPRESS,
help='level of granularity on second pass '
'(default = 0.000000001)')
period_group.add_argument('--periodogram', type=str,
choices=["Lomb_Scargle", "conditional_entropy"],
default="Lomb_Scargle",
help='method for determining period '
'(default = Lomb_Scargle)')
fourier_group.add_argument('-d', '--fourier-degree', type=int, nargs=2,
default=(2, 20), metavar=('MIN', 'MAX'),
help='range of degrees of fourier fits to use '
'(default = 2 20)')
fourier_group.add_argument('-r', '--regressor',
choices=['LassoCV', 'LassoLarsCV', 'LassoLarsIC', 'OLS', 'RidgeCV', 'ElasticNetCV'],
default='LassoLarsIC',
help='type of regressor to use '
'(default = "Lasso")')
fourier_group.add_argument('--selector',
choices=['Baart', 'GridSearch'],
default='GridSearch',
help='type of model selector to use '
'(default = "GridSearch")')
fourier_group.add_argument('--series-form', type=str,
default='cos', choices=['sin', 'cos'],
help='form of Fourier series to use in coefficient output, '
'does not affect the fit '
'(default = "cos")')
fourier_group.add_argument('--max-iter', type=int,
default=1000, metavar='N',
help='maximum number of iterations in the regularization path '
'(default = 1000)')
fourier_group.add_argument('--regularization-cv', type=int,
default=None, metavar='N',
        help='number of folds used in the regularization cross validation '
'(default = 3)')
outlier_group.add_argument('--sigma', type=float,
default=SUPPRESS,
help='rejection criterion for outliers '
'(default = 20)')
outlier_group.add_argument('--sigma-clipping', type=str,
choices=["std", "mad"], default="mad",
help='sigma clipping metric to use '
'(default = "mad")')
args = parser.parse_args()
if args.output is not None:
rcParams = rc_params_from_file(fname=args.matplotlibrc,
fail_on_error=args.output)
plotypus.lightcurve.matplotlib.rcParams = rcParams
regressor_choices = {
"LassoCV" : LassoCV(max_iter=args.max_iter,
cv=args.regularization_cv,
fit_intercept=False),
"LassoLarsCV" : LassoLarsCV(max_iter=args.max_iter,
cv=args.regularization_cv,
fit_intercept=False),
"LassoLarsIC" : LassoLarsIC(max_iter=args.max_iter,
fit_intercept=False),
"OLS" : LinearRegression(fit_intercept=False),
"RidgeCV" : RidgeCV(cv=args.regularization_cv,
fit_intercept=False),
"ElasticNetCV" : ElasticNetCV(max_iter=args.max_iter,
cv=args.regularization_cv,
fit_intercept=False)
}
selector_choices = {
"Baart" : None,
"GridSearch" : GridSearchCV
}
periodogram_choices = {
"Lomb_Scargle" : Lomb_Scargle,
"conditional_entropy" : conditional_entropy
}
sigma_clipping_choices = {
"std" : std,
"mad" : mad
}
if hasattr(args, 'scoring'):
scoring_choices = {
'R2' : 'r2',
'MSE' : 'mean_squared_error'
}
args.scoring = scoring_choices[args.scoring]
args.regressor = regressor_choices[args.regressor]
Selector = selector_choices[args.selector] or GridSearchCV
args.periodogram = periodogram_choices[args.periodogram]
args.sigma_clipping = sigma_clipping_choices[args.sigma_clipping]
args.predictor = make_predictor(Selector=Selector,
use_baart=(args.selector == 'Baart'),
**vars(args))
args.phases = numpy.arange(0, 1, 1/args.phase_points)
if args.parameters is not None:
args.parameters = read_table(args.parameters, args.param_sep,
index_col=0, engine='python')
return args
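# Illustrative command-line sketch (not part of the original module). The
# exact console-script name is an assumption about how the package is
# installed; running the module directly always works thanks to the
# __main__ guard at the bottom of this file. Paths are placeholders.
#
#     python -m plotypus.plotypus -i observations/ -o plots/ \
#         --periodogram Lomb_Scargle --regressor LassoLarsIC \
#         --fourier-degree 2 15 > fits.dat
#
# Every flag above is defined in get_args(); see the argument groups for the
# full list of options and their defaults.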
def main():
args = get_args()
min_degree, max_degree = args.fourier_degree
filenames = list(map(lambda x: x.strip(), _get_files(args.input)))
filepaths = map(lambda filename:
filename if path.isfile(filename)
else path.join(args.input, filename),
filenames)
# a dict containing all options which can be pickled, because
# all parameters to pmap must be picklable
picklable_args = {k: vars(args)[k]
for k in vars(args)
if k not in {'input'}}
sep = args.output_sep
if not args.no_header:
# print file header
print(*['Name',
'Period',
'Shift',
'Coverage',
'Inliers',
'Outliers',
'R^2',
'MSE',
'MaxDegree',
'Params',
'A_0',
'dA_0',
sep.join(map(('A_{0}' + sep + 'Phi_{0}').format,
range(1, max_degree+1))),
sep.join(map(('R_{0}1' + sep + 'phi_{0}1').format,
range(2, max_degree+1))),
sep.join(map('Phase{}'.format,
range(args.phase_points)))],
sep=sep)
printer = lambda result: _print_star(result, max_degree, args.series_form,
args.format, sep) \
if result is not None else None
pmap(process_star, filepaths, callback=printer,
processes=args.star_processes, **picklable_args)
def process_star(filename, output, *, extension, star_name, period, shift,
parameters, period_label, shift_label, **kwargs):
"""Processes a star's lightcurve, prints its coefficients, and saves
its plotted lightcurve to a file. Returns the result of get_lightcurve.
"""
if star_name is None:
basename = path.basename(filename)
if basename.endswith(extension):
star_name = basename[:-len(extension)]
else:
# file has wrong extension
return
if parameters is not None:
if period is None:
try:
period = parameters[period_label][star_name]
except KeyError:
pass
if shift is None:
try:
                shift = parameters[shift_label][star_name]
except KeyError:
pass
result = get_lightcurve_from_file(filename, name=star_name,
period=period, shift=shift,
**kwargs)
if result is None:
return
if output is not None:
plot_lightcurve(star_name, result['lightcurve'], result['period'],
result['phased_data'], output=output, **kwargs)
return result
def _print_star(result, max_degree, form, fmt, sep):
if result is None:
return
# function which formats every number in a sequence according to fmt
format_all = partial(map, lambda x: fmt % x)
# count inliers and outliers
points = result['phased_data'][:,0].size
outliers = numpy.ma.count_masked(result['phased_data'][:, 0])
inliers = points - outliers
# get fourier coefficients and compute ratios
coefs = Fourier.phase_shifted_coefficients(result['coefficients'],
shift=result['shift'],
form=form)
_coefs = numpy.concatenate(([coefs[0]],
[result['dA_0']],
coefs[1:]))
fourier_ratios = Fourier.fourier_ratios(coefs)
# create the vectors of zeroes
coef_zeros = repeat('0', times=(2*max_degree + 1 - len(coefs)))
ratio_zeros = repeat('0', times=(2*(max_degree - 1) - len(fourier_ratios)))
max_degree = numpy.trim_zeros(coefs[1::2], 'b').size
n_params = numpy.count_nonzero(coefs[1::2])
# print the entry for the star with tabs as separators
# and itertools.chain to separate the different results into a
# continuous list which is then unpacked
print(*chain(*[[result['name']],
map(str,
[result['period'], result['shift'], result['coverage'],
inliers, outliers,
result['R2'], result['MSE'],
max_degree, n_params]),
# coefficients and fourier ratios with trailing zeros
# formatted defined by the user-provided fmt string
format_all(_coefs), coef_zeros,
format_all(fourier_ratios), ratio_zeros,
format_all(result['lightcurve'])]),
sep=sep)
def _get_files(input):
if input is None:
return stdin
elif input[0] == "@":
with open(input[1:], 'r') as f:
return map(lambda x: x.strip(), f.readlines())
elif path.isfile(input):
return [input]
elif path.isdir(input):
return sorted(listdir(input))
else:
raise FileNotFoundError('file {} not found'.format(input))
if __name__ == "__main__":
exit(main())
|
gpl-3.0
|
tallakahath/pymatgen
|
pymatgen/electronic_structure/tests/test_plotter.py
|
2
|
4571
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on May 1, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "May 1, 2012"
import unittest
import os
import json
from io import open
try:
import matplotlib
matplotlib.use("pdf") # Use non-graphical display backend during test.
have_matplotlib = "DISPLAY" in os.environ
except ImportError:
have_matplotlib = False
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.plotter import DosPlotter, BSPlotter, plot_ellipsoid, fold_point, plot_brillouin_zone
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
from pymatgen.core.structure import Structure
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
import scipy
class DosPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "complete_dos.json"), "r",
encoding='utf-8') as f:
self.dos = CompleteDos.from_dict(json.load(f))
self.plotter = DosPlotter(sigma=0.2, stack=True)
def test_add_dos_dict(self):
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 0)
self.plotter.add_dos_dict(self.dos.get_element_dos(),
key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 4)
def test_get_dos_dict(self):
self.plotter.add_dos_dict(self.dos.get_element_dos(),
key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
for el in ["Li", "Fe", "P", "O"]:
self.assertIn(el, d)
class BSPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "CaO_2605_bandstructure.json"),
"r", encoding='utf-8') as f:
d = json.loads(f.read())
self.bs = BandStructureSymmLine.from_dict(d)
self.plotter = BSPlotter(self.bs)
def test_bs_plot_data(self):
self.assertEqual(len(self.plotter.bs_plot_data()['distances'][0]), 16,
"wrong number of distances in the first branch")
self.assertEqual(len(self.plotter.bs_plot_data()['distances']), 10,
"wrong number of branches")
self.assertEqual(
sum([len(e) for e in self.plotter.bs_plot_data()['distances']]),
160, "wrong number of distances")
self.assertEqual(self.plotter.bs_plot_data()['ticks']['label'][5], "K",
"wrong tick label")
self.assertEqual(len(self.plotter.bs_plot_data()['ticks']['label']),
19, "wrong number of tick labels")
class PlotBZTest(unittest.TestCase):
def setUp(self):
if not have_matplotlib:
raise unittest.SkipTest("matplotlib not available")
self.rec_latt = Structure.from_file(os.path.join(test_dir, "Si.cssr")).lattice.reciprocal_lattice
self.kpath = [[[0., 0., 0.], [0.5, 0., 0.5], [0.5, 0.25, 0.75], [0.375, 0.375, 0.75]]]
self.labels = {'\\Gamma': [0., 0., 0.], 'K': [0.375, 0.375, 0.75], u'L': [0.5, 0.5, 0.5],
'U': [0.625, 0.25, 0.625], 'W': [0.5, 0.25, 0.75], 'X': [0.5, 0., 0.5]}
self.hessian = [[17.64757034, 3.90159625, -4.77845607],
[3.90159625, 14.88874142, 6.75776076],
[-4.77845607, 6.75776076, 12.12987493]]
self.center = [0.41, 0., 0.41]
self.points = [[0., 0., 0.], [0.5, 0.5, 0.5]]
def test_bz_plot(self):
fig, ax = plot_ellipsoid(self.hessian, self.center, lattice=self.rec_latt)
plot_brillouin_zone(self.rec_latt, lines=self.kpath, labels=self.labels, kpoints=self.points, ax=ax, show=False)
def test_fold_point(self):
self.assertTrue(scipy.allclose(fold_point([0., -0.5, 0.5], lattice=self.rec_latt),
self.rec_latt.get_cartesian_coords([0., 0.5, 0.5])))
self.assertTrue(scipy.allclose(fold_point([0.1, -0.6, 0.2], lattice=self.rec_latt),
self.rec_latt.get_cartesian_coords([0.1, 0.4, 0.2])))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
mit
|
IBM/differential-privacy-library
|
tests/models/test_incremental_mean_and_var.py
|
1
|
3983
|
from unittest.case import TestCase
import numpy as np
from sklearn.utils.extmath import _incremental_mean_and_var as sk_incremental_mean_and_var
from diffprivlib.models.standard_scaler import _incremental_mean_and_var
from diffprivlib.utils import PrivacyLeakWarning
class TestIncrementalMeanAndVar(TestCase):
def test_no_range(self):
X = np.random.rand(5, 10)
with self.assertWarns(PrivacyLeakWarning):
_incremental_mean_and_var(X, epsilon=float("inf"), bounds=None, last_mean=0., last_variance=None,
last_sample_count=0)
def test_inf_epsilon(self):
X = np.random.rand(5, 10)
dp_mean, dp_var, dp_count = _incremental_mean_and_var(X, epsilon=float("inf"), bounds=(0, 1), last_mean=0.,
last_variance=None,
last_sample_count=np.zeros(X.shape[1], dtype=np.int64))
sk_mean, sk_var, sk_count = sk_incremental_mean_and_var(X, last_mean=0., last_variance=None,
last_sample_count=np.zeros(X.shape[1], dtype=np.int64))
self.assertTrue(np.allclose(dp_mean, sk_mean))
self.assertIsNone(dp_var)
self.assertIsNone(sk_var)
self.assertTrue((dp_count == sk_count).all())
dp_mean, dp_var, dp_count = _incremental_mean_and_var(X, epsilon=float("inf"), bounds=(0, 1), last_mean=0.,
last_variance=0.,
last_sample_count=np.zeros(X.shape[1], dtype=np.int64))
sk_mean, sk_var, sk_count = sk_incremental_mean_and_var(X, last_mean=0., last_variance=0.,
last_sample_count=np.zeros(X.shape[1], dtype=np.int64))
self.assertTrue(np.allclose(dp_mean, sk_mean))
self.assertTrue(np.allclose(dp_var, sk_var))
self.assertTrue((dp_count == sk_count).all())
def test_increment_inf_epsilon(self):
X = np.ones((5, 1))
dp_mean, dp_var, dp_count = _incremental_mean_and_var(X, epsilon=float("inf"), bounds=(0, 1), last_mean=0.,
last_variance=None, last_sample_count=5)
self.assertAlmostEqual(dp_mean, 0.5, places=5)
self.assertEqual(dp_count, 10)
def test_duplicate_dataset(self):
X = np.random.rand(10, 5)
mean1, var1, count1 = _incremental_mean_and_var(X, epsilon=float("inf"), bounds=(0, 1), last_mean=0.,
last_variance=0., last_sample_count=0)
mean2, var2, count2 = _incremental_mean_and_var(X, epsilon=float("inf"), bounds=(0, 1), last_mean=mean1,
last_variance=var1, last_sample_count=count1)
self.assertTrue(np.allclose(mean1, mean2))
self.assertTrue(np.allclose(var1, var2))
self.assertTrue(np.all(count1 == 10), "Counts should be 10, got %s" % count1)
self.assertTrue(np.all(count2 == 20), "Counts should be 20, got %s" % count2)
def test_different_results(self):
X = np.random.rand(10, 5)
mean1, var1, count1 = _incremental_mean_and_var(X, epsilon=1, bounds=(0, 1), last_mean=0., last_variance=0.,
last_sample_count=0)
mean2, var2, count2 = _incremental_mean_and_var(X, epsilon=1, bounds=(0, 1), last_mean=0, last_variance=0,
last_sample_count=0)
self.assertFalse(np.allclose(mean1, mean2, atol=1e-2))
self.assertFalse(np.allclose(var1, var2, atol=1e-2))
self.assertTrue(np.all(count1 == 10), "Counts should be 10, got %s" % count1)
self.assertTrue(np.all(count2 == 10), "Counts should be 10, got %s" % count2)
|
mit
|
gclenaghan/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
168
|
1793
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
|
bsd-3-clause
|
hugobowne/scikit-learn
|
sklearn/metrics/base.py
|
46
|
4627
|
"""
Common code for all metrics
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target
from ..exceptions import UndefinedMetricWarning as _UndefinedMetricWarning
from ..utils import deprecated
@deprecated("UndefinedMetricWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class UndefinedMetricWarning(_UndefinedMetricWarning):
pass
def _average_binary_score(binary_metric, y_true, y_score, average,
sample_weight=None):
"""Average a binary metric for multilabel classification
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
binary_metric : callable, returns shape [n_classes]
The binary metric function to use.
Returns
-------
score : float or array of shape [n_classes]
        If not ``None``, average the score, else return the score for each
        class.
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options:
raise ValueError('average has to be one of {0}'
''.format(average_options))
y_type = type_of_target(y_true)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
return binary_metric(y_true, y_score, sample_weight=sample_weight)
check_consistent_length(y_true, y_score, sample_weight)
y_true = check_array(y_true)
y_score = check_array(y_score)
not_average_axis = 1
score_weight = sample_weight
average_weight = None
if average == "micro":
if score_weight is not None:
score_weight = np.repeat(score_weight, y_true.shape[1])
y_true = y_true.ravel()
y_score = y_score.ravel()
elif average == 'weighted':
if score_weight is not None:
average_weight = np.sum(np.multiply(
y_true, np.reshape(score_weight, (-1, 1))), axis=0)
else:
average_weight = np.sum(y_true, axis=0)
if average_weight.sum() == 0:
return 0
elif average == 'samples':
# swap average_weight <-> score_weight
average_weight = score_weight
score_weight = None
not_average_axis = 0
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_score.ndim == 1:
y_score = y_score.reshape((-1, 1))
n_classes = y_score.shape[not_average_axis]
score = np.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
score[c] = binary_metric(y_true_c, y_score_c,
sample_weight=score_weight)
# Average the results
if average is not None:
return np.average(score, weights=average_weight)
else:
return score
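# A minimal usage sketch (an illustrative assumption, not part of scikit-learn's
# public API): average a toy thresholded-accuracy metric over the columns of a
# small multilabel indicator matrix with 'macro' averaging.
def _example_macro_average():
    def toy_accuracy(y_true, y_score, sample_weight=None):
        # fraction of samples whose thresholded score matches the true label
        return np.average(y_true == (y_score >= 0.5), weights=sample_weight)
    y_true = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])
    y_score = np.array([[0.9, 0.2], [0.1, 0.8], [0.7, 0.6], [0.4, 0.3]])
    return _average_binary_score(toy_accuracy, y_true, y_score, 'macro')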
|
bsd-3-clause
|
why11002526/keras
|
tests/manual/check_callbacks.py
|
82
|
7540
|
import numpy as np
import random
import theano
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import keras.callbacks as cbks
from matplotlib import pyplot as plt
from matplotlib import animation
##############################
# model DrawActivations test #
##############################
print('Running DrawActivations test')
nb_classes = 10
batch_size = 128
nb_epoch = 10
max_train_samples = 512
max_test_samples = 1
np.random.seed(1337)
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1,1,28,28)[:max_train_samples]
X_train = X_train.astype("float32")
X_train /= 255
X_test = X_test.reshape(-1,1,28,28)[:max_test_samples]
X_test = X_test.astype("float32")
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
class Frames(object):
def __init__(self, n_plots=16):
self._n_frames = 0
self._framedata = []
self._titles = []
for i in range(n_plots):
self._framedata.append([])
def add_frame(self, i, frame):
self._framedata[i].append(frame)
def set_title(self, title):
self._titles.append(title)
class SubplotTimedAnimation(animation.TimedAnimation):
def __init__(self, fig, frames, grid=(4, 4), interval=10, blit=False, **kwargs):
self.n_plots = grid[0] * grid[1]
self.axes = [fig.add_subplot(grid[0], grid[1], i + 1) for i in range(self.n_plots)]
for axis in self.axes:
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
self.frames = frames
self.imgs = [self.axes[i].imshow(frames._framedata[i][0], interpolation='nearest', cmap='bone') for i in range(self.n_plots)]
self.title = fig.suptitle('')
super(SubplotTimedAnimation, self).__init__(fig, interval=interval, blit=blit, **kwargs)
def _draw_frame(self, j):
for i in range(self.n_plots):
self.imgs[i].set_data(self.frames._framedata[i][j])
if len(self.frames._titles) > j:
self.title.set_text(self.frames._titles[j])
self._drawn_artists = self.imgs
def new_frame_seq(self):
return iter(range(len(self.frames._framedata[0])))
def _init_draw(self):
for img in self.imgs:
img.set_data([[]])
def combine_imgs(imgs, grid=(1,1)):
n_imgs, img_h, img_w = imgs.shape
if n_imgs != grid[0] * grid[1]:
raise ValueError()
combined = np.zeros((grid[0] * img_h, grid[1] * img_w))
for i in range(grid[0]):
for j in range(grid[1]):
combined[img_h*i:img_h*(i+1),img_w*j:img_w*(j+1)] = imgs[grid[0] * i + j]
return combined
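# Tiny illustrative check (an addition for exposition, not part of the original
# script): four 2x2 images tiled on a 2x2 grid yield a single 4x4 image.
assert combine_imgs(np.arange(16).reshape(4, 2, 2), grid=(2, 2)).shape == (4, 4)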
class DrawActivations(Callback):
def __init__(self, figsize):
self.fig = plt.figure(figsize=figsize)
def on_train_begin(self, logs={}):
self.imgs = Frames(n_plots=5)
layers_0_ids = np.random.choice(32, 16, replace=False)
self.test_layer0 = theano.function([self.model.get_input()], self.model.layers[1].get_output(train=False)[0, layers_0_ids])
layers_1_ids = np.random.choice(64, 36, replace=False)
self.test_layer1 = theano.function([self.model.get_input()], self.model.layers[5].get_output(train=False)[0, layers_1_ids])
self.test_layer2 = theano.function([self.model.get_input()], self.model.layers[10].get_output(train=False)[0])
def on_epoch_begin(self, epoch, logs={}):
self.epoch = epoch
def on_batch_end(self, batch, logs={}):
if batch % 5 == 0:
self.imgs.add_frame(0, X_test[0,0])
self.imgs.add_frame(1, combine_imgs(self.test_layer0(X_test), grid=(4, 4)))
self.imgs.add_frame(2, combine_imgs(self.test_layer1(X_test), grid=(6, 6)))
self.imgs.add_frame(3, self.test_layer2(X_test).reshape((16,16)))
self.imgs.add_frame(4, self.model._predict(X_test)[0].reshape((1,10)))
self.imgs.set_title('Epoch #%d - Batch #%d' % (self.epoch, batch))
def on_train_end(self, logs={}):
anim = SubplotTimedAnimation(self.fig, self.imgs, grid=(1,5), interval=10, blit=False, repeat_delay=1000)
# anim.save('test_gif.gif', fps=15, writer='imagemagick')
plt.show()
# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))
model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, 10, W_regularizer = l2(0.1)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Fit the model
draw_weights = DrawActivations(figsize=(5.4, 1.35))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, callbacks=[draw_weights])
##########################
# model checkpoint tests #
##########################
print('Running ModelCheckpoint test')
nb_classes = 10
batch_size = 128
nb_epoch = 20
# small sample size to overfit on training data
max_train_samples = 50
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
# Create a slightly larger network than required to test best validation save only
model = Sequential()
model.add(Dense(784, 500))
model.add(Activation('relu'))
model.add(Dense(500, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# test file location
path = "/tmp"
filename = "model_weights.hdf5"
import os
f = os.path.join(path, filename)
print("Test model checkpointer")
# only store best validation model in checkpointer
checkpointer = cbks.ModelCheckpoint(filepath=f, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test), callbacks =[checkpointer])
if not os.path.isfile(f):
raise Exception("Model weights were not saved to %s" % (f))
print("Test model checkpointer without validation data")
import warnings
warnings.filterwarnings('error')
try:
# this should issue a warning
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, callbacks =[checkpointer])
except:
print("Tests passed")
import sys
sys.exit(0)
raise Exception("Modelcheckpoint tests did not pass")
|
mit
|
PredictiveScienceLab/py-mcmc
|
demos/demo2.py
|
2
|
4227
|
"""
This demo demonstrates how to construct a GPy model with a mean function
and train it using the pymcmc module. This model is equivalent to Bayesian
linear regression.
Author:
Ilias Bilionis
Date:
3/20/2014
"""
import numpy as np
import GPy
import pymcmc as pm
import matplotlib.pyplot as plt
# Write a class that represents the mean you wish to use:
class PolynomialBasis(object):
"""
A simple set of polynomials.
:param degree: The degree of the polynomials.
:type degree: int
"""
def __init__(self, degree):
"""
The constructor can do anything you want. The object should be
constructed before doing anything with pymcmc in any case.
Just make sure that inside the constructor you define the ``num_output``
attribute whose value should be equal to the number of basis functions.
"""
self.degree = degree
self.num_output = degree + 1 # YOU HAVE TO DEFINE THIS ATTRIBUTE!
def __call__(self, X):
"""
Evaluate the basis functions at ``X``.
Now, you should assume that ``X`` is a 2D numpy array of size
``num_points x input_dim``. If ``input_dim`` is 1, then you still need
to consider it as a 2D array because this is the kind of data that GPy
        requires. If you want the function to work also with 1D arrays when
        ``input_dim`` is one, then use the trick below.
The output of this function should be the design matrix. That is,
it should be the matrix ``phi`` of dimensions
        ``num_points x num_output``. In other words, ``phi[i, j]`` should be
the value of basis function ``phi_j`` at ``X[i, :]``.
"""
if X.ndim == 1:
X = X[:, None] # Trick for 1D arrays
return np.hstack([X ** i for i in range(self.degree + 1)])
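# A small illustrative check (added for exposition, not part of the original
# demo): a degree-2 basis evaluated at 5 one-dimensional inputs yields a
# 5 x 3 design matrix (constant column, the inputs, and their squares).
_demo_phi = PolynomialBasis(2)(np.linspace(0., 1., 5))
assert _demo_phi.shape == (5, 3)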
# Pick your degree
degree = 10
# Construct your basis
poly_basis = PolynomialBasis(degree)
# Let us generate some random data to play with
# The number of input dimensions
input_dim = 1
# The number of observations
num_points = 20
# The noise level we are going to add to the observations
noise = 0.1
# Observed inputs
X = np.random.rand(num_points, 1)
# We are going to generate the outputs from the space that is spanned by
# our basis functions and add some noise
# The weights of the basis functions:
weights = np.random.randn(poly_basis.num_output)
weights[2] = 0.
# The observations we make
Y = np.dot(poly_basis(X), weights) + noise * np.random.randn(num_points)
# The output also needs to be a 2D numpy array
Y = Y[:, None]
# Let's construct a GP model with just a mean and a diagonal covariance
# This is the mean (and at the same time the kernel)
mean = pm.MeanFunction(input_dim, poly_basis, ARD=True)
# Now, let's construct the model
model = GPy.models.GPRegression(X, Y, kernel=mean)
print 'Model before training:'
print str(model)
# You may just train the model by maximizing the likelihood:
model.optimize(messages=True)
print 'Trained model:'
print str(model)
# And just plot the predictions
model.plot(plot_limits=(0, 1))
# Let us also plot the full function
x = np.linspace(0, 1, 50)[:, None]
y = np.dot(poly_basis(x), weights)
plt.plot(x, y, 'r', linewidth=2)
plt.legend(['Mean of GP', '5% percentile of GP', '95% percentile of GP',
'Observations', 'Real Underlying Function'], loc='best')
plt.title('Model trained by maximizing the likelihood')
plt.show()
a = raw_input('press enter to continue...')
# Or you might want to do it using MCMC:
new_model = GPy.models.GPRegression(X, Y, kernel=mean)
proposal = pm.MALAProposal(dt=1.)
mcmc = pm.MetropolisHastings(new_model, proposal=proposal)
mcmc.sample(30000, num_thin=100, num_burn=1000, verbose=True)
print 'Model trained with MCMC:'
print str(new_model)
# Plot everything for this too:
new_model.plot(plot_limits=(0, 1))
# Let us also plot the full function
x = np.linspace(0, 1, 50)[:, None]
y = np.dot(poly_basis(x), weights)
plt.plot(x, y, 'r', linewidth=2)
plt.legend(['Mean of GP', '5% percentile of GP', '95% percentile of GP',
'Observations', 'Real Underlying Function'], loc='best')
plt.title('Model trained by MCMC')
plt.show()
a = raw_input('press enter to continue...')
|
lgpl-3.0
|
albertoferna/compmech
|
doc/source/conf.cython.py
|
3
|
14087
|
# -*- coding: utf-8 -*-
#
# CompMech documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 29 13:36:38 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, os.path, re
import itertools
import datetime
import compmech
YEAR = datetime.date.today().strftime('%Y')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('sphinxext'))
# Import support for ipython console session syntax highlighting (lives
# in the sphinxext directory defined above)
import ipython_console_highlighting
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.graphviz',
'matplotlib.sphinxext.plot_directive',
'ipython_console_highlighting',
'cython_highlighting',
'sphinx.ext.pngmath',
'sphinx.ext.todo',
'sphinx.ext.intersphinx'
]
try: import rst2pdf
except ImportError: pass
else: extensions.append('rst2pdf.pdfbuilder')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
exclude_patterns = ['py*', 'build']
# General information about the project.
project = 'CompMech'
authors = 'Saullo G. P. Castro'
copyright = '%s, %s' % (YEAR, authors)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = compmech.__version__
# The full version, including alpha/beta/rc tags.
release = version
try:
_match_version = re.compile(r'^\s*_*version\s*_*\s*=\s*["\']([^"\']+)["\'].*').match
with open(os.path.join(os.path.dirname(__file__), '..', 'Cython', 'Shadow.py')) as _f:
for line in itertools.islice(_f, 5): # assume version comes early enough
_m = _match_version(line)
if _m:
release = _m.group(1)
break
else:
print("FAILED TO PARSE PROJECT VERSION !")
except:
pass
# The short X.Y version.
version = re.sub('^([0-9]+[.][0-9]+).*', '\g<1>', release)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'math'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# todo
todo_include_todos = True
# intersphinx for standard :keyword:s (def, for, etc.)
intersphinx_mapping = {'python': ('http://docs.python.org/3/', None)}
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../logo/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../logo/logo.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CompMechdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
_stdauthor = r'Stefan Behnel, Robert Bradshaw, William Stein\\ Gary Furnish, Dag Seljebotn, Greg Ewing\\ Gabriel Gellner, editor'
latex_documents = [
('src/reference/index', 'reference.tex',
'Cython Reference Guide', _stdauthor, 'manual'),
('src/tutorial/index', 'tutorial.tex',
'Cython Tutorial', _stdauthor, 'manual')
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
latex_show_pagerefs = True
# We use False otherwise the module index gets generated twice.
latex_use_modindex = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'CompMech', u'CompMech Documentation',
[authors], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CompMech', u'CompMech Documentation',
authors, 'CompMech', 'Computational Mechanics in Python',
'Scientific Computation'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'CompMech'
epub_author = authors
epub_publisher = u''
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# If 'no', URL addresses will not be shown.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# -- Options for PDF output --------------------------------------------------
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
# If there is more than one author, separate them with \\.
# For example: r'Guido van Rossum\\Fred L. Drake, Jr., editor'
#
# The options element is a dictionary that lets you override
# this config per-document.
# For example,
# ('index', u'MyProject', u'My Project', u'Author Name',
# dict(pdf_compressed = True))
# would mean that specific document would be compressed
# regardless of the global pdf_compressed setting.
pdf_documents = [
('index', project, project, authors.replace(', ', '\\\\')),
]
# A comma-separated list of custom stylesheets. Example:
pdf_stylesheets = ['sphinx','kerning','a4']
# A list of folders to search for stylesheets. Example:
pdf_style_path = ['.', '_styles']
# Create a compressed PDF
# Use True/False or 1/0
# Example: compressed=True
pdf_compressed = True
# A colon-separated list of folders to search for fonts. Example:
# pdf_font_path = ['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']
# Language to be used for hyphenation support
#pdf_language = "en_US"
# Mode for literal blocks wider than the frame. Can be
# overflow, shrink or truncate
pdf_fit_mode = "shrink"
# Section level that forces a break page.
# For example: 1 means top-level sections start in a new page
# 0 means disabled
#pdf_break_level = 0
# When a section starts in a new page, force it to be 'even', 'odd',
# or just use 'any'
#pdf_breakside = 'any'
# Insert footnotes where they are defined instead of
# at the end.
#pdf_inline_footnotes = True
# verbosity level. 0 1 or 2
#pdf_verbosity = 0
# If false, no index is generated.
pdf_use_index = False
# If false, no modindex is generated.
pdf_use_modindex = False
# If false, no coverpage is generated.
#pdf_use_coverpage = True
# Name of the cover page template to use
#pdf_cover_template = 'sphinxcover.tmpl'
# Documents to append as an appendix to all manuals.
#pdf_appendices = []
# Enable experimental feature to split table cells. Use it
# if you get "DelayedTable too big" errors
#pdf_splittables = False
# Set the default DPI for images
#pdf_default_dpi = 72
# Enable rst2pdf extension modules (default is only vectorpdf)
# you need vectorpdf if you want to use sphinx's graphviz support
#pdf_extensions = ['vectorpdf']
# Page template name for "regular" pages
#pdf_page_template = 'cutePage'
# Show Table Of Contents at the beginning?
pdf_use_toc = False
# How many levels deep should the table of contents be?
pdf_toc_depth = 9999
# Add section number to section references
pdf_use_numbered_links = False
# Background images fitting mode
pdf_fit_background_mode = 'scale'
|
bsd-3-clause
|
jeffery-do/Vizdoombot
|
doom/lib/python3.5/site-packages/matplotlib/__init__.py
|
4
|
63770
|
"""
This is an object-oriented plotting library.
A procedural interface is provided by the companion pyplot module,
which may be imported directly, e.g.::
import matplotlib.pyplot as plt
or using ipython::
ipython
at your terminal, followed by::
In [1]: %matplotlib
In [2]: import matplotlib.pyplot as plt
at the ipython shell prompt.
For the most part, direct use of the object-oriented library is
encouraged when programming; pyplot is primarily for working
interactively. The
exceptions are the pyplot commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.pyplot.subplots`, and
:func:`~matplotlib.pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pylab
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
:mod:`matplotlib.patches`
defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
a global dictionary of default configuration settings. It is
    initialized by code which may be overridden by a matplotlibrc
file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pylab (if pylab is imported).
matplotlib was initially written by John D. Hunter (1968-2012) and is now
developed and maintained by a host of others.
Occasionally the internal documentation (python docstrings) will refer
to MATLAB®, a registered trademark of The MathWorks, Inc.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import sys
import distutils.version
from itertools import chain
import io
import inspect
import locale
import os
import re
import tempfile
import warnings
import contextlib
import distutils.sysconfig
import functools
# cbook must import matplotlib only within function
# definitions, so it is safe to import from it here.
from matplotlib.cbook import is_string_like, mplDeprecation, dedent, get_label
from matplotlib.compat import subprocess
from matplotlib.rcsetup import (defaultParams,
validate_backend,
cycler)
import numpy
from matplotlib.externals.six.moves.urllib.request import urlopen
from matplotlib.externals.six.moves import reload_module as reload
# Get the version from the _version.py versioneer file. For a git checkout,
# this is computed based on the number of commits since the last tag.
from ._version import get_versions
__version__ = str(get_versions()['version'])
del get_versions
__version__numpy__ = str('1.6') # minimum required numpy version
try:
import dateutil
except ImportError:
raise ImportError("matplotlib requires dateutil")
def compare_versions(a, b):
"return True if a is greater than or equal to b"
if a:
if six.PY3:
if isinstance(a, bytes):
a = a.decode('ascii')
if isinstance(b, bytes):
b = b.decode('ascii')
a = distutils.version.LooseVersion(a)
b = distutils.version.LooseVersion(b)
return a >= b
else:
return False
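# Illustrative sanity checks (added as an assumption about usage, not original
# matplotlib code): version strings compare loosely, and a missing version
# (None or '') always compares False.
assert compare_versions('1.5.6', '1.3')
assert not compare_versions(None, '1.3')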
if not compare_versions(six.__version__, '1.3'):
raise ImportError(
'six 1.3 or later is required; you have %s' % (
six.__version__))
try:
import pyparsing
except ImportError:
raise ImportError("matplotlib requires pyparsing")
else:
if not compare_versions(pyparsing.__version__, '1.5.6'):
raise ImportError(
"matplotlib requires pyparsing >= 1.5.6")
# pyparsing 2.0.0 bug, but it may be patched in distributions
try:
f = pyparsing.Forward()
f <<= pyparsing.Literal('a')
bad_pyparsing = f is None
except TypeError:
bad_pyparsing = True
# pyparsing 1.5.6 does not have <<= on the Forward class, but
# pyparsing 2.0.0 and later will spew deprecation warnings if
# using << instead. Additionally, the <<= in pyparsing 1.5.7 is
# broken, since it doesn't return self. In order to support
# pyparsing 1.5.6 and above with a common code base, this small
# monkey patch is applied.
if bad_pyparsing:
def _forward_ilshift(self, other):
self.__lshift__(other)
return self
pyparsing.Forward.__ilshift__ = _forward_ilshift
if not hasattr(sys, 'argv'): # for modpython
sys.argv = [str('modpython')]
major, minor1, minor2, s, tmp = sys.version_info
_python26 = (major == 2 and minor1 >= 6) or major >= 3
if not _python26:
raise ImportError('matplotlib requires Python 2.6 or later')
if not compare_versions(numpy.__version__, __version__numpy__):
raise ImportError(
'numpy %s or later is required; you have %s' % (
__version__numpy__, numpy.__version__))
def _is_writable_dir(p):
"""
    p is a string pointing to a putative writable dir -- return True if p
    is such a string, else False
"""
try:
p + '' # test is string like
except TypeError:
return False
# Test whether the operating system thinks it's a writable directory.
# Note that this check is necessary on Google App Engine, because the
# subsequent check will succeed even though p may not be writable.
if not os.access(p, os.W_OK) or not os.path.isdir(p):
return False
# Also test that it is actually possible to write to a file here.
try:
t = tempfile.TemporaryFile(dir=p)
try:
t.write(b'1')
finally:
t.close()
except:
return False
return True
class Verbose(object):
"""
A class to handle reporting. Set the fileo attribute to any file
instance to handle the output. Default is sys.stdout
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying')
vald = dict([(level, i) for i, level in enumerate(levels)])
# parse the verbosity from the command line; flags look like
# --verbose-silent or --verbose-helpful
_commandLineVerbose = None
for arg in sys.argv[1:]:
# cast to str because we are using unicode_literals,
# and argv is always str
if not arg.startswith(str('--verbose-')):
continue
level_str = arg[10:]
# If it doesn't match one of ours, then don't even
# bother noting it, we are just a 3rd-party library
# to somebody else's script.
if level_str in levels:
_commandLineVerbose = level_str
def __init__(self):
self.set_level('silent')
self.fileo = sys.stdout
def set_level(self, level):
'set the verbosity to one of the Verbose.levels strings'
if self._commandLineVerbose is not None:
level = self._commandLineVerbose
if level not in self.levels:
warnings.warn('matplotlib: unrecognized --verbose-* string "%s".'
' Legal values are %s' % (level, self.levels))
else:
self.level = level
def set_fileo(self, fname):
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if fname in std:
self.fileo = std[fname]
else:
try:
fileo = open(fname, 'w')
except IOError:
raise ValueError('Verbose object could not open log file "{0}"'
' for writing.\nCheck your matplotlibrc '
'verbose.fileo setting'.format(fname))
else:
self.fileo = fileo
def report(self, s, level='helpful'):
"""
print message s to self.fileo if self.level>=level. Return
value indicates whether a message was issued
"""
if self.ge(level):
print(s, file=self.fileo)
return True
return False
def wrap(self, fmt, func, level='helpful', always=True):
"""
        return a callable function that wraps func and reports its
output through the verbose handler if current verbosity level
is higher than level
if always is True, the report will occur on every function
call; otherwise only on the first time the function is called
"""
assert six.callable(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if (always or not wrapper._spoke):
spoke = self.report(fmt % ret, level)
if not wrapper._spoke:
wrapper._spoke = spoke
return ret
wrapper._spoke = False
wrapper.__doc__ = func.__doc__
return wrapper
def ge(self, level):
'return true if self.level is >= level'
return self.vald[self.level] >= self.vald[level]
verbose = Verbose()
def checkdep_dvipng():
try:
s = subprocess.Popen(['dvipng', '-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
line = stdout.decode('ascii').split('\n')[1]
v = line.split()[-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_ghostscript():
if sys.platform == 'win32':
gs_execs = ['gswin32c', 'gswin64c', 'gs']
else:
gs_execs = ['gs']
for gs_exec in gs_execs:
try:
s = subprocess.Popen(
[gs_exec, '--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
if s.returncode == 0:
v = stdout[:-1].decode('ascii')
return gs_exec, v
except (IndexError, ValueError, OSError):
pass
return None, None
def checkdep_tex():
try:
s = subprocess.Popen(['tex', '-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
line = stdout.decode('ascii').split('\n')[0]
pattern = '3\.1\d+'
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def checkdep_pdftops():
try:
s = subprocess.Popen(['pdftops', '-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
lines = stderr.decode('ascii').split('\n')
for line in lines:
if 'version' in line:
v = line.split()[-1]
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def checkdep_inkscape():
try:
s = subprocess.Popen(['inkscape', '-V'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
lines = stdout.decode('ascii').split('\n')
for line in lines:
if 'Inkscape' in line:
v = line.split()[1]
break
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def checkdep_xmllint():
try:
s = subprocess.Popen(['xmllint', '--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
lines = stderr.decode('ascii').split('\n')
for line in lines:
if 'version' in line:
v = line.split()[-1]
break
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def checkdep_ps_distiller(s):
if not s:
return False
flag = True
gs_req = '7.07'
gs_sugg = '7.07'
gs_exec, gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg):
pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later '
'is recommended to use the ps.usedistiller option.')
% (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
'unless ghostscript-%s or later is installed on your '
'system') % gs_req)
if s == 'xpdf':
pdftops_req = '3.0'
pdftops_req_alt = '0.9' # poppler version numbers, ugh
pdftops_v = checkdep_pdftops()
if compare_versions(pdftops_v, pdftops_req):
pass
elif (compare_versions(pdftops_v, pdftops_req_alt) and not
compare_versions(pdftops_v, '1.0')):
pass
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
'xpdf unless xpdf-%s or later is installed on '
'your system') % pdftops_req)
if flag:
return s
else:
return False
def checkdep_usetex(s):
if not s:
return False
tex_req = '3.1415'
gs_req = '7.07'
gs_sugg = '7.07'
dvipng_req = '1.5'
flag = True
tex_v = checkdep_tex()
if compare_versions(tex_v, tex_req):
pass
else:
flag = False
warnings.warn(('matplotlibrc text.usetex option can not be used '
'unless TeX-%s or later is '
'installed on your system') % tex_req)
dvipng_v = checkdep_dvipng()
if compare_versions(dvipng_v, dvipng_req):
pass
else:
flag = False
warnings.warn('matplotlibrc text.usetex can not be used with *Agg '
'backend unless dvipng-1.5 or later is '
'installed on your system')
gs_exec, gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg):
pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later is '
'recommended for use with the text.usetex '
'option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc text.usetex can not be used '
'unless ghostscript-%s or later is '
'installed on your system') % gs_req)
return flag
def _get_home():
"""Find user's home directory if possible.
Otherwise, returns None.
:see:
http://mail.python.org/pipermail/python-list/2005-February/325395.html
"""
try:
if six.PY2 and sys.platform == 'win32':
path = os.path.expanduser(b"~").decode(sys.getfilesystemencoding())
else:
path = os.path.expanduser("~")
except ImportError:
# This happens on Google App Engine (pwd module is not present).
pass
else:
if os.path.isdir(path):
return path
for evar in ('HOME', 'USERPROFILE', 'TMP'):
path = os.environ.get(evar)
if path is not None and os.path.isdir(path):
return path
return None
def _create_tmp_config_dir():
"""
If the config directory can not be created, create a temporary
directory.
Returns None if a writable temporary directory could not be created.
"""
import getpass
import tempfile
from matplotlib.cbook import mkdirs
try:
tempdir = tempfile.gettempdir()
except NotImplementedError:
# Some restricted platforms (such as Google App Engine) do not provide
# gettempdir.
return None
try:
username = getpass.getuser()
except KeyError:
username = str(os.getuid())
tempdir = os.path.join(tempdir, 'matplotlib-%s' % username)
os.environ['MPLCONFIGDIR'] = tempdir
mkdirs(tempdir)
return tempdir
get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
def _get_xdg_config_dir():
"""
Returns the XDG configuration directory, according to the `XDG
base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
"""
path = os.environ.get('XDG_CONFIG_HOME')
if path is None:
path = get_home()
if path is not None:
path = os.path.join(path, '.config')
return path
def _get_xdg_cache_dir():
"""
Returns the XDG cache directory, according to the `XDG
base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
"""
path = os.environ.get('XDG_CACHE_HOME')
if path is None:
path = get_home()
if path is not None:
path = os.path.join(path, '.cache')
return path
def _get_config_or_cache_dir(xdg_base):
from matplotlib.cbook import mkdirs
configdir = os.environ.get('MPLCONFIGDIR')
if configdir is not None:
configdir = os.path.abspath(configdir)
if not os.path.exists(configdir):
mkdirs(configdir)
if not _is_writable_dir(configdir):
return _create_tmp_config_dir()
return configdir
p = None
h = get_home()
if h is not None:
p = os.path.join(h, '.matplotlib')
if (sys.platform.startswith('linux') and xdg_base):
p = os.path.join(xdg_base, 'matplotlib')
if p is not None:
if os.path.exists(p):
if _is_writable_dir(p):
return p
else:
try:
mkdirs(p)
except OSError:
pass
else:
return p
return _create_tmp_config_dir()
def _get_configdir():
"""
Return the string representing the configuration directory.
The directory is chosen as follows:
1. If the MPLCONFIGDIR environment variable is supplied, choose that.
2a. On Linux, if `$HOME/.matplotlib` exists, choose that, but warn that
that is the old location. Barring that, follow the XDG specification
and look first in `$XDG_CONFIG_HOME`, if defined, or `$HOME/.config`.
2b. On other platforms, choose `$HOME/.matplotlib`.
3. If the chosen directory exists and is writable, use that as the
configuration directory.
4. If possible, create a temporary directory, and use it as the
configuration directory.
5. A writable directory could not be found or created; return None.
"""
return _get_config_or_cache_dir(_get_xdg_config_dir())
get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)
def _get_cachedir():
"""
Return the location of the cache directory.
The procedure used to find the directory is the same as for
_get_config_dir, except using `$XDG_CACHE_HOME`/`~/.cache` instead.
"""
return _get_config_or_cache_dir(_get_xdg_cache_dir())
get_cachedir = verbose.wrap('CACHEDIR=%s', _get_cachedir, always=False)
def _decode_filesystem_path(path):
if isinstance(path, bytes):
return path.decode(sys.getfilesystemencoding())
else:
return path
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a '
'directory')
return path
_file = _decode_filesystem_path(__file__)
path = os.sep.join([os.path.dirname(_file), 'mpl-data'])
if os.path.isdir(path):
return path
# setuptools' namespace_packages may highjack this init file
# so need to try something known to be in matplotlib, not basemap
import matplotlib.afm
_file = _decode_filesystem_path(matplotlib.afm.__file__)
path = os.sep.join([os.path.dirname(_file), 'mpl-data'])
if os.path.isdir(path):
return path
# py2exe zips pure python, so still need special check
if getattr(sys, 'frozen', None):
exe_path = os.path.dirname(_decode_filesystem_path(sys.executable))
path = os.path.join(exe_path, 'mpl-data')
if os.path.isdir(path):
return path
# Try again assuming we need to step up one more directory
path = os.path.join(os.path.split(exe_path)[0], 'mpl-data')
if os.path.isdir(path):
return path
# Try again assuming sys.path[0] is a dir not a exe
path = os.path.join(sys.path[0], 'mpl-data')
if os.path.isdir(path):
return path
raise RuntimeError('Could not find the matplotlib data files')
def _get_data_path_cached():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,
always=False)
def get_example_data(fname):
"""
get_example_data is deprecated -- use matplotlib.cbook.get_sample_data
instead
"""
raise NotImplementedError('get_example_data is deprecated -- use '
'matplotlib.cbook.get_sample_data instead')
def get_py2exe_datafiles():
datapath = get_data_path()
_, tail = os.path.split(datapath)
d = {}
for root, _, files in os.walk(datapath):
# Need to explicitly remove cocoa_agg files or py2exe complains
# NOTE I dont know why, but do as previous version
if 'Matplotlib.nib' in files:
files.remove('Matplotlib.nib')
files = [os.path.join(root, filename) for filename in files]
root = root.replace(tail, 'mpl-data')
root = root[root.index('mpl-data'):]
d[root] = files
return list(d.items())
def matplotlib_fname():
"""
Get the location of the config file.
The file location is determined in the following order
- `$PWD/matplotlibrc`
- `$MATPLOTLIBRC/matplotlibrc`
- `$MPLCONFIGDIR/matplotlibrc`
- On Linux,
- `$HOME/.matplotlib/matplotlibrc`, if it exists
- or `$XDG_CONFIG_HOME/matplotlib/matplotlibrc` (if
$XDG_CONFIG_HOME is defined)
- or `$HOME/.config/matplotlib/matplotlibrc` (if
$XDG_CONFIG_HOME is not defined)
- On other platforms,
- `$HOME/.matplotlib/matplotlibrc` if `$HOME` is defined.
- Lastly, it looks in `$MATPLOTLIBDATA/matplotlibrc` for a
system-defined copy.
"""
if six.PY2:
cwd = os.getcwdu()
else:
cwd = os.getcwd()
fname = os.path.join(cwd, 'matplotlibrc')
if os.path.exists(fname):
return fname
if 'MATPLOTLIBRC' in os.environ:
path = os.environ['MATPLOTLIBRC']
if os.path.exists(path):
fname = os.path.join(path, 'matplotlibrc')
if os.path.exists(fname):
return fname
configdir = _get_configdir()
if configdir is not None:
fname = os.path.join(configdir, 'matplotlibrc')
if os.path.exists(fname):
home = get_home()
if (sys.platform.startswith('linux') and
home is not None and
os.path.exists(os.path.join(
home, '.matplotlib', 'matplotlibrc'))):
warnings.warn(
"Found matplotlib configuration in ~/.matplotlib/. "
"To conform with the XDG base directory standard, "
"this configuration location has been deprecated "
"on Linux, and the new location is now %s/matplotlib/. "
"Please move your configuration there to ensure that "
"matplotlib will continue to find it in the future." %
_get_xdg_config_dir())
return os.path.join(
home, '.matplotlib', 'matplotlibrc')
return fname
path = get_data_path() # guaranteed to exist or raise
fname = os.path.join(path, 'matplotlibrc')
if not os.path.exists(fname):
warnings.warn('Could not find matplotlibrc; using defaults')
return fname
# names of keys to deprecate
# the values are a tuple of (new_name, f_old_2_new, f_new_2_old)
# the inverse function may be `None`
_deprecated_map = {
'text.fontstyle': ('font.style', lambda x: x, None),
'text.fontangle': ('font.style', lambda x: x, None),
'text.fontvariant': ('font.variant', lambda x: x, None),
'text.fontweight': ('font.weight', lambda x: x, None),
'text.fontsize': ('font.size', lambda x: x, None),
'tick.size': ('tick.major.size', lambda x: x, None),
'svg.embed_char_paths': ('svg.fonttype',
lambda x: "path" if x else "none", None),
'savefig.extension': ('savefig.format', lambda x: x, None),
'axes.color_cycle': ('axes.prop_cycle', lambda x: cycler('color', x),
lambda x: [c.get('color', None) for c in x]),
}
_deprecated_ignore_map = {
}
_obsolete_set = set(['tk.pythoninspect', ])
_all_deprecated = set(chain(_deprecated_ignore_map,
_deprecated_map, _obsolete_set))
class RcParams(dict):
"""
A dictionary object including validation
validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`
"""
validate = dict((key, converter) for key, (default, converter) in
six.iteritems(defaultParams)
if key not in _all_deprecated)
msg_depr = "%s is deprecated and replaced with %s; please use the latter."
msg_depr_ignore = "%s is deprecated and ignored. Use %s"
# validate values on the way in
def __init__(self, *args, **kwargs):
for k, v in six.iteritems(dict(*args, **kwargs)):
self[k] = v
def __setitem__(self, key, val):
try:
if key in _deprecated_map:
alt_key, alt_val, inverse_alt = _deprecated_map[key]
warnings.warn(self.msg_depr % (key, alt_key))
key = alt_key
val = alt_val(val)
elif key in _deprecated_ignore_map:
alt = _deprecated_ignore_map[key]
warnings.warn(self.msg_depr_ignore % (key, alt))
return
try:
cval = self.validate[key](val)
except ValueError as ve:
raise ValueError("Key %s: %s" % (key, str(ve)))
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError('%s is not a valid rc parameter.\
See rcParams.keys() for a list of valid parameters.' % (key,))
def __getitem__(self, key):
inverse_alt = None
if key in _deprecated_map:
alt_key, alt_val, inverse_alt = _deprecated_map[key]
warnings.warn(self.msg_depr % (key, alt_key))
key = alt_key
elif key in _deprecated_ignore_map:
alt = _deprecated_ignore_map[key]
warnings.warn(self.msg_depr_ignore % (key, alt))
key = alt
val = dict.__getitem__(self, key)
if inverse_alt is not None:
return inverse_alt(val)
else:
return val
# http://stackoverflow.com/questions/2390827
# (how-to-properly-subclass-dict-and-override-get-set)
# the default dict `update` does not use __setitem__
# so rcParams.update(...) (such as in seaborn) side-steps
# all of the validation over-ride update to force
# through __setitem__
def update(self, *args, **kwargs):
for k, v in six.iteritems(dict(*args, **kwargs)):
self[k] = v
def __repr__(self):
import pprint
class_name = self.__class__.__name__
indent = len(class_name) + 1
repr_split = pprint.pformat(dict(self), indent=1,
width=80 - indent).split('\n')
repr_indented = ('\n' + ' ' * indent).join(repr_split)
return '{0}({1})'.format(class_name, repr_indented)
def __str__(self):
return '\n'.join('{0}: {1}'.format(k, v)
for k, v in sorted(self.items()))
def keys(self):
"""
Return sorted list of keys.
"""
k = list(dict.keys(self))
k.sort()
return k
def values(self):
"""
Return values in order of sorted keys.
"""
return [self[k] for k in self.keys()]
def find_all(self, pattern):
"""
Return the subset of this RcParams dictionary whose keys match,
using :func:`re.search`, the given ``pattern``.
.. note::
Changes to the returned dictionary are *not* propagated to
the parent RcParams dictionary.
"""
import re
pattern_re = re.compile(pattern)
return RcParams((key, value)
for key, value in self.items()
if pattern_re.search(key))
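# A minimal usage sketch (added as an illustrative assumption, not original
# matplotlib code): find_all() builds a new RcParams restricted to keys that
# match a regular expression, e.g. every "lines.*" setting.
def _example_line_params(params):
    return params.find_all(r'^lines\.')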
def rc_params(fail_on_error=False):
"""Return a :class:`matplotlib.RcParams` instance from the
default matplotlib rc file.
"""
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([(key, default) for key, (default, _) in
six.iteritems(defaultParams)
if key not in _all_deprecated])
warnings.warn(message)
return ret
return rc_params_from_file(fname, fail_on_error)
URL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\')
def is_url(filename):
"""Return True if string is an http, ftp, or file URL path."""
return URL_REGEX.match(filename) is not None
def _url_lines(f):
# Compatibility for urlopen in python 3, which yields bytes.
for line in f:
yield line.decode('utf8')
@contextlib.contextmanager
def _open_file_or_url(fname):
if is_url(fname):
f = urlopen(fname)
yield _url_lines(f)
f.close()
else:
fname = os.path.expanduser(fname)
encoding = locale.getpreferredencoding(do_setlocale=False)
if encoding is None:
encoding = "utf-8"
with io.open(fname, encoding=encoding) as f:
yield f
_error_details_fmt = 'line #%d\n\t"%s"\n\tin file "%s"'
def _rc_params_in_file(fname, fail_on_error=False):
"""Return :class:`matplotlib.RcParams` from the contents of the given file.
Unlike `rc_params_from_file`, the configuration class only contains the
parameters specified in the file (i.e. default values are not filled in).
"""
cnt = 0
rc_temp = {}
with _open_file_or_url(fname) as fd:
try:
for line in fd:
cnt += 1
strippedline = line.split('#', 1)[0].strip()
if not strippedline:
continue
tup = strippedline.split(':', 1)
if len(tup) != 2:
error_details = _error_details_fmt % (cnt, line, fname)
warnings.warn('Illegal %s' % error_details)
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d' %
(fname, cnt))
rc_temp[key] = (val, line, cnt)
except UnicodeDecodeError:
warnings.warn(
('Cannot decode configuration file %s with '
'encoding %s, check LANG and LC_* variables')
% (fname, locale.getpreferredencoding(do_setlocale=False) or
'utf-8 (default)'))
raise
config = RcParams()
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
config[key] = val # try to convert to proper type or raise
else:
try:
config[key] = val # try to convert to proper type or skip
except Exception as msg:
error_details = _error_details_fmt % (cnt, line, fname)
warnings.warn('Bad val "%s" on %s\n\t%s' %
(val, error_details, msg))
for key, (val, line, cnt) in six.iteritems(rc_temp):
if key in defaultParams:
if fail_on_error:
config[key] = val # try to convert to proper type or raise
else:
try:
config[key] = val # try to convert to proper type or skip
except Exception as msg:
error_details = _error_details_fmt % (cnt, line, fname)
warnings.warn('Bad val "%s" on %s\n\t%s' %
(val, error_details, msg))
elif key in _deprecated_ignore_map:
warnings.warn('%s is deprecated. Update your matplotlibrc to use '
'%s instead.' % (key, _deprecated_ignore_map[key]))
else:
print("""
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://github.com/matplotlib/matplotlib/blob/master/matplotlibrc.template
or from the matplotlib source distribution""" % (key, cnt, fname),
file=sys.stderr)
return config
def rc_params_from_file(fname, fail_on_error=False, use_default_template=True):
"""Return :class:`matplotlib.RcParams` from the contents of the given file.
Parameters
----------
fname : str
Name of file parsed for matplotlib settings.
fail_on_error : bool
If True, raise an error when the parser fails to convert a parameter.
use_default_template : bool
If True, initialize with default parameters before updating with those
in the given file. If False, the configuration class only contains the
parameters specified in the file. (Useful for updating dicts.)
"""
config_from_file = _rc_params_in_file(fname, fail_on_error)
if not use_default_template:
return config_from_file
iter_params = six.iteritems(defaultParams)
config = RcParams([(key, default) for key, (default, _) in iter_params
if key not in _all_deprecated])
config.update(config_from_file)
verbose.set_level(config['verbose.level'])
verbose.set_fileo(config['verbose.fileo'])
if config['datapath'] is None:
config['datapath'] = get_data_path()
if not config['text.latex.preamble'] == ['']:
verbose.report("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
""" % '\n'.join(config['text.latex.preamble']), 'helpful')
verbose.report('loaded rc file %s' % fname)
return config
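# Usage sketch for rc_params_from_file (the file name is hypothetical):
#
#     full_config = rc_params_from_file('screen.rc')
#     overrides_only = rc_params_from_file('screen.rc',
#                                          use_default_template=False)
#
# The first call starts from defaultParams and layers the file on top; the
# second returns only the keys actually present in the file.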
# this is the instance used by the matplotlib classes
rcParams = rc_params()
if rcParams['examples.directory']:
# paths that are intended to be relative to matplotlib_fname()
# are allowed for the examples.directory parameter.
# However, we will need to fully qualify the path because
# Sphinx requires absolute paths.
if not os.path.isabs(rcParams['examples.directory']):
_basedir, _fname = os.path.split(matplotlib_fname())
# Sometimes matplotlib_fname() can return relative paths,
        # Also, using realpath() guarantees that Sphinx will use
# the same path that matplotlib sees (in case of weird symlinks).
_basedir = os.path.realpath(_basedir)
_fullpath = os.path.join(_basedir, rcParams['examples.directory'])
rcParams['examples.directory'] = _fullpath
rcParamsOrig = rcParams.copy()
rcParamsDefault = RcParams([(key, default) for key, (default, converter) in
six.iteritems(defaultParams)
if key not in _all_deprecated])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(
rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
if rcParams['axes.formatter.use_locale']:
locale.setlocale(locale.LC_ALL, '')
def rc(group, **kwargs):
"""
Set the current rc params. Group is the grouping for the rc, e.g.,
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, e.g., (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, e.g.,::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. e.g., you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations.
Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default
rc params after changes.
"""
aliases = {
'lw': 'linewidth',
'ls': 'linestyle',
'c': 'color',
'fc': 'facecolor',
'ec': 'edgecolor',
'mew': 'markeredgewidth',
'aa': 'antialiased',
}
if is_string_like(group):
group = (group,)
for g in group:
for k, v in six.iteritems(kwargs):
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
try:
rcParams[key] = v
except KeyError:
raise KeyError(('Unrecognized key "%s" for group "%s" and '
'name "%s"') % (key, g, name))
def rcdefaults():
"""
Restore the default rc params. These are not the params loaded by
the rc file, but mpl's internal params. See rc_file_defaults for
reloading the default params from the rc file
"""
rcParams.clear()
rcParams.update(rcParamsDefault)
def rc_file(fname):
"""
Update rc params from file.
"""
rcParams.update(rc_params_from_file(fname))
class rc_context(object):
"""
Return a context manager for managing rc settings.
This allows one to do::
with mpl.rc_context(fname='screen.rc'):
plt.plot(x, a)
with mpl.rc_context(fname='print.rc'):
plt.plot(x, b)
plt.plot(x, c)
The 'a' vs 'x' and 'c' vs 'x' plots would have settings from
'screen.rc', while the 'b' vs 'x' plot would have settings from
'print.rc'.
A dictionary can also be passed to the context manager::
with mpl.rc_context(rc={'text.usetex': True}, fname='screen.rc'):
plt.plot(x, a)
The 'rc' dictionary takes precedence over the settings loaded from
'fname'. Passing a dictionary only is also valid.
"""
def __init__(self, rc=None, fname=None):
self.rcdict = rc
self.fname = fname
self._rcparams = rcParams.copy()
try:
if self.fname:
rc_file(self.fname)
if self.rcdict:
rcParams.update(self.rcdict)
except:
# if anything goes wrong, revert rc parameters and re-raise
rcParams.clear()
rcParams.update(self._rcparams)
raise
def __enter__(self):
return self
def __exit__(self, type, value, tb):
rcParams.update(self._rcparams)
def rc_file_defaults():
"""
Restore the default rc params from the original matplotlib rc that
was loaded
"""
rcParams.update(rcParamsOrig)
_use_error_msg = """ This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
"""
def use(arg, warn=True, force=False):
"""
Set the matplotlib backend to one of the known backends.
The argument is case-insensitive. *warn* specifies whether a
warning should be issued if a backend has already been set up.
*force* is an **experimental** flag that tells matplotlib to
attempt to initialize a new backend by reloading the backend
module.
.. note::
This function must be called *before* importing pyplot for
the first time; or, if you are not using pyplot, it must be called
before importing matplotlib.backends. If warn is True, a warning
        is issued if you try to call this after pylab or pyplot have been
loaded. In certain black magic use cases, e.g.
:func:`pyplot.switch_backend`, we are doing the reloading necessary to
make the backend switch work (in some cases, e.g., pure image
backends) so one can set warn=False to suppress the warnings.
To find out which backend is currently set, see
:func:`matplotlib.get_backend`.
"""
# Lets determine the proper backend name first
if arg.startswith('module://'):
name = arg
else:
# Lowercase only non-module backend names (modules are case-sensitive)
arg = arg.lower()
name = validate_backend(arg)
# Check if we've already set up a backend
if 'matplotlib.backends' in sys.modules:
# Warn only if called with a different name
if (rcParams['backend'] != name) and warn:
warnings.warn(_use_error_msg)
# Unless we've been told to force it, just return
if not force:
return
need_reload = True
else:
need_reload = False
# Store the backend name
rcParams['backend'] = name
# If needed we reload here because a lot of setup code is triggered on
# module import. See backends/__init__.py for more detail.
if need_reload:
reload(sys.modules['matplotlib.backends'])
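# Usage sketch for use() (the backend name is just an example); it must run
# before pyplot is imported for the first time:
#
#     import matplotlib
#     matplotlib.use('Agg')
#     import matplotlib.pyplot as plt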
def get_backend():
"""Return the name of the current backend."""
return rcParams['backend']
def interactive(b):
"""
Set interactive mode to boolean b.
If b is True, then draw after every plotting command, e.g., after xlabel
"""
rcParams['interactive'] = b
def is_interactive():
'Return true if plot mode is interactive'
return rcParams['interactive']
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
# Now allow command line to override
# Allow command line access to the backend with -d (MATLAB compatible
# flag)
for s in sys.argv[1:]:
# cast to str because we are using unicode_literals,
# and argv is always str
if s.startswith(str('-d')) and len(s) > 2: # look for a -d flag
try:
use(s[2:])
warnings.warn("Using the -d command line argument to select a "
"matplotlib backend is deprecated. Please use the "
"MPLBACKEND environment variable instead.",
mplDeprecation)
break
except (KeyError, ValueError):
pass
# we don't want to assume all -d flags are backends, e.g., -debug
else:
# no backend selected from the command line, so we check the environment
# variable MPLBACKEND
try:
use(os.environ['MPLBACKEND'])
except (KeyError, ValueError):
pass
default_test_modules = [
'matplotlib.tests.test_agg',
'matplotlib.tests.test_animation',
'matplotlib.tests.test_arrow_patches',
'matplotlib.tests.test_artist',
'matplotlib.tests.test_axes',
'matplotlib.tests.test_backend_bases',
'matplotlib.tests.test_backend_pdf',
'matplotlib.tests.test_backend_pgf',
'matplotlib.tests.test_backend_ps',
'matplotlib.tests.test_backend_qt4',
'matplotlib.tests.test_backend_qt5',
'matplotlib.tests.test_backend_svg',
'matplotlib.tests.test_basic',
'matplotlib.tests.test_bbox_tight',
'matplotlib.tests.test_cbook',
'matplotlib.tests.test_coding_standards',
'matplotlib.tests.test_collections',
'matplotlib.tests.test_colorbar',
'matplotlib.tests.test_colors',
'matplotlib.tests.test_compare_images',
'matplotlib.tests.test_container',
'matplotlib.tests.test_contour',
'matplotlib.tests.test_dates',
'matplotlib.tests.test_delaunay',
'matplotlib.tests.test_figure',
'matplotlib.tests.test_font_manager',
'matplotlib.tests.test_gridspec',
'matplotlib.tests.test_image',
'matplotlib.tests.test_legend',
'matplotlib.tests.test_lines',
'matplotlib.tests.test_mathtext',
'matplotlib.tests.test_mlab',
'matplotlib.tests.test_offsetbox',
'matplotlib.tests.test_patches',
'matplotlib.tests.test_path',
'matplotlib.tests.test_patheffects',
'matplotlib.tests.test_pickle',
'matplotlib.tests.test_png',
'matplotlib.tests.test_quiver',
'matplotlib.tests.test_rcparams',
'matplotlib.tests.test_scale',
'matplotlib.tests.test_simplification',
'matplotlib.tests.test_spines',
'matplotlib.tests.test_streamplot',
'matplotlib.tests.test_style',
'matplotlib.tests.test_subplots',
'matplotlib.tests.test_table',
'matplotlib.tests.test_text',
'matplotlib.tests.test_texmanager',
'matplotlib.tests.test_ticker',
'matplotlib.tests.test_tightlayout',
'matplotlib.tests.test_transforms',
'matplotlib.tests.test_triangulation',
'matplotlib.tests.test_units',
'matplotlib.tests.test_widgets',
'matplotlib.tests.test_cycles',
'matplotlib.tests.test_labeled_data_unpacking',
'matplotlib.sphinxext.tests.test_tinypages',
'mpl_toolkits.tests.test_mplot3d',
'mpl_toolkits.tests.test_axes_grid1',
'mpl_toolkits.tests.test_axes_grid',
]
def verify_test_dependencies():
if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'tests')):
raise ImportError("matplotlib test data is not installed")
try:
import nose
try:
from unittest import mock
except:
import mock
except ImportError:
print("matplotlib.test requires nose and mock to run.")
raise
def test(verbosity=1):
"""run the matplotlib test suite"""
verify_test_dependencies()
try:
import faulthandler
except ImportError:
pass
else:
faulthandler.enable()
old_backend = rcParams['backend']
try:
use('agg')
import nose
import nose.plugins.builtin
from .testing.noseclasses import KnownFailure
from nose.plugins.manager import PluginManager
from nose.plugins import multiprocess
# store the old values before overriding
plugins = []
plugins.append(KnownFailure())
plugins.extend([plugin() for plugin in nose.plugins.builtin.plugins])
manager = PluginManager(plugins=plugins)
config = nose.config.Config(verbosity=verbosity, plugins=manager)
# Nose doesn't automatically instantiate all of the plugins in the
# child processes, so we have to provide the multiprocess plugin with
# a list.
multiprocess._instantiate_plugins = [KnownFailure]
success = nose.run(
defaultTest=default_test_modules,
config=config,
)
finally:
if old_backend.lower() != 'agg':
use(old_backend)
return success
test.__test__ = False # nose: this function is not a test
def _replacer(data, key):
# if key isn't a string don't bother
if not isinstance(key, six.string_types):
return key
# try to use __getitem__
try:
return data[key]
# key does not exist, silently fall back to key
except KeyError:
return key
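# Behaviour sketch for _replacer with a dict-like "data" container:
#
#     data = {'x': [1, 2, 3]}
#     _replacer(data, 'x')        # -> [1, 2, 3]  (key found, value returned)
#     _replacer(data, 'missing')  # -> 'missing'  (KeyError, key passed back)
#     _replacer(data, 42)         # -> 42         (non-string keys untouched)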
_DATA_DOC_APPENDIX = """
Notes
-----
In addition to the above described arguments, this function can take a
**data** keyword argument. If such a **data** argument is given, the
following arguments are replaced by **data[<arg>]**:
{replaced}
"""
def unpack_labeled_data(replace_names=None, replace_all_args=False,
label_namer=None, positional_parameter_names=None):
"""
    A decorator to add a 'data' kwarg to any function. The signature
of the input function must include the ax argument at the first position ::
def foo(ax, *args, **kwargs)
so this is suitable for use with Axes methods.
Parameters
----------
replace_names : list of strings, optional, default: None
The list of parameter names which arguments should be replaced by
`data[name]`. If None, all arguments are replaced if they are
included in `data`.
replace_all_args : bool, default: False
If True, all arguments in *args get replaced, even if they are not
in replace_names.
label_namer : string, optional, default: None
The name of the parameter which argument should be used as label, if
label is not set. If None, the label keyword argument is not set.
positional_parameter_names : list of strings or callable, optional
        The full list of positional parameter names (excluding an explicit
        `ax`/'self' argument at the first place and including all possible
        positional parameters in `*args`), in the right order. Can also
        include all other keyword parameters. Only needed if the wrapped
        function contains `*args` and (replace_names is not None or
        replace_all_args is False). If it is a callable, it will be called
        with the actual
tuple of *args and the data and should return a list like
above.
        NOTE: callables should only be used when the names and order of *args
        can only be determined at runtime. Please use a list of names when
        the order and names of *args are clear before runtime!
"""
if replace_names is not None:
replace_names = set(replace_names)
def param(func):
new_sig = None
python_has_signature = major >= 3 and minor1 >= 3
python_has_wrapped = major >= 3 and minor1 >= 2
# if in a legacy version of python and IPython is already imported
# try to use their back-ported signature
if not python_has_signature and 'IPython' in sys.modules:
try:
import IPython.utils.signatures
signature = IPython.utils.signatures.signature
Parameter = IPython.utils.signatures.Parameter
except ImportError:
pass
else:
python_has_signature = True
else:
if python_has_signature:
signature = inspect.signature
Parameter = inspect.Parameter
if not python_has_signature:
arg_spec = inspect.getargspec(func)
_arg_names = arg_spec.args
_has_varargs = arg_spec.varargs is not None
_has_varkwargs = arg_spec.keywords is not None
else:
sig = signature(func)
_has_varargs = False
_has_varkwargs = False
_arg_names = []
params = list(sig.parameters.values())
for p in params:
if p.kind is Parameter.VAR_POSITIONAL:
_has_varargs = True
elif p.kind is Parameter.VAR_KEYWORD:
_has_varkwargs = True
else:
_arg_names.append(p.name)
data_param = Parameter('data',
Parameter.KEYWORD_ONLY,
default=None)
if _has_varkwargs:
params.insert(-1, data_param)
else:
params.append(data_param)
new_sig = sig.replace(parameters=params)
# Import-time check: do we have enough information to replace *args?
arg_names_at_runtime = False
# there can't be any positional arguments behind *args and no
# positional args can end up in **kwargs, so only *varargs make
# problems.
# http://stupidpythonideas.blogspot.de/2013/08/arguments-and-parameters.html
if not _has_varargs:
# all args are "named", so no problem
# remove the first "ax" / self arg
arg_names = _arg_names[1:]
else:
# Here we have "unnamed" variables and we need a way to determine
            # whether to replace an arg or not
if replace_names is None:
# all argnames should be replaced
arg_names = None
elif len(replace_names) == 0:
# No argnames should be replaced
arg_names = []
elif len(_arg_names) > 1 and (positional_parameter_names is None):
# we got no manual parameter names but more than an 'ax' ...
if len(set(replace_names) - set(_arg_names[1:])) == 0:
# all to be replaced arguments are in the list
arg_names = _arg_names[1:]
else:
msg = ("Got unknown 'replace_names' and wrapped function "
"'%s' uses '*args', need "
"'positional_parameter_names'!")
raise AssertionError(msg % func.__name__)
else:
if positional_parameter_names is not None:
if callable(positional_parameter_names):
# determined by the function at runtime
arg_names_at_runtime = True
# so that we don't compute the label_pos at import time
arg_names = []
else:
arg_names = positional_parameter_names
else:
if replace_all_args:
arg_names = []
else:
msg = ("Got 'replace_names' and wrapped function "
"'%s' uses *args, need "
"'positional_parameter_names' or "
"'replace_all_args'!")
raise AssertionError(msg % func.__name__)
# compute the possible label_namer and label position in positional
# arguments
label_pos = 9999 # bigger than all "possible" argument lists
label_namer_pos = 9999 # bigger than all "possible" argument lists
if (label_namer and # we actually want a label here ...
arg_names and # and we can determine a label in *args ...
(label_namer in arg_names)): # and it is in *args
label_namer_pos = arg_names.index(label_namer)
if "label" in arg_names:
label_pos = arg_names.index("label")
        # Check the case we know a label_namer but we can't find it in the
# arg_names... Unfortunately the label_namer can be in **kwargs,
# which we can't detect here and which results in a non-set label
# which might surprise the user :-(
if label_namer and not arg_names_at_runtime and not _has_varkwargs:
if not arg_names:
msg = ("label_namer '%s' can't be found as the parameter "
"without 'positional_parameter_names'.")
raise AssertionError(msg % label_namer)
elif label_namer not in arg_names:
msg = ("label_namer '%s' can't be found in the parameter "
"names (known argnames: %s).")
raise AssertionError(msg % (label_namer, arg_names))
else:
# this is the case when the name is in arg_names
pass
@functools.wraps(func)
def inner(ax, *args, **kwargs):
# this is needed because we want to change these values if
# arg_names_at_runtime==True, but python does not allow assigning
# to a variable in a outer scope. So use some new local ones and
# set them to the already computed values.
_label_pos = label_pos
_label_namer_pos = label_namer_pos
_arg_names = arg_names
label = None
data = kwargs.pop('data', None)
if data is not None:
if arg_names_at_runtime:
# update the information about replace names and
# label position
_arg_names = positional_parameter_names(args, data)
if (label_namer and # we actually want a label here ...
_arg_names and # and we can find a label in *args
(label_namer in _arg_names)): # and it is in *args
_label_namer_pos = _arg_names.index(label_namer)
if "label" in _arg_names:
                        _label_pos = _arg_names.index("label")
# save the current label_namer value so that it can be used as
# a label
if _label_namer_pos < len(args):
label = args[_label_namer_pos]
else:
label = kwargs.get(label_namer, None)
# ensure a string, as label can't be anything else
if not isinstance(label, six.string_types):
label = None
if (replace_names is None) or (replace_all_args is True):
# all should be replaced
args = tuple(_replacer(data, a) for
j, a in enumerate(args))
else:
# An arg is replaced if the arg_name of that position is
# in replace_names ...
if len(_arg_names) < len(args):
raise RuntimeError(
"Got more args than function expects")
args = tuple(_replacer(data, a)
if _arg_names[j] in replace_names else a
for j, a in enumerate(args))
if replace_names is None:
# replace all kwargs ...
kwargs = dict((k, _replacer(data, v))
for k, v in six.iteritems(kwargs))
else:
# ... or only if a kwarg of that name is in replace_names
kwargs = dict((k, _replacer(data, v)
if k in replace_names else v)
for k, v in six.iteritems(kwargs))
# replace the label if this func "wants" a label arg and the user
# didn't set one. Note: if the user puts in "label=None", it does
# *NOT* get replaced!
user_supplied_label = (
(len(args) >= _label_pos) or # label is included in args
('label' in kwargs) # ... or in kwargs
)
if (label_namer and not user_supplied_label):
if _label_namer_pos < len(args):
kwargs['label'] = get_label(args[_label_namer_pos], label)
elif label_namer in kwargs:
kwargs['label'] = get_label(kwargs[label_namer], label)
else:
import warnings
msg = ("Tried to set a label via parameter '%s' in "
"func '%s' but couldn't find such an argument. \n"
"(This is a programming error, please report to "
"the matplotlib list!)")
warnings.warn(msg % (label_namer, func.__name__),
RuntimeWarning, stacklevel=2)
return func(ax, *args, **kwargs)
pre_doc = inner.__doc__
if pre_doc is None:
pre_doc = ''
else:
pre_doc = dedent(pre_doc)
_repl = ""
if replace_names is None:
_repl = "* All positional and all keyword arguments."
else:
if len(replace_names) != 0:
_repl = "* All arguments with the following names: '{names}'."
if replace_all_args:
_repl += "\n* All positional arguments."
_repl = _repl.format(names="', '".join(replace_names))
inner.__doc__ = (pre_doc +
_DATA_DOC_APPENDIX.format(replaced=_repl))
if not python_has_wrapped:
inner.__wrapped__ = func
if new_sig is not None:
inner.__signature__ = new_sig
return inner
return param
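# Usage sketch for the decorator (the plotting function below is
# hypothetical, not part of matplotlib's API):
#
#     @unpack_labeled_data(replace_names=['x', 'y'], label_namer='y')
#     def my_plot(ax, x, y, **kwargs):
#         return ax.plot(x, y, **kwargs)
#
#     # my_plot(ax, 'time', 'price', data=df) would look up 'time' and
#     # 'price' in df and default the label to 'price' unless the caller
#     # passes one explicitly.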
verbose.report('matplotlib version %s' % __version__)
verbose.report('verbose.level %s' % verbose.level)
verbose.report('interactive is %s' % is_interactive())
verbose.report('platform is %s' % sys.platform)
verbose.report('loaded modules: %s' % six.iterkeys(sys.modules), 'debug')
|
mit
|
clairetang6/bokeh
|
bokeh/sampledata/gapminder.py
|
8
|
2810
|
''' Provide a pandas DataFrame instance of four of the datasets from gapminder.org.
These are read in from csv files that have been downloaded from Bokeh's
sample data on S3. But the original code that generated the csvs from the
raw gapminder data is available at the bottom of this file.
'''
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'gapminder sample data requires Pandas (http://pandas.pydata.org) to be installed')
from os.path import join
import sys
from . import _data_dir
data_dir = _data_dir()
datasets = [
'fertility',
'life_expectancy',
'population',
'regions',
]
for dataset in datasets:
filename = join(data_dir, 'gapminder_%s.csv' % dataset)
try:
setattr(
sys.modules[__name__],
dataset,
pd.read_csv(filename, index_col='Country')
)
except (IOError, OSError):
raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)
__all__ = datasets
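# Usage sketch (assumes bokeh.sampledata.download() has already fetched the
# csv files into the sample data directory):
#
#     from bokeh.sampledata.gapminder import fertility, regions
#     fertility.head()      # rows are indexed by country name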
# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.
"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
def _get_data(url):
    # Get the data from the url and return only 1964 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
|
bsd-3-clause
|
RomainBrault/scikit-learn
|
examples/decomposition/plot_pca_3d.py
|
354
|
2432
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
|
bsd-3-clause
|
danielrd6/pyslight
|
pyslight/starlight.py
|
1
|
8113
|
#!/usr/bin/env python
import numpy as np
from ifscube.spectools import get_wl
import pyfits as pf
from scipy.interpolate import interp1d
# import matplotlib as mpl
import matplotlib.pyplot as plt
# import scipy.ndimage as ndim
from scipy.ndimage import gaussian_filter as gf
import subprocess
def perturbation(infile, niterate=3,
slexec='/datassd/starlight/StarlightChains_v04.exe'):
"""
Function to perturb the spectrum and check the stability of the
Starlight solution.
"""
with open(infile, 'r') as f:
a = f.readlines()
specfile = a[-1].split()[0]
outsl = a[-1].split()[-1]
spec = np.loadtxt(specfile, unpack=True)
noise = np.average(spec[2])
c = 0
while c < niterate:
pspec = 'tempspec.txt'
np.savetxt(
pspec, np.column_stack([
spec[0], np.random.normal(spec[1], noise), spec[2], spec[3]]))
with open('temp.in', 'w') as tempin:
for line in a[:-1]:
tempin.write(line)
tempin.write(
a[-1].replace(outsl, 'outsl_{:04d}.txt'.format(c)).
replace(specfile, 'tempspec.txt'))
subprocess.call('{:s} < temp.in'.format(slexec), shell=True)
c += 1
return
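# Usage sketch for perturbation() (file names and the Starlight executable
# path are only illustrations):
#
#     perturbation('galaxy_grid.in', niterate=10,
#                  slexec='/usr/local/bin/StarlightChains_v04.exe')
#
# Each iteration adds Gaussian noise with sigma equal to the mean quoted
# uncertainty and re-runs Starlight, writing outsl_0000.txt, outsl_0001.txt...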
def fits2sl(spec, mask=None, dwl=1, integerwl=True, writetxt=False,
errfraction=None, normspec=False, normwl=[6100, 6200],
gauss_convolve=0):
"""
Converts a 1D FITS spectrum to the format accepted by Starlight.
Parameters
----------
spec : string
Name of the FITS spectrum.
mask : None or string
Name of the ASCII file containing the regions to be masked,
as a sequence of initial and final wavelength coordinates, one
pair per line.
dwl : number
The step in wavelength of the resampled spectrum. We recommend
using the standard 1 angstrom.
integerwl : boolean
True if the wavelength coordinates can be written as integers.
writetxt : boolean
True if the function should write an output ASCII file.
errfraction : number
Fraction of the signal to be used in case the uncertainties
are unknown.
gauss_convolve : number
Sigma of the gaussian kernel to convolve with the spectrum.
Returns
-------
slspec : numpy.ndarray
2D array with 4 columns, containing wavelength, flux density,
uncertainty and flags respectively.
"""
# Loading spectrum from FITS file.
a = pf.getdata(spec)
wl = get_wl(spec)
print('Average dispersion: ', np.average(np.diff(wl)))
# Linear interpolation of the spectrum and resampling of
# the spectrum.
f = interp1d(wl, gf(a, gauss_convolve), kind='linear')
if integerwl:
wlrebin = np.arange(int(wl[0]) + 1, int(wl[-1]) - 1)
frebin = f(wlrebin)
mcol = np.ones(len(wlrebin))
if mask is not None:
masktab = np.loadtxt(mask)
for i in range(len(masktab)):
mcol[(wlrebin >= masktab[i, 0]) & (wlrebin <= masktab[i, 1])] = 99
if normspec:
normfactor = 1. / np.median(frebin[(wlrebin > normwl[0]) &
(wlrebin < normwl[1])])
else:
normfactor = 1.0
frebin *= normfactor
if (errfraction is not None) and (mask is not None):
vectors = [wlrebin, frebin, frebin * errfraction, mcol]
txt_format = ['%d', '%.6e', '%.6e', '%d']
elif (errfraction is not None) and (mask is None):
vectors = [wlrebin, frebin, frebin * errfraction]
txt_format = ['%d', '%.6e', '%.6e']
elif (errfraction is None) and (mask is not None):
vectors = [wlrebin, frebin, mcol]
txt_format = ['%d', '%.6e', '%d']
elif (errfraction is None) and (mask is None):
vectors = [wlrebin, frebin]
txt_format = ['%d', '%.6e']
slspec = np.column_stack(vectors)
if writetxt:
np.savetxt(spec.strip('fits') + 'txt', slspec, fmt=txt_format)
return slspec
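# Usage sketch for fits2sl (file names are hypothetical):
#
#     slspec = fits2sl('galaxy.fits', mask='emission_lines.txt',
#                      errfraction=0.05, writetxt=True)
#
# This resamples the spectrum to 1 angstrom, flags the masked windows with
# 99 and writes 'galaxy.txt' with wavelength, flux, 5% of the flux as the
# uncertainty and the mask column.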
def readsl(synthfile, full_output=False):
f = open(synthfile, 'r')
a = f.readlines()
f.close()
skpr = [i for i in np.arange(len(a)) if '## Synthetic spectrum' in a[i]][0]
b = np.loadtxt(synthfile, skiprows=skpr + 2)
fobs_norm = float([i.split()[0] for i in a
if '[fobs_norm (in input units)]' in i][0])
b[:, [1, 2]] *= fobs_norm
return b
def plotsl(synthfile, masked=False, overplot=False):
"""
Plots the observed spectrum and overlays the resulting SSP
synthesis.
Parameters
----------
synthfile : string
Name of the ASCII file containing the output of Starlight.
masked : boolean
        Omit the masked regions from the observed spectrum.
Returns
-------
Nothing.
"""
b = readsl(synthfile)
if not overplot:
fig = plt.figure()
ax = fig.add_subplot(111)
if masked:
m = b[:, 3] > 0
ax.plot(b[m, 0], b[m, 1], lw=2)
ax.plot(b[:, 0], b[:, 1])
ax.plot(b[:, 0], b[:, 2])
plt.show()
return
def subtractmodel(synthfile, fitsfile=None, writefits=False):
a = readsl(synthfile)
if fitsfile is None:
b = np.column_stack([a[:, 0], a[:, 1] - a[:, 2]])
else:
wl = get_wl(fitsfile)
f = interp1d(wl, pf.getdata(fitsfile))
m = interp1d(a[:, 0], a[:, 2], bounds_error=False, fill_value=0)
b = np.column_stack([wl, f(wl) - m(wl)])
if writefits:
if fitsfile is None:
print('ERROR! No FITS file given.')
return
pf.writeto(fitsfile[:-4] + 'sp.fits', f(wl) - m(wl),
header=pf.getheader(fitsfile))
return b
def powerlaw_flux(synthfile, wl=5100, alpha=0.5):
with open(synthfile, 'r') as f:
synth = f.readlines()
wln = float(synth[22].split()[0])
fnorm = float(synth[25].split()[0])
xpl = float(synth[108].split()[1])
def powerlaw(wl, wlnorm=4620, alpha=0.5):
return (wl / float(wlnorm)) ** (-1 - alpha)
print(wln, fnorm, xpl)
f_lambda = fnorm * xpl / 100. * powerlaw(wl)
return f_lambda
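# Usage sketch for powerlaw_flux (the synthesis file name is hypothetical):
#
#     f5100 = powerlaw_flux('outsl_0000.txt', wl=5100, alpha=0.5)
#
# This evaluates the fitted power-law component at the requested wavelength,
# in the same flux units as the input spectrum.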
def sdss2sl(infile, mask=None, dopcor=False, writetxt=False,
outfile='lixo.txt', gauss_convolve=0, normspec=False,
wlnorm=[6000, 6100], dwl=1, integerwl=True):
"""
Creates an ASCII file from the sdss spectrum file.
Parameters
----------
infile : string
Name of the original SDSS file.
mask : string
        Name of the ASCII mask definition file.
dopcor : bool
Apply doppler correction based on the redshift from the
infile header.
    writetxt : bool
        If True, write the resampled spectrum to the ASCII file named by
        *outfile*.
"""
hdul = pf.open(infile)
wl0 = hdul[0].header['crval1']
npoints = np.shape(hdul[0].data)[1]
dwl = hdul[0].header['cd1_1']
wl = 10 ** (wl0 + np.linspace(0, npoints * dwl, npoints))
if dopcor:
wl = wl / (1. + hdul[0].header['z'])
spectrum = hdul[0].data[0, :] * 1e-17 # in ergs/s/cm^2/A
error = hdul[0].data[2, :] * 1e-17 # in ergs/s/cm^2/A
origmask = hdul[0].data[3, :]
print('Average dispersion: ', np.average(np.diff(wl)))
# Linear interpolation of the spectrum and resampling of
# the spectrum.
f = interp1d(wl, gf(spectrum, gauss_convolve), kind='linear')
err = interp1d(wl, gf(error, gauss_convolve), kind='linear')
om = interp1d(wl, gf(origmask, gauss_convolve), kind='linear')
if integerwl:
wlrebin = np.arange(int(wl[0]) + 1, int(wl[-1]) - 1)
frebin = f(wlrebin)
erebin = err(wlrebin)
mrebin = om(wlrebin)
mcol = np.ones(len(wlrebin))
if mask is not None:
masktab = np.loadtxt(mask)
for i in range(len(masktab)):
mcol[(wlrebin >= masktab[i, 0]) & (wlrebin <= masktab[i, 1])] = 99
else:
mcol = mrebin
mcol[mcol > 3] = 99
vectors = [wlrebin, frebin, erebin, mcol]
txt_format = ['%d', '%.6e', '%.6e', '%d']
slspec = np.column_stack(vectors)
if writetxt:
np.savetxt(outfile, slspec, fmt=txt_format)
return slspec
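# Usage sketch for sdss2sl (file names are hypothetical):
#
#     sdss2sl('spSpec-51908-0277-120.fit', dopcor=True, writetxt=True,
#             outfile='sdss_galaxy.txt')
#
# This shifts the spectrum to the rest frame, resamples it to 1 angstrom and
# maps the SDSS pixel mask onto Starlight flags before writing the ASCII
# output.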
|
gpl-3.0
|