repo_name stringlengths 6-112 | path stringlengths 4-204 | copies stringlengths 1-3 | size stringlengths 4-6 | content stringlengths 714-810k | license stringclasses 15 values |
---|---|---|---|---|---|
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/frame/test_alter_axes.py | 7 | 26538 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Index, MultiIndex,
RangeIndex)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameAlterAxes(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_set_index(self):
idx = Index(np.arange(len(self.mixed_frame)))
# cache it
_ = self.mixed_frame['foo'] # noqa
self.mixed_frame.index = idx
self.assertIs(self.mixed_frame['foo'].index, idx)
with assertRaisesRegexp(ValueError, 'Length mismatch'):
self.mixed_frame.index = idx[::2]
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame({'A': [1.1, 2.2, 3.3], 'B': [5.0, 6.1, 7.2]},
index=[2010, 2011, 2012])
expected = df.ix[2010]
new_index = df.index.astype(np.int32)
df.index = new_index
result = df.ix[2010]
assert_series_equal(result, expected)
def test_set_index2(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
# new object, single-column
result = df.set_index('C')
result_nodrop = df.set_index('C', drop=False)
index = Index(df['C'], name='C')
expected = df.ix[:, ['A', 'B', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
self.assertEqual(result.index.name, index.name)
# inplace, single
df2 = df.copy()
df2.set_index('C', inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index('C', drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# create new object, multi-column
result = df.set_index(['A', 'B'])
result_nodrop = df.set_index(['A', 'B'], drop=False)
index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
expected = df.ix[:, ['C', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
self.assertEqual(result.index.names, index.names)
# inplace
df2 = df.copy()
df2.set_index(['A', 'B'], inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index(['A', 'B'], drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# corner case
with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
df.set_index('A', verify_integrity=True)
# append
result = df.set_index(['A', 'B'], append=True)
xp = df.reset_index().set_index(['index', 'A', 'B'])
xp.index.names = [None, 'A', 'B']
assert_frame_equal(result, xp)
# append to existing multiindex
rdf = df.set_index(['A'], append=True)
rdf = rdf.set_index(['B', 'C'], append=True)
expected = df.set_index(['A', 'B', 'C'], append=True)
assert_frame_equal(rdf, expected)
# Series
result = df.set_index(df.C)
self.assertEqual(result.index.name, 'C')
def test_set_index_nonuniq(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
df.set_index('A', verify_integrity=True, inplace=True)
self.assertIn('A', df)
def test_set_index_bug(self):
# GH1590
df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
df2 = df.select(lambda indx: indx >= 1)
rs = df2.set_index('key')
xp = DataFrame({'val': [1, 2]},
Index(['b', 'c'], name='key'))
assert_frame_equal(rs, xp)
def test_set_index_pass_arrays(self):
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# multiple columns
result = df.set_index(['A', df['B'].values], drop=False)
expected = df.set_index(['A', 'B'], drop=False)
# TODO should set_index check_names ?
assert_frame_equal(result, expected, check_names=False)
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
# with Categorical
df = DataFrame({'A': np.random.randn(10),
'B': ci.values})
idf = df.set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
# from a CategoricalIndex
df = DataFrame({'A': np.random.randn(10),
'B': ci})
idf = df.set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
idf = df.set_index('B').reset_index().set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
new_df = idf.reset_index()
new_df.index = df.B
tm.assert_index_equal(new_df.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
def test_set_index_cast_datetimeindex(self):
df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)],
'B': np.random.randn(1000)})
idf = df.set_index('A')
tm.assertIsInstance(idf.index, pd.DatetimeIndex)
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
i = (pd.DatetimeIndex(
pd.tseries.tools.to_datetime(['2013-1-1 13:00',
'2013-1-2 14:00'], errors="raise"))
.tz_localize('US/Pacific'))
df = DataFrame(np.random.randn(2, 1), columns=['A'])
expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800',
tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800',
tz='US/Pacific')],
dtype="object"))
# convert index to series
result = Series(i)
assert_series_equal(result, expected)
# assign to frame
df['B'] = i
result = df['B']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'B')
# keep the timezone
result = i.to_series(keep_tz=True)
assert_series_equal(result.reset_index(drop=True), expected)
# convert to utc
df['C'] = i.to_series().reset_index(drop=True)
result = df['C']
comp = pd.DatetimeIndex(expected.values).copy()
comp.tz = None
self.assert_numpy_array_equal(result.values, comp.values)
# list of datetimes with a tz
df['D'] = i.to_pydatetime()
result = df['D']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'D')
# GH 6785
# set the index manually
import pytz
df = DataFrame(
[{'ts': datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo': 1}])
expected = df.set_index('ts')
df.index = df['ts']
df.pop('ts')
assert_frame_equal(df, expected)
# GH 3950
# reset_index with single level
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range('1/1/2011', periods=5,
freq='D', tz=tz, name='idx')
df = pd.DataFrame(
{'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx': [datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5)],
'a': range(5),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx', 'a', 'b'])
expected['idx'] = expected['idx'].apply(
lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
i = pd.to_datetime(["2014-01-01 10:10:10"],
utc=True).tz_convert('Europe/Rome')
df = DataFrame({'i': i})
self.assertEqual(df.set_index(i).index[0].hour, 11)
self.assertEqual(pd.DatetimeIndex(pd.Series(df.i))[0].hour, 11)
self.assertEqual(df.set_index(df.i).index[0].hour, 11)
def test_set_index_dst(self):
di = pd.date_range('2006-10-29 00:00:00', periods=3,
freq='H', tz='US/Pacific')
df = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=di).reset_index()
# single level
res = df.set_index('index')
exp = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=pd.Index(di, name='index'))
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(['index', 'a'])
exp_index = pd.MultiIndex.from_arrays([di, [0, 1, 2]],
names=['index', 'a'])
exp = pd.DataFrame({'b': [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
rs = df.set_index(df.columns[0])
xp = df.ix[:, 1:]
xp.index = df.ix[:, 0].values
xp.index.names = [df.columns[0]]
assert_frame_equal(rs, xp)
def test_set_index_empty_column(self):
# #1971
df = DataFrame([
dict(a=1, p=0),
dict(a=2, m=10),
dict(a=3, m=11, p=20),
dict(a=4, m=12, p=21)
], columns=('a', 'm', 'p', 'x'))
# it works!
result = df.set_index(['a', 'x'])
repr(result)
def test_set_columns(self):
cols = Index(np.arange(len(self.mixed_frame.columns)))
self.mixed_frame.columns = cols
with assertRaisesRegexp(ValueError, 'Length mismatch'):
self.mixed_frame.columns = cols[::2]
# Renaming
def test_rename(self):
mapping = {
'A': 'a',
'B': 'b',
'C': 'c',
'D': 'd'
}
renamed = self.frame.rename(columns=mapping)
renamed2 = self.frame.rename(columns=str.lower)
assert_frame_equal(renamed, renamed2)
assert_frame_equal(renamed2.rename(columns=str.upper),
self.frame, check_names=False)
# index
data = {
'A': {'foo': 0, 'bar': 1}
}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index, pd.Index(['foo', 'bar']))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, pd.Index(['BAR', 'FOO']))
# have to pass something
self.assertRaises(TypeError, self.frame.rename)
# partial columns
renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.columns,
pd.Index(['A', 'B', 'foo', 'bar']))
# other axis
renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.index,
pd.Index(['A', 'B', 'foo', 'bar']))
# index with name
index = Index(['foo', 'bar'], name='name')
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index,
pd.Index(['bar', 'foo'], name='name'))
self.assertEqual(renamed.index.name, renamer.index.name)
# MultiIndex
tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
columns = MultiIndex.from_tuples(
tuples_columns, names=['fizz', 'buzz'])
renamer = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar3')],
names=['foo', 'bar'])
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
self.assert_index_equal(renamed.index, new_index)
self.assert_index_equal(renamed.columns, new_columns)
self.assertEqual(renamed.index.names, renamer.index.names)
self.assertEqual(renamed.columns.names, renamer.columns.names)
def test_rename_nocopy(self):
renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
self.assertTrue((self.frame['C'] == 1.).all())
def test_rename_inplace(self):
self.frame.rename(columns={'C': 'foo'})
self.assertIn('C', self.frame)
self.assertNotIn('foo', self.frame)
c_id = id(self.frame['C'])
frame = self.frame.copy()
frame.rename(columns={'C': 'foo'}, inplace=True)
self.assertNotIn('C', frame)
self.assertIn('foo', frame)
self.assertNotEqual(id(frame['foo']), c_id)
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ['foo', 'bar'], 1: ['bah', 'bas'], 2: [1, 2]})
df = df.rename(columns={0: 'a'})
df = df.rename(columns={1: 'b'})
df = df.set_index(['a', 'b'])
df.columns = ['2001-01-01']
expected = DataFrame([[1], [2]],
index=MultiIndex.from_tuples(
[('foo', 'bah'), ('bar', 'bas')],
names=['a', 'b']),
columns=['2001-01-01'])
assert_frame_equal(df, expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(['L0', 'L1', 'L2'])
assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels(['L0', 'L0', 'L0'])
assert_frame_equal(result, expected)
def test_reset_index(self):
stacked = self.frame.stack()[::2]
stacked = DataFrame({'foo': stacked, 'bar': stacked})
names = ['first', 'second']
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, lab) in enumerate(zip(stacked.index.levels,
stacked.index.labels)):
values = lev.take(lab)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(deleveled['first'], deleveled2['level_0'],
check_names=False)
tm.assert_series_equal(deleveled['second'], deleveled2['level_1'],
check_names=False)
# default name assigned
rdf = self.frame.reset_index()
exp = pd.Series(self.frame.index.values, name='index')
self.assert_series_equal(rdf['index'], exp)
# default name assigned, corner case
df = self.frame.copy()
df['index'] = 'foo'
rdf = df.reset_index()
exp = pd.Series(self.frame.index.values, name='level_0')
self.assert_series_equal(rdf['level_0'], exp)
# but this is ok
self.frame.index.name = 'index'
deleveled = self.frame.reset_index()
self.assert_series_equal(deleveled['index'],
pd.Series(self.frame.index))
self.assert_index_equal(deleveled.index,
pd.Index(np.arange(len(deleveled))))
# preserve column names
self.frame.columns.name = 'columns'
resetted = self.frame.reset_index()
self.assertEqual(resetted.columns.name, 'columns')
# only remove certain columns
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index(['A', 'B'])
# TODO should reset_index check_names ?
assert_frame_equal(rs, self.frame, check_names=False)
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index('A')
xp = self.frame.reset_index().set_index(['index', 'B'])
assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = self.frame.copy()
resetted = self.frame.reset_index()
df.reset_index(inplace=True)
assert_frame_equal(df, resetted, check_names=False)
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index('A', drop=True)
xp = self.frame.copy()
del xp['A']
xp = xp.set_index(['B'], append=True)
assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series((9.81 * time ** 2) / 2,
index=Index(time, name='time'),
name='speed')
df = DataFrame(s1)
resetted = s1.reset_index()
self.assertEqual(resetted['time'].dtype, np.float64)
resetted = df.reset_index()
self.assertEqual(resetted['time'].dtype, np.float64)
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ['x', 'y', 'z']
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(vals, Index(idx, name='a'),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index()
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill='blah')
xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
df = DataFrame(vals,
MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
names=['d', 'a']),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index('a', )
xp = DataFrame(full, Index([0, 1, 2], name='d'),
columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill=None)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill='blah', col_level=1)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = pd.DataFrame([[1, 2], [3, 4]],
columns=pd.date_range('1/1/2013', '1/2/2013'),
index=['A', 'B'])
result = df.reset_index()
expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],
columns=['index', datetime(2013, 1, 1),
datetime(2013, 1, 2)])
assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = pd.DataFrame([[0, 0], [1, 1]], columns=['A', 'B'],
index=RangeIndex(stop=2))
result = df.reset_index()
tm.assertIsInstance(result.index, RangeIndex)
expected = pd.DataFrame([[0, 0, 0], [1, 1, 1]],
columns=['index', 'A', 'B'],
index=RangeIndex(stop=2))
assert_frame_equal(result, expected)
def test_set_index_names(self):
df = pd.util.testing.makeDataFrame()
df.index.name = 'name'
self.assertEqual(df.set_index(df.index).index.names, ['name'])
mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
names=['A', 'B', 'A', 'B'])
df = df.set_index(['A', 'B'])
self.assertEqual(df.set_index(df.index).index.names, ['A', 'B'])
# Check that set_index isn't converting a MultiIndex into an Index
self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex))
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
self.assertTrue(isinstance(df.set_index(
[df.index, df.index]).index, MultiIndex))
# Check equality
tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)
def test_rename_objects(self):
renamed = self.mixed_frame.rename(columns=str.upper)
self.assertIn('FOO', renamed)
self.assertNotIn('foo', renamed)
def test_assign_columns(self):
self.frame['hi'] = 'there'
frame = self.frame.copy()
frame.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']
assert_series_equal(self.frame['C'], frame['baz'], check_names=False)
assert_series_equal(self.frame['hi'], frame['foo2'], check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame({'A': [1, 2, 1, 1, 2],
'B': [10, 16, 22, 28, 34],
'C1': pd.Categorical(list("abaab"),
categories=list("bac"),
ordered=False),
'C2': pd.Categorical(list("abaab"),
categories=list("bac"),
ordered=True)})
for cols in ['C1', 'C2', ['A', 'C1'], ['A', 'C2'], ['C1', 'C2']]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
| apache-2.0 |
Athemis/charm | charm-cli.py | 1 | 16017 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
charm-cli.py: Simple command line interface for CHarm.
"""
import argparse
import logging
try:
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('font', **{'sans-serif': 'DejaVu Sans',
'serif': 'DejaVu Serif',
'family': 'sans-serif'})
import matplotlib.pyplot
except ImportError as e:
print('ERROR: {}'.format(e.msg))
exit(1)
try:
import numpy
except ImportError as e:
print('ERROR: {}'.format(e.msg))
exit(1)
try:
from LibCharm.Sequence import Sequence
from LibCharm import IO
except ImportError as e:
print('ERROR: {}'.format(e.msg))
exit(1)
def autolabel(rects, ax, labels, vertical=True):
"""
Automatically adds labels above a bar in a bar graph.
:param rects: list of bars to be labelled (e.g. generated by ax.bar())
:param ax: axis (axis object from matplotlib)
:param labels: list of labels
:param vertical: rotate the labels by 90° if true
"""
if vertical:
rotation = 'vertical'
else:
rotation = 'horizontal'
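# Note: if the number of labels differs from the number of bars, the function
# silently skips labelling (see the length check below).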
if len(labels) == len(rects):
heights = []
for rect in rects:
height = rect.get_height()
heights.append(height)
max_height = max(heights)
for rect in rects:
i = rects.index(rect)
label = labels[i]
height = rect.get_height()
if height > 0:
y = 1.05 * height
else:
y = 0.02 * max_height
ax.text(rect.get_x() + rect.get_width() / 2., y, str(label),
ha='center', va='bottom', rotation=rotation, size='x-small')
def plot_codon_usage(sequence, ax):
"""
Plot the codon usage for origin and target host as bar graph
:param sequence: LibCharm.Sequence object
:param ax : matplotlib axis object
"""
x1 = x2 = numpy.arange(len(sequence.codons))
bar_width = 0.5
xlabels = []
origin_f = []
target_f = []
# extract data to plot from sequence object
for c in sequence.codons:
origin_f.append(c['origin_f'])
target_f.append(c['target_f'])
xlabels.append(c['aa'])
# convert lists to numpy arrays
origin_f = numpy.array(origin_f)
target_f = numpy.array(target_f)
# plot data
p1 = ax.bar(x1, origin_f, color='b', width=bar_width)
p2 = ax.bar(x2 + (0.5 * bar_width), target_f, color='r', width=bar_width)
# hide top and right axes
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# set tick parameters
ax.tick_params(axis='both', which='both', direction='out')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# position xticks and labels on x axis to be centered for both bars
ax.set_xticks(x1 + bar_width / 2)
ax.set_xticklabels(xlabels, **{'family': 'monospace'})
ax.set_xlabel('amino acid')
# add a legend to the plot
ax.legend((p1, p2), ('Origin organism', 'Host organism'), loc=2, bbox_to_anchor=(1, 1))
ax.hlines(sequence.lower_threshold, 0, len(x1), colors='k', linestyles='solid', **{'linewidth': 1})
if not sequence.use_frequency:
# set the y axis label
ax.set_ylabel('codon usage [fraction]')
# specify the distance between the ticks on the y axis
major_locator = matplotlib.ticker.MultipleLocator(0.1)
minor_locator = matplotlib.ticker.MultipleLocator(0.01)
else:
# set the y axis label if frequency is used instead of fractions
ax.set_ylabel('codon usage [frequency/1000]')
# specify the distance between the ticks on the y axis
major_locator = matplotlib.ticker.MultipleLocator(10)
minor_locator = matplotlib.ticker.MultipleLocator(1)
# set the distance between the ticks on the y axis
ax.yaxis.set_major_locator(major_locator)
ax.yaxis.set_minor_locator(minor_locator)
def plot_codon_usage_differences(sequence, ax):
"""
Plot the difference in codon usage for origin and target host as bar graph
:param sequence: LibCharm.Sequence object
:param ax: matplotlib axis object
"""
# Generate a range of residues out of the length of the sequence array
x1 = numpy.arange(len(sequence.codons))
# Set the threshold according to use_frequency
if sequence.use_frequency:
threshold = 5
else:
threshold = 0.2
# Set width of bars
bar_width = 0.8
# Initialize array of labels for the x axis
xlabels = []
# Initialize arrays of data and labels for the bars
df = []
bar_labels = []
# walk over the codons in sequence
for c in sequence.codons:
# add final_df to data array
df.append(c['final_df'])
# add residue to xlabels
xlabels.append(c['aa'])
# generate bar label and add to list
label = u'{} → {}'.format(c['original'], c['new'])
bar_labels.append(label)
# convert lists to numpy arrays
bar_labels = numpy.array(bar_labels)
df = numpy.array(df)
# find bars that exceed the threshold
mask1 = numpy.ma.where(df > threshold)
mask2 = numpy.ma.where(df <= threshold)
# plot and color bars accordingly
p1 = ax.bar(x1[mask1], df[mask1], color='r', width=bar_width)
autolabel(p1, ax, bar_labels[mask1], vertical=True)
p2 = ax.bar(x1[mask2], df[mask2], color='b', width=bar_width)
autolabel(p2, ax, bar_labels[mask2], vertical=True)
# hide top and right axis
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='both', which='both', direction='out')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# set x axis labels to be centered and to use a monospaced font
ax.set_xticks(x1 + bar_width / 2)
ax.set_xticklabels(xlabels, **{'family': 'monospace'})
ax.set_xlabel('amino acid')
ax.set_ylabel(r'Differential codon usage $f_{origin} - f_{host}$')
if not sequence.use_frequency:
major_locator = matplotlib.ticker.MultipleLocator(0.05)
minor_locator = matplotlib.ticker.MultipleLocator(0.01)
else:
major_locator = matplotlib.ticker.MultipleLocator(10)
minor_locator = matplotlib.ticker.MultipleLocator(1)
ax.legend((p1, p2), (u'Δf > {}'.format(threshold), u'Δf ≤ {}'.format(threshold)), loc=2, bbox_to_anchor=(1, 1))
ax.yaxis.set_major_locator(major_locator)
ax.yaxis.set_minor_locator(minor_locator)
ax.hlines(threshold, 0, len(x1), colors='k', linestyles='dotted', **{'linewidth': 1})
def plot(sequence, prefix=None):
"""
Wrapper for plot_codon_usage_differences and plot_codon_usage
:param sequence: LibCharm.Sequence object
:param prefix: Resulting plot files will be prefixed with 'prefix'
"""
if prefix:
filename = '{}_charm_results.svg'.format(prefix)
else:
filename = 'charm_results.svg'
# Create a plot with two subplots
fig, axarr = matplotlib.pyplot.subplots(2, figsize=(50, 20), dpi=300)
# Actually plot data
plot_codon_usage(sequence, axarr[0])
plot_codon_usage_differences(sequence, axarr[1])
# Save plot as svg
matplotlib.pyplot.savefig(filename, format='svg', orientation='landscape', papertype='a4')
def parse_arguments():
"""
Parse command line arguments and return list of arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
parser.add_argument('-p', '--prefix', type=str, help='prefix for output files')
parser.add_argument('-f', '--frequency', action='store_true', help='use frequency/1000 instead of fraction')
parser.add_argument('-l', '--lower_frequency_alternative', action='store_true',
help='if two codons result in the same difference in codon usage '
'between origin and target host, use the lower frequency alternative')
parser.add_argument('-t', '--threshold', type=float,
help='Lower threshold of codon usage. Defaults to 0.1 and 5 for fraction and '
'frequency respectively')
parser.add_argument('-to', '--translation_table_origin', type=int,
help='id of translation table; Default is: standard genetic code = 1; '
'id corresponds to \'trans_table\' '
'on http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi')
parser.add_argument('-th', '--translation_table_host', type=int,
help='id of translation table; Default is: standard genetic code = 1; '
'id corresponds to \'trans_table\' '
'on http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi')
parser.add_argument('origin', type=int, help='species id of origin organism taken from '
'\'http://www.kazusa.or.jp/codon\' (e.g. \'83333\' for E. coli K12)')
parser.add_argument('host', type=int, help='species id of host organism taken from '
'\'http://www.kazusa.or.jp/codon\' (e.g. \'83333\' for E. coli K12)')
parser.add_argument('input', type=str, help='input file in FASTA format')
args = parser.parse_args()
return args
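# Example invocation (hypothetical file and prefix names; 83333 is the E. coli K12
# species id from the help text above, reused for both organisms purely for
# illustration):
#   python charm-cli.py -p myrun -f 83333 83333 my_gene.fasta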
def initialize_logger(prefix):
"""
Initialization of logging subsystem. Two logging handlers are brought up:
'fh' which logs to a log file and 'ch' which logs to standard output.
:param prefix: prefix that is added to the filename
:return logger: return a logger instance
"""
logger = logging.getLogger('charm-cli')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
try:
if prefix:
log_filename = '{}_charm-cli.log'.format(prefix)
else:
log_filename = 'charm-cli.log'
fh = logging.FileHandler(log_filename, 'w')
fh.setLevel(logging.INFO)
logger.addHandler(fh)
except IOError as error:
logger.warning('WARNING: Cannot create log file! Run charm-cli from a directory to '
'which you have write access.')
logger.warning(str(error))
pass
return logger
def main():
"""
Main function of charm-cli.py.
"""
# Parse command line arguments
args = parse_arguments()
# Initialize logging
logger = initialize_logger(args.prefix)
# Set translation tables according to user input. Defaults to standard genetic code (table 1)
if args.translation_table_origin:
translation_table_origin = args.translation_table_origin
else:
translation_table_origin = 1
if args.translation_table_host:
translation_table_host = args.translation_table_host
else:
translation_table_host = 1
# set threshold if provided by the user and otherwise fall back to defaults
if args.threshold:
lower_threshold = args.threshold
elif args.frequency:
lower_threshold = 5
else:
lower_threshold = 0.1
# initialize Sequence object with user provided input
sequence = Sequence(IO.load_file(args.input), args.origin, args.host,
translation_table_origin=translation_table_origin,
translation_table_host=translation_table_host,
use_frequency=args.frequency,
lower_threshold=lower_threshold,
lower_alternative=args.lower_frequency_alternative)
# harmonize the provided sequence
harmonized_codons = sequence.get_harmonized_codons()
# check if input and output sequence are identical
verify_sequence = sequence.verify_harmonized_sequence()
# log summary to standard output and log file
logger.info('SUMMARY:\n')
if verify_sequence:
text = 'Success! Translation of harmonized and original sequence match:\n\n' \
'{}\n'.format(sequence.harmonized_translated_sequence)
logger.info(text)
else:
logger.error('ERROR: Translations of harmonized and original sequence DO NOT match!')
logger.info('Harmonized codons: {}\n'.format(len(harmonized_codons)))
df_above_thresh = 0
for c in sequence.codons:
if c['final_df'] > 0.2:
df_above_thresh += 1
if df_above_thresh > 0:
logger.warning("WARNING: Difference in origin and target host codon usage of {} out of {} codons ({}%) exceeds 20%!\n".format(df_above_thresh,
len(sequence.codons),
round(df_above_thresh/len(sequence.codons)*100, 1)))
else:
logger.info("Differences of codon usage in origin and target host are within 20%.\n")
table_header = '{:<10} {:^3} {:^4} {:^4} {:^7} {:>6} {:<7} {:>6}'.format('position', 'aa', 'orig', 'new',
'initial', 'final', 'origin', 'target')
logger.info(table_header)
warnings = []
# Iterate over all codons in the sequence and print some statistics and information
for c in sequence.codons:
if str(c['original']) != str(c['new']):
line = '{:<10} {:^3} {:<4} -> {:<4} {:<5.2f} -> {:<3.2f} {:<5.2f} -> {:<3.2f}'.format(c['position'],
c['aa'],
c['original'],
c['new'],
c['initial_df'],
c['final_df'],
c['origin_f'],
c['target_f'])
else:
line = '{:<10} {:^3} {:<12} {:<5.2f} {:<5.2f} -> {:<3.2f}'.format(c['position'],
c['aa'],
c['original'],
c['initial_df'],
c['origin_f'],
c['target_f'])
if c['ambiguous']:
line += ' WARNING: Original codon is ambiguous!'
warnings.append('Codon {} ({}) coding for {} is ambiguous! {} was chosen for the '
'harmonized sequence!'.format(c['position'],
c['original'],
c['aa'],
c['new']))
logger.info(line)
logger.info('\nCodon-harmonized sequence:\n\n{}'.format(sequence.harmonized_sequence))
if warnings:
logger.warn('\nWARNINGS OCCURRED DURING HARMONIZATION:\n')
for warning in warnings:
logger.warn(warning)
plot(sequence, args.prefix)
# Exit gracefully
exit(0)
if __name__ == "__main__":
main()
| mit |
pythonvietnam/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
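# Usage sketch (illustrative only, not part of this module; X_train, y_train and
# X_test are placeholder arrays): every estimator exported below follows the
# standard scikit-learn fit/predict API.
#
#   from sklearn.linear_model import Ridge
#   model = Ridge(alpha=1.0)        # L2-regularized least squares
#   model.fit(X_train, y_train)     # X_train: (n_samples, n_features)
#   y_pred = model.predict(X_test)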
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
raph333/Consensus-residue-contact-calculator | scripts/calculate_networks.py | 1 | 4884 | '''
------------------------------------------------------------------------------
AUTHOR: Raphael Peer, [email protected]
PURPOSE:
Calculation of residue contact networks of all structures in the input-
directory.
OUTPUT:
csv-file of residue contact networks of all input structures.
Each contact is given in the form 'PDB-ID,residue-A,residue-B'. For instance,
'1g16,1,42' means that in structure 1g16, residue 1 contacts residue 42.
The residue numbers refer to the numbering in the PDB-file.
NOTE:
A contact is defined if any two atoms of two residues are within a certain
distance of each other. This value can be set as an optional argument. By
default, the distance cutoff is 5 Angstrom (note that no hydrogen atoms are
present in the input PDB-files).
------------------------------------------------------------------------------
'''
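# Illustrative sketch of the output written to results/raw_networks.csv
# (the PDB-ID and residue numbers below are the hypothetical example from the
# docstring; the real file holds one such row per contact):
#   pdb_id,res_A,res_B
#   1g16,1,42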
import argparse
import sys
import os
import numpy as np
import pandas as pd
import networkx as nx
from Bio import PDB
#import time
#start = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('processed_pdb_dir', help='Directory with processed '
'PDB-structures for calculation of residue contact '
'networks')
parser.add_argument('cutoff', nargs='?',type=int, default=5,
help='Any two residues which have at least one pair of '
'atoms within this distance are considered to make a '
'contact. If no argument is provided, the default value '
'of 5 Angstrom is used.')
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(1)
print 'atomic distance cutoff: %s Angstrom' % args.cutoff
def min_atomic_distance(resA, resB):
'''IN: two residues (Bio.PDB objects)
OUT: shortest distance between any two atoms'''
atoms1 = list(resA.get_iterator())
atoms2 = list(resB.get_iterator())
atomic_dist_matrix = np.zeros((len(atoms1), len(atoms2)), dtype=('f4'))
for i in range(0, len(atoms1)):
for j in range(0, len(atoms2)):
atomic_dist_matrix[i,j] = atoms1[i] - atoms2[j]
return atomic_dist_matrix.min() # minimal atomic distance
def residue_distance_matrix(directory, filename):
'''IN: input directory and pdb-filename
OUT: 2D numpy array of inter-residue distances (NA = large distance)'''
struct = PDB.PDBParser().get_structure(filename, os.path.join(directory,
filename))
res_list = list(struct.get_residues())
dist_matrix = np.zeros((len(res_list), len(res_list)), dtype=('f4'))
dist_matrix[:] = np.nan
for i in range(0, len(res_list)):
for j in range(i, len(res_list)): # start at i: only half matrix
try:
CA_dist = res_list[i]['CA'] - res_list[j]['CA']
if CA_dist <= 15:
atomic_distance = min_atomic_distance(res_list[i],
res_list[j])
dist_matrix[i,j] = atomic_distance
# if the CA-CA distance is above 15A, don't calculate the
# any to any atom distances: reduces runtime to 1/3
except: # if residue does not have CA-atom coordinates
pass
return(dist_matrix)
def matrix_to_dataframe(matrix, pdb_id):
'''IN: all against all residue matrix ('1' means contact)
OUT: dataframe with one contact per line (residue A, residue B)'''
nw = nx.from_numpy_matrix(matrix)
# network has sequential residue numbering starting at 0 (no pdb-numbers)
df = pd.DataFrame(nw.edges()) # only edges with weight '1' are taken
df['pdb_id'] = pdb_id
df.columns = ['res_A', 'res_B', 'pdb_id']
df.res_A += 1 # residue numbering should start at 1
df.res_B += 1
return(df)
filecounter = 0
for filename in os.listdir(args.processed_pdb_dir):
# if not filename.endswith('.pdb'):
# continue # ignore non-PDB files
pdb_id = filename.split('.')[0]
distance_matrix = residue_distance_matrix(args.processed_pdb_dir, filename)
contact_matrix = ((distance_matrix < args.cutoff) & \
(distance_matrix > 0).astype(int))
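# Note: entries left as NaN in distance_matrix (CA-CA distance above 15A, missing
# CA atoms, or the unfilled lower triangle) compare as False in both tests above,
# and the '> 0' term drops the zero diagonal, so none of them count as contacts.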
nw = matrix_to_dataframe(contact_matrix, pdb_id)
filecounter += 1
if filecounter == 1:
networks = nw # initizalize big dataframe to store all networks
else:
networks = pd.concat([networks, nw])
print('(%s/%s) %s' % (filecounter, len(os.listdir(args.processed_pdb_dir)),
pdb_id))
# WRITE ALL NETWORKS TO FILE
networks = networks[['pdb_id', 'res_A', 'res_B']]
networks = networks.sort_index(by=['pdb_id', 'res_A', 'res_B'])
networks.to_csv('results/raw_networks.csv', index=False)
print "Number of residue contact networks calcuated: %s" % filecounter
#end = time.time()
#print "Runtime: %s minutes" % round(((end - start) / float(60)), 1)
| mit |
dare0021/ClusteringBasedID | run.py | 1 | 16148 | import numpy as np
import os
import speakerInfo as sinfo
import infoSingleFile
from unpackMFC import run as unmfc
from pyAudioAnalysis import audioBasicIO, audioFeatureExtraction
from datetime import datetime
import sklearn
from threading import Thread, BoundedSemaphore
import modelStorage as mds
from enum import Enum
WindowGTVmodes = Enum('average', 'midpoint')
# primary inputs
inputPath = "/home/jkih/Music/sukwoo_2min_utt/"
manualTrainTestSet = False
trainLabels = ['kim', 'lee', 'seo', 'yoon']
testLabels = ['joo']
autoflipOutputIfBelow50 = True
# leave blank to ignore
manualTestFile = ""
manualTestDiaFilePath = "joo proc pass 3.wav.diarization.comp"
# outputPath = inputPath + '1 0.1 avg'
outputPath = inputPath + str(datetime.now().time()) + '/'
numSets = 1
numThreads = 3
printTestingTimes = True
normalizeTrainingSet = True
# if true normalizes testing set using the normalization parameters found during the training set normalization
# unless it is a single file testing set, in which case we use a per window normalization
normalizeTestSet = True
windowGTVmode = WindowGTVmodes.average
# in number of the feature vectors used. MFCC is 30ms
# large window sizes lead to OOM failure
# at least I think it's OOM; python quits silently after filling available RAM (16GB)
# might be able to batch SVM training? Depends on how svm.fit() works
svmWindowSize = 1000 // 30
# also in number of feature vectors
svmStride = int(svmWindowSize *.1)
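# Worked example with the defaults above (illustrative): 1000 // 30 = 33 feature
# vectors per SVM window (~1 s of audio at 30 ms per MFCC frame), and
# int(33 * .1) = 3 vectors (~90 ms) between consecutive window starts.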
# pAA settings
# https://github.com/tyiannak/pyAudioAnalysis/wiki/3.-Feature-Extraction
# in ms
windowSize = 25.625
timeStep = 10
# don't change unless necessary
zeroThresh = 1e-10
featureVectorSize = 13
threadSemaphore = BoundedSemaphore(value=numThreads)
# no touch
featureVectors = dict()
featureVectorCache = dict()
MfccCache = dict()
groundTruths = dict()
lastSpeaker = -1
def clearVariables():
global featureVectors
global groundTruths
global lastSpeaker
featureVectors = dict()
groundTruths = dict()
lastSpeaker = -1
def forgivingFloatEquivalence(value, standard):
return value < -1 * standard - zeroThresh or value > standard + zeroThresh
def pairwiseComparison(a, b):
retval = []
for i, j in zip(a, b):
if i == j:
retval.append(True)
else:
retval.append(False)
return retval
def recallCalc(test, truth):
correct = 0
dividend = 0
for tst, trt in zip(test, truth):
if trt:
dividend += 1
if tst:
correct +=1
return float(correct) / dividend
def validateNormalization(featureVector):
for mean in featureVector.mean(axis=0):
if forgivingFloatEquivalence(mean, 0):
print "WARN: validateNormalization failed with mean " + str(mean)
return False
for stdv in featureVector.std(axis=0):
if forgivingFloatEquivalence(stdv, 1):
print "WARN: validateNormalization failed with stddev " + str(stdv)
return False
return True
def loadFeatureVector(inputPath, featureType, paaFunction = -1):
if featureType == 'mfcc':
loadMFCCFiles(inputPath)
elif featureType == 'paa':
loadWAVwithPAA(inputPath, paaFunction)
else:
print "ERR: unknown feature type", featureType
assert False
def storeFeature(sid, data, filePath):
global featureVectors
global groundTruths
if sid in featureVectors:
featureVectors[sid].append(data)
groundTruths[sid].append(np.full(len(data), sinfo.getTruthValue(filePath), dtype='int8'))
else:
if type(data) is np.ndarray:
data = data.tolist()
featureVectors[sid] = [data]
groundTruths[sid] = [np.full(len(data), sinfo.getTruthValue(filePath), dtype='int8').tolist()]
def loadMFCCFiles(inputPath):
filePaths = [inputPath+f for f in os.listdir(inputPath) if os.path.isfile(inputPath+f) and f.endswith('.mfc')]
for filePath in filePaths:
sid = sinfo.getSpeakerID(filePath)
data = None
if filePath in MfccCache.keys():
data = MfccCache[filePath]
else:
data = unmfc(filePath, featureVectorSize)
MfccCache[filePath] = data
storeFeature(sid, data, filePath)
def loadWAVwithPAA(inputPath, paaFunction):
filePaths = [inputPath+f for f in os.listdir(inputPath) if os.path.isfile(inputPath+f) and f.endswith('.wav')]
for filePath in filePaths:
sid = sinfo.getSpeakerID(filePath)
data = None
if filePath in featureVectorCache.keys():
data = featureVectorCache[filePath]
else:
[Fs, x] = audioBasicIO.readAudioFile(filePath)
assert paaFunction > -1 and paaFunction < 34
data = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.001 * windowSize * Fs, 0.001 * timeStep * Fs)
featureVectorCache[filePath] = data
data = data[paaFunction,:]
# using 1D feature vector breaks my code, sklearn code, and probably the law
if len(np.array(data).shape) < 2:
data = [[datum] for datum in data]
storeFeature(sid, data, filePath)
def windowing(x, y, normalizeEachWindow = False):
def reduceArrDimension(a):
retval = []
for iter in a:
retval.extend(iter)
return retval
newX = []
newY = []
iterRange = len(x) - svmWindowSize + 1
if iterRange % svmStride > 0:
print "WARN: SVM window stride misaligned by:", iterRange % svmStride
i = 0
while i < iterRange:
xi = x[i : i + svmWindowSize]
if normalizeEachWindow:
sklSS = sklearn.preprocessing.StandardScaler()
xi = sklSS.fit_transform(xi)
xi = reduceArrDimension(xi)
newX.append(xi)
if windowGTVmode == WindowGTVmodes.midpoint:
newY.append(y[int(i + svmWindowSize / 2)])
elif windowGTVmode == WindowGTVmodes.average:
newY.append(round(np.mean(y[i : i + svmWindowSize])))
else:
print 'ERR: invalid windowGTVmode:', windowGTVmode
assert False
i += svmStride
return newX, newY
# returns: feature vector array (2D), ground truth array (1D), scaler scale_ and mean_ (None if no scaler was fitted)
def collateData(speakerList, divider = None, subtractor = None, shuffle = False):
def reduceArrDimension(a):
retval = []
for iter in a:
retval.extend(iter)
return retval
x = []
y = []
for speaker in speakerList:
if speaker in featureVectors:
xi = featureVectors[speaker]
yi = groundTruths[speaker]
if shuffle:
rng_state = np.random.get_state()
np.random.shuffle(xi)
np.random.set_state(rng_state)
np.random.shuffle(yi)
else:
print "ERR: unknown speaker", str(speaker)
print featureVectors.keys()
print groundTruths.keys()
assert False
for i in range(len(xi)):
x.append(xi[i])
y.append(yi[i])
x = reduceArrDimension(x)
y = reduceArrDimension(y)
sklSS = sklearn.preprocessing.StandardScaler()
if divider == None:
x = sklSS.fit_transform(x)
if not validateNormalization(x):
print "ERR: data not normalized for speakers " + str(speakerList)
print "Check if bounds are too close"
assert False
elif divider[0] == False:
# Don't normalize
pass
else:
sklSS.scale_ = divider
sklSS.mean_ = subtractor
x = sklSS.transform(x)
if not validateNormalization(x):
print "WARN: data not normalized for speakers " + str(speakerList)
print "divider", divider
print "subtractor", subtractor
x, y = windowing(x, y)
retScale = None
retMean = None
try:
retScale = sklSS.scale_
retMean = sklSS.mean_
except AttributeError:
pass
return x, y, retScale, retMean
def loadManualTestFile(filePath, diarizationFilePath, divider, subtractor):
if not (filePath in MfccCache.keys()):
MfccCache[filePath] = unmfc(filePath, featureVectorSize)
infoSingleFile.init(diarizationFilePath, len(MfccCache[filePath]))
x = MfccCache[filePath]
if not ((divider == None) or (divider[0] == False)):
sklSS = sklearn.preprocessing.StandardScaler()
sklSS.scale_ = divider
sklSS.mean_ = subtractor
x = sklSS.transform(x)
x, y = windowing(x, infoSingleFile.getTruthValues(), True)
x = np.array(x)
if not validateNormalization(x):
print "WARN: data not normalized for the manual test set"
print "divider", divider
print "subtractor", subtractor
return x, y
def getSubset():
if manualTrainTestSet:
datA = None
if not normalizeTrainingSet:
datA = [False]
trainFeatureVector, trainTruthVector, datA, datB = collateData(trainLabels, shuffle = True, divider = datA)
if not normalizeTestSet:
datA = [False]
if len(manualTestFile) > 0:
testFeatureVector, testTruthVector = loadManualTestFile(manualTestFile, manualTestDiaFilePath, datA, datB)
else:
testFeatureVector, testTruthVector, datA, datB = collateData(testLabels, datA, datB, True)
else:
global lastSpeaker
testSpeaker = lastSpeaker + 1
if testSpeaker >= len(featureVectors.keys()):
testSpeaker = 0
speakers = featureVectors.keys()
datA = None
if not normalizeTrainingSet:
datA = [False]
trainFeatureVector, trainTruthVector, datA, datB = collateData([speaker for speaker in speakers if speaker != speakers[testSpeaker]], shuffle = True, divider = datA)
if not normalizeTestSet:
datA = [False]
testFeatureVector, testTruthVector, datA, datB = collateData([speakers[testSpeaker]], datA, datB, True)
lastSpeaker = testSpeaker
print "Testing with speaker #" + str(testSpeaker) + ", label: " + str(speakers[testSpeaker])
return trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector
# flips 0 to 1 and non-0 to 0 for any given 1D array
def flipTruthValues(truthVect):
def flip(item):
if item == 0:
return 1
return 0
return map(flip, truthVect)
def modelProcess(modelFunc, tag, ms):
global threadSemaphore
def resetModel():
if ms.args == 'ensembleOverride':
return modelFunc
elif ms.args != None:
return modelFunc(ms.args)
else:
return modelFunc()
gtvWasFlipped = False
trainFeatureVector = ms.trainFeatureVector
trainTruthVector = ms.trainTruthVector
testFeatureVector = ms.testFeatureVector
testTruthVector = ms.testTruthVector
model = resetModel()
model.fit(trainFeatureVector, trainTruthVector)
accuracy = -1
f1 = -1
try:
if modelFunc == mds.model_MiniK:
model = resetModel()
model.dummyattributethatdoesntexist
# MiniK score is not accuracy
# raise an attribute error to skip in to the hand-written accuracy code
if printTestingTimes:
print 'TESTING BEGIN', datetime.now()
predicted_labels = model.predict(testFeatureVector)
if printTestingTimes:
print 'TESTING END', datetime.now()
accuracy = model.score(testFeatureVector, testTruthVector)
if autoflipOutputIfBelow50 and accuracy < .5:
accuracy = 1 - accuracy
gtvWasFlipped = True
testTruthVector = flipTruthValues(testTruthVector)
f1 = sklearn.metrics.f1_score(testTruthVector, predicted_labels)
except AttributeError:
# some models only have online modes
if printTestingTimes:
print 'TESTING BEGIN', datetime.now()
predicted_labels = model.fit_predict(testFeatureVector)
if printTestingTimes:
print 'TESTING END', datetime.now()
accuracy = float(pairwiseComparison(predicted_labels, testTruthVector).count(True)) / len(testTruthVector)
if autoflipOutputIfBelow50 and accuracy < .5:
accuracy = 1 - accuracy
gtvWasFlipped = True
testTruthVector = flipTruthValues(testTruthVector)
recall = recallCalc(predicted_labels, testTruthVector)
f1 = float(2) * accuracy * recall / (accuracy + recall)
if accuracy < 0 or accuracy > 1:
print 'INVALID ACC ' + str(accuracy)
print 'MODEL ' + str(model)
print str(predicted_labels)
print str(testTruthVector)
os._exit(1)
elif f1 < 0 or f1 > 1:
print 'INVALID F1 ' + str(f1)
print 'MODEL ' + str(model)
print str(predicted_labels)
print str(testTruthVector)
os._exit(1)
f = open(outputPath + tag + '.log', 'w')
f.write('accuracy: ' + str(accuracy) + '\tf1: ' + str(f1))
f.write('\n')
f.write('predicted labels followed by truth values')
f.write('\n')
f.write(str(predicted_labels.tolist()))
f.write('\n')
f.write(str(testTruthVector))
f.write('\n')
f.write('Ground Truth Values Auto-Flipped: ' + str(gtvWasFlipped))
f.close()
threadSemaphore.release()
def runPaaFunctions():
if not os.path.exists(outputPath):
os.mkdir(outputPath)
for paaFunction in [21, 20]:
print "Running feature extraction #" + str(paaFunction)
clearVariables()
loadFeatureVector(inputPath, 'paa', paaFunction)
for i in range(numSets * len(featureVectors.keys())):
trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector = getSubset()
ms = mds.ModelSettings(i, paaFunction, trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector, featureVectors.keys()[lastSpeaker])
mds.runAllModelsPAA(ms, windowSize, iterDone, iterTotal)
def runSphinxFiles():
if not os.path.exists(outputPath):
os.mkdir(outputPath)
clearVariables()
loadFeatureVector(inputPath, 'mfcc')
iterlen = numSets * len(featureVectors.keys())
for i in range(iterlen):
print "PROCESSING: " + str(i) + " / " + str(iterlen)
trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector = getSubset()
ms = mds.ModelSettings(i, -1, trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector, featureVectors.keys()[lastSpeaker])
mds.runAllModelsMFCC(ms, iterDone, iterTotal)
def runRBFvariants():
if not os.path.exists(outputPath):
os.mkdir(outputPath)
clearVariables()
loadFeatureVector(inputPath, 'mfcc')
if manualTrainTestSet:
iterlen = numSets
else:
iterlen = numSets * len(featureVectors.keys())
for i in range(iterlen):
print "PROCESSING: " + str(i) + " / " + str(iterlen)
trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector = getSubset()
testSpeaker = featureVectors.keys()[lastSpeaker]
if lastSpeaker < 0:
testSpeaker = 'manual'
ms = mds.ModelSettings(i, -1, trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector, testSpeaker)
mds.runRBFvariantsGamma(ms, [0.015], i, iterlen)
# mds.runRBFvariants2DList(ms, [1, 10, 50, 100], [50, 0.01, 0.02, 0.03, 0.04, 0.5, 2, .78125, .617284], i, iterlen)
# mds.runRBFvariantsCList(ms, np.arange(1.98, 3, 0.02), 0.03, i, iterlen)
# mds.runRBFvariantsCList(ms, [1], 0.03, i, iterlen)
def runRandomForest():
global outputPath
outputPathPrefix = outputPath
clearVariables()
loadFeatureVector(inputPath, 'mfcc')
if manualTrainTestSet:
iterlen = numSets
else:
iterlen = numSets * len(featureVectors.keys())
forestCount = [1024, 2048, 3072, 4096, 5121, 6045, 8193]
maxDepth = [3, 5, 10, 20]
mds.resetETAtimer(iterlen * len(forestCount) * len(maxDepth))
for fc in forestCount:
for md in maxDepth:
for i in range(iterlen):
# outputPath = outputPathPrefix + ' ' + str(fc) + 'forests ' + str(md) + 'depth/'
if not os.path.exists(outputPath):
os.mkdir(outputPath)
print "PROCESSING: " + str(i) + " / " + str(iterlen) + ' ' + str(fc) + ' forests ' + str(md) + ' depth'
trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector = getSubset()
testSpeaker = featureVectors.keys()[lastSpeaker]
if lastSpeaker < 0:
testSpeaker = 'manual'
ms = mds.ModelSettings(i, -1, trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector, testSpeaker, mds.factory_RandomForest(fc, 4, md))
mds.runModel(mds.model_RandomForest, 'MFCC_' + str(ms.paaFunction) + '_RandomForest_fc_' + str(fc) + '_md_' + str(md) + '_' + str(ms.i) + '_' + ms.speakerName, ms)
mds.incrementETAtimer()
def runSvmRfEnsemble():
clearVariables()
loadFeatureVector(inputPath, 'mfcc')
if manualTrainTestSet:
iterlen = numSets
else:
iterlen = numSets * len(featureVectors.keys())
forestCount = 4096
maxDepth = 3
gamma = 0.015
cVal = 1
if not os.path.exists(outputPath):
os.mkdir(outputPath)
mds.resetETAtimer(iterlen)
for i in range(iterlen):
fc = forestCount
md = maxDepth
g = gamma
c = cVal
trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector = getSubset()
testSpeaker = featureVectors.keys()[lastSpeaker]
if lastSpeaker < 0:
testSpeaker = 'manual'
ms = mds.ModelSettings(i, -1, trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector, testSpeaker, 'ensembleOverride')
mds.runModel(mds.ensemble_VotingSvmRf(g, c, fc, md), 'MFCC_' + str(ms.paaFunction) + '_E_SVMRF_fc_' + str(fc) + '_md_' + str(md) + '_' + str(ms.i) + '_' + ms.speakerName, ms)
mds.incrementETAtimer()
mds.init(threadSemaphore, modelProcess)
# runPaaFunctions()
# runSphinxFiles()
# runRBFvariants()
# runRandomForest()
runSvmRfEnsemble() | mit |
OmnesRes/pan_cancer | paper/figures/figure_2/gene_set_overlap/gene_sets.py | 1 | 16387 | ##script for finding the overlap in the top 100 most significant gene sets from msigdb for good and bad genes
##load necessary modules
import pylab as plt
import numpy as np
import math
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
##I did not write this function, from http://depts.washington.edu/clawpack/clawpack-4.6.3/python/pyclaw/plotters/colormaps.py
##-------------------------
def make_colormap(colors):
##-------------------------
"""
Define a new color map based on values specified in the dictionary
colors, where colors[z] is the color that value z should be mapped to,
with linear interpolation between the given values of z.
The z values (dictionary keys) are real numbers and the values
colors[z] can be either an RGB list, e.g. [1,0,0] for red, or an
html hex string, e.g. "#ff0000" for red.
"""
from matplotlib.colors import LinearSegmentedColormap, ColorConverter
from numpy import sort
z = sort(colors.keys())
n = len(z)
z1 = min(z)
zn = max(z)
x0 = (z - z1) / (zn - z1)
CC = ColorConverter()
R = []
G = []
B = []
for i in range(n):
#i'th color at level z[i]:
Ci = colors[z[i]]
if type(Ci) == str:
# a hex string of form '#ff0000' for example (for red)
RGB = CC.to_rgb(Ci)
else:
# assume it's an RGB triple already:
RGB = Ci
R.append(RGB[0])
G.append(RGB[1])
B.append(RGB[2])
cmap_dict = {}
cmap_dict['red'] = [(x0[i],R[i],R[i]) for i in range(len(R))]
cmap_dict['green'] = [(x0[i],G[i],G[i]) for i in range(len(G))]
cmap_dict['blue'] = [(x0[i],B[i],B[i]) for i in range(len(B))]
mymap = LinearSegmentedColormap('mymap',cmap_dict)
return mymap
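##example usage (illustrative values only): a blue-white-red map
##mymap = make_colormap({0.0: '#0000ff', 0.5: '#ffffff', 1.0: '#ff0000'})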
##get the 100 most enriched protective and harmful gene sets for each cancer
def read_overlap_file(cancer, kind):
    """Parse an msigdb overlap file and return the enriched gene set names.
    cancer is the TCGA cancer code (e.g. 'BLCA') and kind is either
    'good_overlap' or 'bad_overlap'."""
    header = 'Gene Set Name\t# Genes in Gene Set (K)\tDescription\t# Genes in Overlap (k)\tk/K\tp-value\tFDR q-value\n'
    gene_sets = []
    with open(os.path.join(BASE_DIR, 'cox_regression', cancer, kind)) as f:
        x = f.readline()
        while x != '':
            x = f.readline()
            if x == header:
                x = f.readline()
                while x != '\n':
                    gene_sets.append(x.split('\t')[0])
                    x = f.readline()
    return gene_sets
##the 16 cancers analysed, in the order used for plotting
cancers = ['BLCA', 'BRCA', 'CESC', 'COAD', 'GBM', 'HNSC', 'KIRC', 'KIRP',
           'LAML', 'LGG', 'LIHC', 'LUAD', 'LUSC', 'OV', 'SKCM', 'STAD']
##protective (good) and harmful (bad) gene sets, one list per cancer
all_cancers1 = [read_overlap_file(cancer, 'good_overlap') for cancer in cancers]
all_cancers2 = [read_overlap_file(cancer, 'bad_overlap') for cancer in cancers]
##create a list of lists of the overlaps, use all_cancers1 for good overlaps, all_cancers2 for bad overlaps
final_array=[]
for i in all_cancers2[::-1]:
temp=[]
for j in all_cancers2[::-1]:
temp.append(len([k for k in j if k in i]))
final_array.append(temp)
##plotting, use blue_yellow_red1 cmap for good overlaps, blue_yellow_red2 for bad overlaps
blue_yellow_red1 = make_colormap({0:'#00005C',.05:'#0000D0',.14:'#01BBCF',.15:'#33CC33',.2:'#FFFF00',.27:'#FF9900',.33:'#B47603',.35:'#A32900',1:'#751E00'})
blue_yellow_red2 = make_colormap({0:'#00005C',.05:'#0000D0',.14:'#01BBCF',.15:'#33CC33',.25:'#FFFF00',.3:'#FF9900',.38:'#B47603',.45:'#A32900',1:'#751E00'})
Z=np.array(final_array)
mask=np.tri(Z.shape[0],k=-1)
Z= np.ma.array(Z, mask=mask)
fig = plt.figure()
fig.subplots_adjust(bottom=.15)
fig.subplots_adjust(left=.15)
ax = fig.add_subplot(111)
figure=ax.imshow(Z,cmap=blue_yellow_red2,interpolation="nearest")
cbar=fig.colorbar(figure,pad=.02)
cbar.ax.tick_params(labelsize=40)
cbar.set_label('number of genes', rotation=270,fontsize=80,labelpad=25)
ax.set_yticks([i for i in range(0,16)])
ax.set_yticklabels(['BLCA','BRCA','CESC','COAD','GBM','HNSC','KIRC','KIRP','LAML','LGG','LIHC','LUAD','LUSC','OV','SKCM','STAD'][::-1])
ax.tick_params(axis='y',labelsize=40)
ax.set_xticks([i for i in range(0,16)])
ax.set_xticklabels(['BLCA','BRCA','CESC','COAD','GBM','HNSC','KIRC','KIRP','LAML','LGG','LIHC','LUAD','LUSC','OV','SKCM','STAD'][::-1],rotation=90)
ax.tick_params(axis='x',labelsize=40,)
ax.tick_params(axis='x',length=0,width=0)
ax.tick_params(axis='y',length=0,width=0)
ax.invert_yaxis()
ax.invert_xaxis()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.show()
##get the overlaps within cancers
final_array=[]
for index1,i in enumerate(all_cancers1):
for index2,j in enumerate(all_cancers2):
if index2==index1:
final_array.append(len([k for k in j if k in i]))
f=open('overlap_within_cancers.txt','w')
f.write(str(final_array))
f.close()
| mit |
stscieisenhamer/glue | doc/conf.py | 2 | 13273 | # -*- coding: utf-8 -*-
#
# Glue documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 25 12:05:47 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
import os
ON_RTD = os.environ.get('READTHEDOCS') == 'True'
import warnings
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Import matplotlib now to make sure the warning doesn't cause the Sphinx build
# to fail
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
sip.setapi('QDate', 2)
sip.setapi('QDateTime', 2)
sip.setapi('QTextStream', 2)
sip.setapi('QTime', 2)
sip.setapi('QUrl', 2)
import PyQt5
import matplotlib.pyplot as plt
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode',
'sphinx_automodapi.automodapi', 'numpydoc',
'sphinx.ext.intersphinx']
# Add the redirect.py plugin which is in this directory
import sys
sys.path.insert(0, os.path.abspath('.'))
extensions.append('redirect')
# Workaround for RTD where the default encoding is ASCII
if ON_RTD:
import locale
locale.setlocale(locale.LC_ALL, 'C.UTF-8')
intersphinx_cache_limit = 10 # days to keep the cached inventories
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/latest/', None),
'python': ('https://docs.python.org/2.7', None),
'matplotlib': ('http://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'astropy': ('http://docs.astropy.org/en/stable/', None),
'echo': ('http://echo.readthedocs.io/en/latest/', None),
}
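# With the mappings above, docstrings and .rst pages can cross-reference
# objects in those external projects, for example (illustrative only):
#   :class:`numpy.ndarray`  -> links into the numpy API docs
#   :func:`matplotlib.pyplot.figure`  -> links into the matplotlib docs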
numpydoc_show_class_members = False
autosummary_generate = True
automodapi_toctreedirnm = 'api'
# At the moment, sphinx-automodapi causes a warning to appear about autoattribute being
# registered twice, but this will be fixed in the next release.
suppress_warnings = ['app.add_directive', 'app.add_node']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Glue'
copyright = u'2012-2017, Chris Beaumont, Thomas Robitaille, Michelle Borkin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from glue import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates', '.eggs']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
try: # use ReadTheDocs theme, if installed
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), ]
except ImportError:
pass
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gluedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Glue.tex', u'Glue Documentation',
u'Chris Beaumont, Thomas Robitaille, Michelle Borkin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'glue', u'Glue Documentation',
[u'Chris Beaumont, Thomas Robitaille, Michelle Borkin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Glue', u'Glue Documentation',
u'Chris Beaumont, Thomas Robitaille, Michelle Borkin', 'Glue', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
todo_include_todos = True
autoclass_content = 'both'
nitpick_ignore = [('py:class', 'object'), ('py:class', 'str'),
('py:class', 'list'), ('py:obj', 'numpy array'),
('py:obj', 'integer'), ('py:obj', 'Callable'),
('py:obj', 'list'),
('py:class', 'PySide.QtGui.QMainWindow'),
('py:class', 'PySide.QtGui.QWidget'),
('py:class', 'PyQt4.QtGui.QTextEdit'),
('py:class', 'PyQt4.QtGui.QTabBar'),
('py:class', 'PyQt4.QtGui.QLabel'),
('py:class', 'PyQt4.QtGui.QComboBox'),
('py:class', 'PyQt4.QtGui.QMessageBox'),
('py:class', 'PyQt4.QtGui.QToolBar'),
('py:class', 'PyQt4.QtCore.QMimeData'),
('py:class', 'PyQt4.QtCore.QAbstractListModel'),
('py:class', 'PyQt4.QtCore.QThread'),
('py:class', 'PyQt4.QtGui.QStyledItemDelegate'),
('py:class', 'PyQt5.QtWidgets.QMainWindow'),
('py:class', 'PyQt5.QtWidgets.QWidget'),
('py:class', 'PyQt5.QtWidgets.QTextEdit'),
('py:class', 'PyQt5.QtWidgets.QTabBar'),
('py:class', 'PyQt5.QtWidgets.QLabel'),
('py:class', 'PyQt5.QtWidgets.QComboBox'),
('py:class', 'PyQt5.QtWidgets.QMessageBox'),
('py:class', 'PyQt5.QtWidgets.QToolBar'),
('py:class', 'PyQt5.QtWidgets.QStyledItemDelegate'),
('py:class', 'PyQt5.QtCore.QMimeData'),
('py:class', 'PyQt5.QtCore.QAbstractListModel'),
('py:class', 'PyQt5.QtCore.QThread'),
('py:obj', "str ('file' | 'directory' | 'label')"),
('py:obj', 'function(application)'),
('py:class', 'builtins.object'),
('py:class', 'builtins.list'),
('py:class', 'builtins.type'),
('py:class', 'glue.viewers.histogram.layer_artist.HistogramLayerBase'),
('py:class', 'glue.viewers.scatter.layer_artist.ScatterLayerBase'),
('py:class', 'glue.viewers.image.layer_artist.ImageLayerBase'),
('py:class', 'glue.viewers.image.layer_artist.RGBImageLayerBase'),
('py:class', 'PyQt4.QtGui.QMainWindow'),
('py:class', 'PyQt4.QtGui.QWidget'),
('py:mod', 'glue.core'),
('py:mod', 'glue.viewers'),
('py:mod', 'glue.viewers.scatter'),
('py:mod', 'glue.viewers.common'),
('py:mod', 'glue.viewers.common.qt.mouse_mode'),
('py:mod', 'glue.dialogs.custom_component'),
('py:class', 'glue.external.echo.core.HasCallbackProperties'),
('py:class', 'glue.external.echo.core.CallbackProperty'),
('py:class', 'glue.external.echo.selection.SelectionCallbackProperty'),
('py:class', 'glue.viewers.image.state.BaseImageLayerState'),
('py:class', 'glue.viewers.common.qt.data_viewer_with_state.DataViewerWithState')
]
# coax Sphinx into treating descriptors as attributes
# see https://bitbucket.org/birkenfeld/sphinx/issue/1254/#comment-7587063
from glue.utils.qt.widget_properties import WidgetProperty
WidgetProperty.__get__ = lambda self, *args, **kwargs: self
viewcode_import = False
| bsd-3-clause |
dmnfarrell/epitopepredict | epitopepredict/plotting.py | 1 | 37189 | #!/usr/bin/env python
"""
epitopepredict plotting
Created February 2016
Copyright (C) Damien Farrell
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import absolute_import, print_function
import sys, os, math
from collections import OrderedDict
try:
import matplotlib
    matplotlib.use('agg')
import pylab as plt
except:
pass
import numpy as np
import pandas as pd
from . import base
colormaps={'tepitope':'Greens','netmhciipan':'Oranges','iedbmhc2':'Pinks',
'iedbmhc1':'Blues'}
defaultcolors = {'tepitope':'green','netmhciipan':'orange','basicmhc1':'yellow',
'iedbmhc1':'blue','iedbmhc2':'pink'}
def plot_heatmap(df, ax=None, figsize=(6,6), **kwargs):
"""Plot a generic heatmap """
if ax==None:
fig=plt.figure(figsize=figsize)
ax=fig.add_subplot(111)
else:
fig = ax.get_figure()
df = df._get_numeric_data()
hm = ax.pcolor(df, **kwargs)
#fig.colorbar(hm, ax=ax)
ax.set_xticks(np.arange(0.5, len(df.columns)))
ax.set_yticks(np.arange(0.5, len(df.index)))
ax.set_xticklabels(df.columns, minor=False, fontsize=14,rotation=90)
ax.set_yticklabels(df.index, minor=False, fontsize=14)
ax.set_ylim(0, len(df.index))
hm.set_clim(0,1)
#ax.grid(True)
plt.tight_layout()
return ax
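# Minimal usage sketch for plot_heatmap (the DataFrame below is made-up data,
# not part of the package):
#   scores = pd.DataFrame(np.random.rand(3, 4),
#                         index=['a1', 'a2', 'a3'], columns=list('wxyz'))
#   ax = plot_heatmap(scores, cmap='Blues')
#   ax.get_figure().savefig('heatmap.png')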
def get_seq_from_binders(P, name=None):
"""Get sequence from binder data. Probably better to store the sequences in the object?"""
if P.data is None or len(P.data)==0:
return
if name is not None:
data=P.data[P.data.name==name]
else:
data=P.data
l = len(data.iloc[0].peptide)
seqlen = data.pos.max()+l
return seqlen
def get_bokeh_colors(palette='Set1'):
from bokeh.palettes import brewer
n = len(base.predictors)
pal = brewer[palette][n]
i=0
clrs={}
for m in base.predictors:
clrs[m] = pal[i]
i+=1
return clrs
def get_sequence_colors(seq):
"""Get colors for a sequence"""
from bokeh.palettes import brewer, viridis, plasma
from Bio.PDB.Polypeptide import aa1
pal = plasma(20)
pal.append('white')
aa1 = list(aa1)
aa1.append('-')
pcolors = {i:j for i,j in zip(aa1,pal)}
text = list(seq)
clrs = {'A':'red','T':'green','G':'orange','C':'blue','-':'white'}
try:
colors = [clrs[i] for i in text]
except:
colors = [pcolors[i] for i in text]
return colors
def bokeh_test(n=20,height=400):
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.models.glyphs import Text, Rect, Circle
data = {'x_values': np.random.random(n),
'y_values': np.random.random(n)}
source = ColumnDataSource(data=data)
tools = "pan,wheel_zoom,hover,tap,reset,save"
p = figure(plot_height=height,tools=tools)
c = Circle(x='x_values', y='y_values', radius=.02, line_color='black', fill_color='blue', fill_alpha=.6)
#p.circle(x='x_values', y='y_values', radius=.02, line_color='black', fill_color='blue', fill_alpha=.6, source=source)
p.add_glyph(source, c)
return p
def bokeh_summary_plot(df, savepath=None):
"""Summary plot"""
from bokeh.plotting import figure
from bokeh.layouts import column
from bokeh.models import ColumnDataSource,Range1d,HoverTool,TapTool,CustomJS,OpenURL
TOOLS = "pan,wheel_zoom,hover,tap,reset,save"
colors = get_bokeh_colors()
df=df.rename(columns={'level_0':'predictor'})
df['color'] = [colors[x] for x in df['predictor']]
p = figure(title = "Summary", tools=TOOLS, width=500, height=500)
p.xaxis.axis_label = 'binder_density'
p.yaxis.axis_label = 'binders'
#make metric for point sizes
#df['point_size'] = df.binder_density
source = ColumnDataSource(data=df)
p.circle(x='binder_density', y='binders', line_color='black', fill_color='color',
fill_alpha=0.4, size=10, source=source, legend_group='predictor')
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
("name", "@name"),
("length", "@length"),
("binders", "@binders"),
("binder_density", "@binder_density"),
("top_peptide", "@top_peptide"),
("max_score", "@max_score"),
])
p.toolbar.logo = None
if savepath != None:
url = "http://localhost:8000/sequence?savepath=%s&name=@name" %savepath
taptool = p.select(type=TapTool)
taptool.callback = OpenURL(url=url)
callback = CustomJS(args=dict(source=source), code="""
var data = source.data;
var f = cb_obj.value
data['x'] = f
source.trigger('change');
source.change.emit();
""")
from bokeh.layouts import widgetbox
from bokeh.models.widgets import Select
menu = [(i,i) for i in df.columns]
select = Select(title='X', value='A', options=list(df.columns), width=8)
select.js_on_change('value', callback)
#layout = column(p, select, sizing_mode='scale_width')
return p
def bokeh_plot_tracks(preds, title='', n=2, name=None, cutoff=.95, cutoff_method='default',
width=None, height=None, x_range=None, tools=True, palette='Set1',
seqdepot=None, exp=None):
"""
Plot binding predictions as parallel tracks of blocks for each allele.
This uses Bokeh.
Args:
title: plot title
n: min alleles to display
name: name of protein to show if more than one in data
Returns: a bokeh figure for embedding or displaying in a notebook
"""
from collections import OrderedDict
from bokeh.models import Range1d, HoverTool, FactorRange, ColumnDataSource, Text, Rect
from bokeh.plotting import figure
if tools == True:
tools="xpan, xwheel_zoom, hover, reset, save"
else:
tools=''
if width == None:
width=1000
sizing_mode='scale_width'
else:
sizing_mode='fixed'
alls=1
seqlen=0
for P in preds:
if P.data is None or len(P.data)==0:
continue
seqlen = get_seq_from_binders(P, name=name)
#print (seqlen)
alls += len(P.data.groupby('allele'))
if seqlen == 0:
return
if height==None:
height = 140+10*alls
if x_range == None:
x_range = Range1d(0, seqlen, bounds='auto')
yrange = Range1d(start=0, end=alls+3)
plot = figure(title=title, plot_width=width, sizing_mode=sizing_mode,
plot_height=height, y_range=yrange, x_range=x_range,
y_axis_label='allele',
tools=tools)
h=3
if exp is not None:
plotExp(plot, exp)
colors = get_bokeh_colors(palette)
x=[];y=[];allele=[];widths=[];clrs=[];peptide=[]
predictor=[];position=[];score=[];leg=[];seqs=[];text=[]
l=80
i=0
for pred in preds:
m = pred.name
        df = pred.data
        if df is None or len(df) == 0:
            print('no data to plot for %s' %m)
            continue
        seq = base.sequence_from_peptides(df)
if name != None:
df = df[df.name==name]
sckey = pred.scorekey
binders = pred.get_binders(name=name, cutoff=cutoff, cutoff_method=cutoff_method)
#print (cutoff, n)
pb = pred.promiscuous_binders(n=n, name=name, cutoff=cutoff, cutoff_method=cutoff_method)
if len(pb) == 0:
continue
l = base.get_length(pb)
grps = df.groupby('allele')
alleles = grps.groups.keys()
#seqs.extend([seq for i in alleles])
#t = [i for s in list(seqs) for i in s]
#text.extend(t)
if len(pb)==0:
continue
c = colors[m]
leg.append(m)
seqlen = df.pos.max()+l
for a,g in grps:
b = binders[binders.allele==a]
b = b[b.pos.isin(pb.pos)] #only promiscuous
b.sort_values('pos',inplace=True)
scores = b[sckey].values
score.extend(scores)
pos = b['pos'].values
position.extend(pos)
x.extend(pos+(l/2.0)) #offset as coords are rect centers
widths.extend([l for i in scores])
clrs.extend([c for i in scores])
y.extend([h+0.5 for i in scores])
alls = [a for i in scores]
allele.extend(alls)
peptide.extend(list(b.peptide.values))
predictor.extend([m for i in scores])
h+=1
i+=1
data = dict(x=x,y=y,allele=allele,peptide=peptide,width=widths,color=clrs,
predictor=predictor,position=position,score=score)
source = ColumnDataSource(data=data)
plot.rect(x='x', y='y', source=source, width='width', height=0.8,
legend_group='predictor',
color='color',line_color='gray',alpha=0.7)
#glyph = Text(x="x", y="y", text="text", text_align='center', text_color="black",
# text_font="monospace", text_font_size="10pt")
#plot.add_glyph(source, glyph)
hover = plot.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
("allele", "@allele"),
("position", "@position"),
("peptide", "@peptide"),
("score", "@score"),
("predictor", "@predictor"),
])
plot.xaxis.major_label_text_font_size = "9pt"
plot.xaxis.major_label_text_font_style = "bold"
plot.ygrid.grid_line_color = None
plot.xgrid.minor_grid_line_alpha = 0.1
plot.xgrid.minor_grid_line_color = 'gray'
#plot.xgrid.minor_grid_line_dash = [6, 4]
plot.yaxis.major_label_text_font_size = '0pt'
#plot.xaxis.major_label_orientation = np.pi/4
plot.min_border = 10
plot.background_fill_color = "#fafaf4"
plot.background_fill_alpha = 0.5
plot.legend.orientation = "horizontal"
plot.legend.location = "bottom_right"
#plot.legend.label_text_font_size = 12
plot.toolbar.logo = None
plot.toolbar_location = "right"
return plot
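# Hedged usage sketch: `predictors` stands for a list of predictor objects that
# already contain prediction results (how they are created depends on the
# calling code, e.g. the epitopepredict command line or API), so treat the
# names below as assumptions rather than fixed API:
#   from bokeh.io import show
#   p = bokeh_plot_tracks(predictors, title='promiscuous binders',
#                         name='protein1', n=2, cutoff=.95)
#   if p is not None:
#       show(p)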
def bokeh_plot_sequence(preds, name=None, n=2, cutoff=.95, cutoff_method='default',
width=1000, color_sequence=False, title=''):
"""Plot sequence view of binders """
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, LinearAxis, Grid, Range1d, Text, Rect, CustomJS, Slider, RangeSlider, FactorRange
from bokeh.layouts import gridplot, column
colors = []
seqs = []
text = []
alleles = []
ylabels = []
pcolors = get_bokeh_colors()
for P in preds:
print (P.name)
df = P.data
#get sequence from results dataframe
seq = base.sequence_from_peptides(df)
l = base.get_length(df)
b = P.get_binders(name=name, cutoff=cutoff, cutoff_method=cutoff_method)
pb = P.promiscuous_binders(name=name, cutoff=cutoff, n=n, cutoff_method=cutoff_method)
b = b[b.pos.isin(pb.pos)] #only promiscuous
grps = b.groupby('allele')
al = list(grps.groups)
alleles.extend(al)
ylabels.extend([P.name+' '+i for i in al])
currseq=[seq for i in al]
seqs.extend(currseq)
t = [i for s in currseq for i in s]
text.extend(t)
print (len(seqs),len(text))
for a in al:
pos=[]
f = list(b[b.allele==a].pos)
for i in f:
pos.extend(np.arange(i,i+l))
if color_sequence is True:
                c = get_sequence_colors(seq)
else:
c = ['white' for i in seq]
for i in pos:
c[i] = pcolors[P.name]
colors.extend(c)
#put into columndatasource for plotting
N = len(seqs[0])
S = len(alleles)
x = np.arange(1, N+1)
y = np.arange(0,S,1)
xx, yy = np.meshgrid(x, y)
gx = xx.ravel()
gy = yy.flatten()
recty = gy+.5
source = ColumnDataSource(dict(x=gx, y=gy, recty=recty, text=text, colors=colors))
plot_height = len(seqs)*15+60
x_range = Range1d(0,N+1, bounds='auto')
L=100
if len(seq)<100:
L=len(seq)
view_range = (0,L)
viewlen = view_range[1]-view_range[0]
fontsize="8.5pt"
tools="xpan, reset, save"
p = figure(title=title, plot_width=width, plot_height=plot_height, x_range=view_range, y_range=ylabels, tools=tools,
min_border=0, sizing_mode='stretch_both', lod_factor=10, lod_threshold=1000)
seqtext = Text(x="x", y="y", text="text", text_align='center',text_color="black",
text_font="monospace", text_font_size=fontsize)
rects = Rect(x="x", y="recty", width=1, height=1, fill_color="colors", line_color='gray', fill_alpha=0.6)
p.add_glyph(source, rects)
p.add_glyph(source, seqtext)
p.xaxis.major_label_text_font_style = "bold"
p.grid.visible = False
p.toolbar.logo = None
#preview view (no text)
p1 = figure(title=None, plot_width=width, plot_height=S*3+5, x_range=x_range, y_range=(0,S), tools=[],
min_border=0, sizing_mode='stretch_width', lod_factor=10, lod_threshold=10)
rects = Rect(x="x", y="recty", width=1, height=1, fill_color="colors", line_color=None, fill_alpha=0.6)
previewrect = Rect(x=viewlen/2,y=S/2, width=viewlen, height=S*.99, line_color='darkblue', fill_color=None)
p1.add_glyph(source, rects)
p1.add_glyph(source, previewrect)
p1.yaxis.visible = False
p1.grid.visible = False
p1.toolbar_location = None
#callback for slider move
jscode="""
var start = cb_obj.value[0];
var end = cb_obj.value[1];
x_range.setv({"start": start, "end": end})
rect.width = end-start;
rect.x = start+rect.width/2;
var fac = rect.width/width;
console.log(fac);
if (fac>=.14) { fontsize = 0;}
else { fontsize = 8.5; }
text.text_font_size=fontsize+"pt";
"""
callback = CustomJS(
args=dict(x_range=p.x_range,rect=previewrect,text=seqtext,width=p.plot_width), code=jscode)
slider = RangeSlider (start=0, end=N, value=(0,L), step=10)#, callback_policy="throttle")
slider.js_on_change('value_throttled', callback)
#callback for plot drag
jscode="""
start = parseInt(range.start);
end = parseInt(range.end);
slider.value[0] = start;
rect.width = end-start;
rect.x = start+rect.width/2;
"""
#p.x_range.callback = CustomJS(args=dict(slider=slider, range=p.x_range, rect=previewrect),
# code=jscode)
p = gridplot([[p1],[p],[slider]], toolbar_location="below", merge_tools=False)
return p
def bokeh_plot_grid(pred, name=None, width=None, palette='Blues', **kwargs):
"""Plot heatmap of binding results for a predictor."""
from bokeh.plotting import figure
from bokeh.models import (Range1d,HoverTool,FactorRange,ColumnDataSource,
LinearColorMapper,LogColorMapper,callbacks,DataRange)
from bokeh.palettes import all_palettes
TOOLS = "xpan, xwheel_zoom, hover, reset, save"
if width == None:
sizing_mode = 'scale_width'
width=900
else:
sizing_mode = 'fixed'
P=pred
df = P.data
if df is None:
return
cols = ['allele','pos','peptide',P.scorekey]
#d = df[cols].copy()
b = P.get_binders(name=name,**kwargs)
d = P.data.copy()
#mark binders
mask = d.index.isin(b.index)
d['binder'] = mask
l = base.get_length(df)
grps = df.groupby('allele')
alleles = grps.groups
seqlen = get_seq_from_binders(P, name)
seq = base.seq_from_binders(df)
height = 300
alls = len(alleles)
x_range = Range1d(0,seqlen-l+1, bounds='auto')
#x_range = list(seq)
y_range = df.allele.unique()
val = P.scorekey
cut = P.cutoff
if P.name not in ['tepitope']:
d['score1'] = d[val].apply( lambda x: 1-math.log(x, 50000))
val='score1'
    d.loc[d.binder==False, val] = d[val].min()
source = ColumnDataSource(d)
colors = all_palettes[palette][7]
mapper = LinearColorMapper(palette=colors, low=d[val].max(), high=d[val].min())
p = figure(title=P.name+' '+name,
x_range=x_range, y_range=y_range,
x_axis_location="above", plot_width=width, plot_height=height,
tools=TOOLS, toolbar_location='below', sizing_mode=sizing_mode)
p.rect(x="pos", y="allele", width=1, height=1,
source=source,
fill_color={'field': val,'transform':mapper},
line_color='gray', line_width=.1)
p.select_one(HoverTool).tooltips = [
('allele', '@allele'),
(P.scorekey, '@%s{1.11}' %P.scorekey),
('pos', '@pos'),
('peptide', '@peptide')
]
p.toolbar.logo = None
p.yaxis.major_label_text_font_size = "10pt"
p.yaxis.major_label_text_font_style = "bold"
return p
def bokeh_plot_bar(preds, name=None, allele=None, title='', width=None, height=100,
palette='Set1', tools=True, x_range=None):
"""Plot bars combining one or more prediction results for a set of
peptides in a protein/sequence"""
from bokeh.models import Range1d,HoverTool,ColumnDataSource
from bokeh.plotting import figure
from bokeh.transform import dodge
from bokeh.core.properties import value
height = 180
seqlen = 0
if width == None:
width=700
sizing_mode='scale_width'
for P in preds:
if P.data is None or len(P.data)==0:
continue
seqlen = get_seq_from_binders(P)
if x_range == None:
x_range = Range1d(0,seqlen)
y_range = Range1d(start=0, end=1)
if tools == True:
tools="xpan, xwheel_zoom, reset, hover"
else:
tools=None
plot = figure(title=title,plot_width=width,sizing_mode=sizing_mode,
plot_height=height, y_range=y_range, x_range=x_range,
y_axis_label='rank',
tools=tools)
colors = get_bokeh_colors(palette)
data = {}
mlist = []
for pred in preds:
m = pred.name
df = pred.data
if df is None or len(df) == 0:
continue
if name != None:
df = df[df.name==name]
grps = df.groupby('allele')
alleles = grps.groups.keys()
if allele not in alleles:
continue
#print (m, alleles, allele)
df = df[df.allele==allele]
df = df.sort_values('pos').set_index('pos')
key = pred.scorekey
X = df[key]
X = (X+abs(X.min())) / (X.max() - X.min())
data[m] = X.values
data['pos'] = list(X.index)
#data['peptide'] = df.peptide.values
mlist.append(m)
source = ColumnDataSource(data)
w = round(1.0/len(mlist),1)-.1
i=-w/2
for m in mlist:
#m = pred.name
c = colors[m]
plot.vbar(x=dodge('pos', i, range=plot.x_range), top=m, width=w, source=source,
color=c, legend=value(m), alpha=.8)
i+=w
hover = plot.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
#("allele", "@allele"),
("pos", "@pos") ])
plot.min_border = 10
plot.background_fill_color = "beige"
plot.background_fill_alpha = 0.5
plot.toolbar.logo = None
plot.toolbar_location = "right"
plot.legend.location = "top_right"
plot.legend.orientation = "horizontal"
return plot
def bokeh_vbar(x, height=200, title='', color='navy'):
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
source = ColumnDataSource(data={'chr':list(x.index),'x':range(len(x)),'y':x.values})
plot = figure(title=title, x_range = list(x.index), plot_height=height, tools='save,reset')
plot.vbar(x='chr',top='y', width=.8, bottom=0,source=source, color=color)
plot.ygrid.grid_line_color = None
plot.xgrid.grid_line_color = None
plot.xaxis.major_label_orientation = np.pi/4
return plot
def bokeh_pie_chart(df, title='', radius=.5, width=400, height=400, palette='Spectral'):
"""Bokeh pie chart"""
from bokeh.plotting import figure
from bokeh.models import HoverTool,ColumnDataSource
from math import pi
s = df.cumsum()/df.sum()
cats = s.index
p=[0]+list(s)
#print (p)
starts = [1/2*pi-(i*2*pi) for i in p[:-1]]
ends = [1/2*pi-(i*2*pi) for i in p[1:]]
from bokeh.palettes import brewer
n = len(s)
pal = brewer[palette][6]
source = ColumnDataSource(
dict(x=[0 for x in s], y=[0 for x in s],
radius = [radius for x in s],
category= cats,
starts=starts,
ends=ends,
colors=pal,
counts = df
))
plot = figure(title=title, plot_width=width, plot_height=height, tools='save,reset')
plot.wedge(x='x', y='y', radius='radius', direction="clock", fill_color='colors', color='black',
start_angle='starts', end_angle='ends', legend='category', source=source)
plot.axis.visible = False
plot.ygrid.visible = False
plot.xgrid.visible = False
#hover = plot.select(dict(type=HoverTool))
#hover.tooltips = [
# ('category', '@category'),
# ('percents','@counts')
#]
return plot
def plot_tracks(preds, name, n=1, cutoff=.95, cutoff_method='default', regions=None,
legend=False, colormap='Paired', figsize=None, ax=None, **kwargs):
"""
Plot binders as bars per allele using matplotlib.
Args:
preds: list of one or more predictors
name: name of protein to plot
n: number of alleles binder should be found in to be displayed
cutoff: percentile cutoff to determine binders to show
"""
import matplotlib as mpl
import pylab as plt
from matplotlib.patches import Rectangle
if ax == None:
if figsize==None:
h = sum([len(p.data.groupby('allele')) for p in preds])
w = 10
h = round(h*.1+2)
figsize = (w,h)
#plt.clf()
fig = plt.figure(figsize=figsize,facecolor='white')
ax = fig.add_subplot(111)
p = len(preds)
cmap = mpl.cm.get_cmap(colormap)
colors = { preds[i].name : cmap(float(i)/p) for i in range(p) }
alleles = []
leg = []
y=0
handles = []
for pred in preds:
m = pred.name
df = pred.data
if df is None or len(df) == 0:
print('no data to plot for %s' %m)
continue
if name != None:
if name not in df.name.unique():
print ('no such sequence %s' %name)
continue
df = df[df.name==name]
sckey = pred.scorekey
binders = pred.get_binders(name=name, cutoff=cutoff,
cutoff_method=cutoff_method)
#print (binders)
pb = pred.promiscuous_binders(binders=binders, n=n)
if len(pb) == 0:
continue
l = base.get_length(pb)
seqlen = df.pos.max()+l
#print (name,m,df.pos.max(),l,seqlen)
grps = df.groupby('allele')
if m in colors:
c=colors[m]
else:
c='blue'
leg.append(m)
order = sorted(grps.groups)
alleles.extend(order)
#for a,g in grps:
for a in order:
g = grps.groups[a]
b = binders[binders.allele==a]
b = b[b.pos.isin(pb.pos)] #only promiscuous
b.sort_values('pos',inplace=True)
pos = b['pos'].values+1 #assumes pos is zero indexed
#clrs = [scmap.to_rgba(i) for i in b[sckey]]
#for x,c in zip(pos,clrs):
for x in pos:
rect = ax.add_patch(Rectangle((x,y), l, 1, facecolor=c, edgecolor='black',
lw=1.5, alpha=0.6))
y+=1
handles.append(rect)
if len(leg) == 0:
return
ax.set_xlim(0, seqlen)
ax.set_ylim(0, len(alleles))
w=20
if seqlen>500: w=100
ax.set_xticks(np.arange(0, seqlen, w))
ax.set_ylabel('allele')
ax.set_yticks(np.arange(.5,len(alleles)+.5))
fsize = 14-1*len(alleles)/40.
ax.set_yticklabels(alleles, fontsize=fsize )
    ax.grid(True, which='major', alpha=0.5)
ax.set_title(name, fontsize=16, loc='right')
if regions is not None:
r = regions[regions.name==name]
coords = (list(r.start),list(r.end-r.start))
coords = zip(*coords)
plot_regions(coords, ax, color='gray')
if legend == True:
ax.legend(handles, leg, bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3)
plt.tight_layout()
return ax
def plot_regions(coords, ax, color='red', label='', alpha=0.6):
"""Highlight regions in a prot binder plot"""
from matplotlib.patches import Rectangle
#l = len(seqs.head(1)['key'].max())
h = ax.get_ylim()[1]
for c in coords:
x,l = c
ax.add_patch(Rectangle((x,0), l, h,
facecolor=color, lw=.8, alpha=alpha, zorder=0))
return
def draw_labels(labels, coords, ax):
"""Add labels on axis"""
bbox_args = dict(boxstyle='square',fc='whitesmoke')
from matplotlib.transforms import blended_transform_factory
tform = blended_transform_factory(ax.transData, ax.transAxes)
for text, x in zip(labels,coords):
xy = (x,-.05)
an = ax.annotate(text, xy=xy, xycoords=tform,
ha='center', va="center",
size=14,
zorder=10, textcoords='offset points',
bbox=bbox_args)
plt.subplots_adjust(bottom=0.1)
return
def plot_bars(P, name, chunks=1, how='median', cutoff=20, color='black'):
"""
Bar plots for sequence using median/mean/total scores.
Args:
P: predictor with data
name: name of protein sequence
chunks: break sequence up into 1 or more chunks
how: method to calculate score bar value
        cutoff: percentile cutoff to show peptide
"""
import seaborn as sns
df = P.data[P.data.name==name].sort_values('pos')
w = 10
l= base.get_length(df)
seqlen = df.pos.max()+l
funcs = {'median': np.median, 'mean': np.mean, 'sum': np.sum}
grps = df.groupby('pos')
key = P.scorekey
    X = grps.agg({key: funcs[how], 'peptide': base.first})
q = (1-cutoff/100.) #score quantile value
cutoff = X[key].quantile(q)
    X.loc[X[key] < cutoff, key] = np.nan
if len(X)<20:
chunks = 1
seqlist = X.peptide.apply( lambda x : x[0])
seqchunks = np.array_split(X.index, chunks)
f,axs = plt.subplots(chunks,1,figsize=(15,2+2.5*chunks))
if chunks == 1:
axs = [axs]
else:
axs = list(axs.flat)
for c in range(chunks):
#print (c)
ax = axs[c]
st = seqchunks[c][0]
end = seqchunks[c][-1]
p = X[st:end]
#p = p[p.peptide.isin(cb.peptide)]
ax.bar(p.index, p[key], width=1, color=color)
ax.set_title(str(st)+'-'+str(end), loc='right')
xseq = seqlist[st:end]
if len(xseq)<150:
fsize = 16-1*len(xseq)/20.
ax.set_xlim(st,end)
ax.set_xticks(p.index+0.5)
ax.set_xticklabels(xseq, rotation=0, fontsize=fsize)
ax.set_ylim(X[key].min(), X[key].max())
f.suptitle(name+' - '+P.name)
plt.tight_layout()
return axs
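# Illustrative call (names are assumptions): given a predictor P whose results
# include a sequence called 'protein1', draw the top-scoring 20% of positions
# in two stacked panels and save the figure.
#   axs = plot_bars(P, 'protein1', chunks=2, how='median', cutoff=20)
#   plt.savefig('protein1_bars.png')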
def plot_bcell(plot,pred,height,ax=None):
"""Line plot of iedb bcell results"""
x = pred.data.Position
y = pred.data.Score
h = height
y = y+abs(min(y))
y = y*(h/max(y))+3
#plot.line(x, y, line_color="red", line_width=2, alpha=0.6,legend='bcell')
ax.plot(x,y,color='blue')
return
def plot_seqdepot(annotation, ax):
"""Plot sedepot annotations - replace with generic plot coords track"""
from matplotlib.patches import Rectangle
y=-1.5
fontsize=12
if 'signalp' in annotation:
bbox_args = dict(boxstyle='rarrow', fc='white', lw=1, alpha=0.8)
pos = annotation['signalp'].values()
print (pos)
for x in pos:
an = ax.annotate('SP', xy=(x,y), xycoords='data',
ha='left', va="center", bbox=bbox_args,
size=fontsize)
if 'tmhmm' in annotation:
vals = annotation['tmhmm']
pos = [i[0]+(i[1]-i[0])/2.0 for i in vals]
widths = [i[1]-i[0] for i in vals]
bbox_args = dict(boxstyle='round', fc='deepskyblue', lw=1, alpha=0.8)
for x,w in zip(pos,widths):
an = ax.annotate('TMHMM', xy=(x,y), xycoords='data',
ha='left', va="center", bbox=bbox_args,
size=fontsize)
if 'pfam27' in annotation:
vals = annotation['pfam27']
text = [i[0] for i in vals]
pos = [i[1]+(i[2]-i[1])/2.0 for i in vals]
widths = [i[2]-i[1] for i in vals]
#print (pos,widths,text)
bbox_args = dict(boxstyle='round', fc='white', lw=1, alpha=0.8)
for x,w,t in zip(pos,widths,text):
an = ax.annotate(t, xy=(x,y), xycoords='data',
ha='left', va="center", bbox=bbox_args,
size=fontsize)
ax.set_ylim(y-1, ax.get_ylim()[1])
return
def plot_multiple(preds, names, kind='tracks', regions=None, genome=None, **kwargs):
"""Plot results for multiple proteins"""
for prot in names:
if kind == 'tracks':
ax = plot_tracks(preds,name=prot,**kwargs)
elif kind == 'bar':
axs = plot_bars(preds[0],name=prot)
ax = axs[0]
if regions is not None:
r = regions[regions.name==prot]
print (r)
#print genome[genome.locus_tag==prot]
coords = (list(r.start),list(r.end-r.start))
coords = zip(*coords)
plot_regions(coords, ax, color='gray')
#labels = list(r.peptide)
#plotting.draw_labels(labels, coords, ax)
if genome is not None:
p = genome[genome['locus_tag']==prot]
seq = p.translation.iloc[0]
from . import analysis
sd = analysis.get_seqdepot(seq)['t']
plot_seqdepot(sd, ax)
plt.tight_layout()
plt.show()
return
def plot_binder_map(P, name, values='rank', cutoff=20, chunks=1, cmap=None):
"""
Plot heatmap of binders above a cutoff by rank or score.
Args:
P: predictor object with data
name: name of protein to plot
values: data column to use for plot data, 'score' or 'rank'
cutoff: cutoff if using rank as values
chunks: number of plots to split the sequence into
"""
import pylab as plt
import seaborn as sns
df = P.data[P.data.name==name].sort_values('pos')
w = 10
l= base.get_length(df)
seqlen = df.pos.max()+l
f,axs = plt.subplots(chunks,1,figsize=(15,3+2.5*chunks))
if chunks == 1:
axs = [axs]
else:
axs = list(axs.flat)
if values == 'score':
values = P.scorekey
if cmap == None: cmap='RdBu_r'
X = df.pivot_table(index='allele', columns='pos', values=values)
if values == P.scorekey:
#normalise score across alleles for clarity
zscore = lambda x: (x - x.mean()) / x.std()
X = X.apply(zscore, 1)
if values == 'rank':
X[X > cutoff] = 0
if cmap == None: cmap='Blues'
s = df.drop_duplicates(['peptide','pos'])
seqlist = s.set_index('pos').peptide.apply( lambda x : x[0])
#print seqlist
seqchunks = np.array_split(X.columns, chunks)
for c in range(chunks):
ax = axs[c]
p = X[seqchunks[c]]
#plot heatmap
vmin=min(X.min()); vmax=max(X.max())
center = vmin+(vmax-vmin)/2
sns.heatmap(p, cmap=cmap, cbar_kws={"shrink": .5},
vmin=vmin, vmax=vmax, #center=center,
ax=ax, xticklabels=20)
#show sequence on x-axis
st = seqchunks[c][0]
end = seqchunks[c][-1]
xseq = seqlist[st:end]
ax.set_title(str(st)+'-'+str(end), loc='right')
ax.spines['bottom'].set_visible(True)
if len(seqchunks[c])<150:
fsize = 16-1*len(seqchunks[c])/20.
ax.set_xticks(np.arange(0,len(xseq))+0.5)
ax.set_xticklabels(xseq, rotation=0, fontsize=fsize)
f.suptitle(name+' - '+P.name)
plt.tight_layout()
return ax
def binders_to_coords(df):
"""Convert binder results to dict of coords for plotting"""
coords = {}
if not 'start' in df.columns:
df=base.get_coords(df)
if 'start' in df.columns:
for i,g in df.groupby('name'):
l = g.end-g.start
            coords[i] = list(zip(g.start, l))
return coords
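# Sketch of the conversion (hypothetical data): a binder table with columns
# name/start/end such as
#   name      start  end
#   protein1     10   19
#   protein1     40   49
# becomes {'protein1': [(10, 9), (40, 9)]}, i.e. (start, length) pairs keyed
# by sequence name, which is the per-track structure plot_overview expects.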
def plot_overview(genome, coords=None, cols=2, colormap='Paired',
legend=True, figsize=None):
"""
Plot regions of interest in a group of protein sequences. Useful for
seeing how your binders/epitopes are distributed in a small genome or subset of genes.
Args:
genome: dataframe with protein sequences
        coords: a dict (or list) of per-track dicts, each of the form {protein name: [(start, length[, label]), ...]}
cols: number of columns for plot, integer
"""
import pylab as plt
if type(coords) is list:
coords = { i:coords[i] for i in range(len(coords)) }
legend=False
import matplotlib as mpl
import seaborn as sns
#sns.reset_orig()
cmap = mpl.cm.get_cmap(colormap)
t = len(coords)
colors = [cmap(float(i)/t) for i in range(t)]
from matplotlib.patches import Rectangle
names = [coords[c].keys() for c in coords][0]
df = genome[genome.locus_tag.isin(names)]
rows = int(np.ceil(len(names)/float(cols)))
if figsize == None:
h = round(len(names)*.2+10./cols)
figsize = (14,h)
f,axs=plt.subplots(rows,cols,figsize=figsize)
grid=axs.flat
rects = {}
i=0
for idx,prot in df.iterrows():
ax=grid[i]
protname = prot.locus_tag
seq = prot.translation
if 'description' in prot:
title = prot.description
else:
title = protname
y=0
for label in coords:
c = coords[label]
if not protname in c:
continue
vals = c[protname]
#print vals
for v in vals:
x,l = v[0],v[1]
rect = ax.add_patch(Rectangle((x,y), l, .9,
facecolor=colors[y],
lw=1.2, alpha=0.8))
if len(v)>2:
s = v[2]
bbox_args = dict(fc=colors[y], lw=1.2, alpha=0.8)
ax.annotate(s, (x+l/2, y),
fontsize=12, ha='center', va='bottom')
if not label in rects:
rects[label] = rect
y+=1
i+=1
slen = len(seq)
w = round(float(slen)/20.)
w = math.ceil(w/20)*20
ax.set_xlim(0, slen)
ax.set_ylim(0, t)
ax.set_xticks(np.arange(0, slen, w))
ax.set_yticks([])
ax.set_title(title, fontsize=16, loc='right')
    #remove any subplot axes in the grid that were not used
    for j in range(i, rows*cols):
        try:
            f.delaxes(grid[j])
        except:
            pass
if legend == True:
f.legend(rects.values(), rects.keys(), loc=4)
plt.tight_layout()
return
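# Hedged example of the coords structure described above (all names invented):
# a dict keyed by track label, each mapping a locus_tag to (start, length) or
# (start, length, text) tuples.
#   coords = {'tepitope':    {'gene1': [(5, 9), (60, 9, 'pb')]},
#             'netmhciipan': {'gene1': [(30, 9)]}}
#   plot_overview(genome, coords, cols=1)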
def seqdepot_to_coords(sd, key='pfam27'):
"""
Convert seqdepot annotations to coords for plotting
"""
coords=[]
if len(sd['t'])==0 or not key in sd['t']:
return []
x = sd['t'][key]
#print x
if key in ['pfam27','pfam28']:
coords = [(i[1],i[2]-i[1],i[0]) for i in x]
elif key in ['gene3d','prints']:
coords = [(i[2],i[3]-i[2],i[1]) for i in x]
elif key == 'tmhmm':
coords = [(i[0],i[1]-i[0]) for i in x]
elif key == 'signalp':
x = x.items()
coords = [(i[1],10,'SP') for i in x]
return coords
def get_seqdepot_annotation(genome, key='pfam27'):
"""
Get seqdepot annotations for a set of proteins in dataframe.
"""
from . import seqdepot
annot={}
for i,row in genome.iterrows():
n = row.locus_tag
seq = row.translation
#print n,seq
sd = seqdepot.new()
aseqid = sd.aseqIdFromSequence(seq)
result = sd.findOne(aseqid)
#for x in result['t']:
# print x, result['t'][x]
x = seqdepot_to_coords(result, key)
annot[n] = x
return annot
| apache-2.0 |
wrightni/OSSP | training_gui.py | 1 | 40462 | #title: Training Set Creation for Random Forest Classification
#author: Nick Wright
#Inspired by: Justin Chen
#purpose: Creates a GUI for a user to identify watershed superpixels of an image as
# melt ponds, sea ice, or open water to use as a training data set for a
# Random Forest Classification method.
# Python 3:
import tkinter as tk
# Python 2:
# import Tkinter as tk
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import h5py
import os
import argparse
from ctypes import *
import gdal
from sklearn.ensemble import RandomForestClassifier
from select import select
import sys
import preprocess as pp
from segment import segment_image
from lib import utils
from lib import attribute_calculations as attr_calc
class PrintColor:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
class Buttons(tk.Frame):
# Defines the properties of all the controller buttons to be used by the GUI.
def __init__(self, parent):
tk.Frame.__init__(self, parent)
prev_btn = tk.Button(self, text="Previous Segment", width=16, height=2,
command=lambda: parent.event_manager.previous_segment())
prev_btn.grid(column=0, row=0, pady=(0,20))
water_btn = tk.Button(self, text="Open Water", width=16, height=2, highlightbackground='#000000',
command=lambda: parent.event_manager.classify("water"))
water_btn.grid(column=0, row=1, pady=1)
melt_btn = tk.Button(self, text="Melt Pond", width=16, height=2, highlightbackground='#4C678C',
command=lambda: parent.event_manager.classify("melt"))
melt_btn.grid(column=0, row=2, pady=1)
gray_btn = tk.Button(self, text="Dark and Thin Ice", width=16, height=2, highlightbackground='#D2D3D5',
command=lambda: parent.event_manager.classify("gray"))
gray_btn.grid(column=0, row=3, pady=1)
snow_btn = tk.Button(self, text="Snow or Ice", width=16, height=2,
command=lambda: parent.event_manager.classify("snow"))
snow_btn.grid(column=0, row=4, pady=1)
shadow_btn = tk.Button(self, text="Shadow", width=16, height=2, highlightbackground='#FF9200',
command=lambda: parent.event_manager.classify("shadow"))
shadow_btn.grid(column=0, row=5, pady=1)
unknown_btn = tk.Button(self, text="Unknown / Mixed", width=16, height=2,
command=lambda: parent.event_manager.classify("unknown"))
unknown_btn.grid(column=0, row=6, pady=1)
auto_btn = tk.Button(self, text="Autorun", width=16, height=2,
command=lambda: parent.event_manager.autorun())
auto_btn.grid(column=0, row=7, pady=(20,0))
next_btn = tk.Button(self, text="Next Image", width=16, height=2,
command=lambda: parent.event_manager.next_image())
next_btn.grid(column=0, row=8, pady=1)
quit_btn = tk.Button(self, text="Save and Quit", width=16, height=2,
command=lambda: parent.event_manager.quit_event())
quit_btn.grid(column=0, row=9, pady=1)
load_first_btn = tk.Button(self, text="Initialize Image", width=16, height=2,
command=lambda: parent.event_manager.initialize_image())
load_first_btn.grid(column=0, row=10, pady=(40,0))
class ProgressBar(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
self.total_counter = tk.StringVar()
self.total_counter.set("Total Progress: {}".format(0))
self.image_tracker = tk.StringVar()
self.image_tracker.set("")
total_text = tk.Label(self, textvariable=self.total_counter)
total_text.grid(column=0, row=0)
image_text = tk.Label(self, textvariable=self.image_tracker)
image_text.grid(column=0, row=1)
def update_progress(self):
self.total_counter.set("Total Progress: {}".format(self.parent.data.get_num_labels()))
self.image_tracker.set("Image {} of {}".format(self.parent.data.im_index + 1,
len(self.parent.data.available_images)))
class ImageDisplay(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
# Initialize class variables
# Populated in initialize_image method:
self.display_image = None
self.disp_xdim, self.disp_ydim, = 0, 0
# Populated in update_images:
self.zoom_win_x, self.zoom_win_y = 0, 0
# Creating the canvas where the images will be
self.fig = plt.figure(figsize=[10, 10])
self.fig.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.99, wspace=0.01, hspace=0.01)
canvas = FigureCanvasTkAgg(self.fig, self)
canvas.draw()
# toolbar = NavigationToolbar2TkAgg(canvas, frame)
canvas.get_tk_widget().grid(column=0, row=0)
# toolbar.pack(in_=frame, side='top')
self.cid = self.fig.canvas.mpl_connect('button_press_event', parent.event_manager.onclick)
# Create a placeholder while image data is loading
self.initial_display()
def initialize_image(self):
# Creates a local composite of the original image data for display
if self.parent.data.im_type == 'wv02_ms':
self.display_image = utils.create_composite([self.parent.data.original_image[4, :, :],
self.parent.data.original_image[2, :, :],
self.parent.data.original_image[1, :, :]],
dtype=np.uint8)
elif self.parent.data.im_type == 'pan':
self.display_image = utils.create_composite([self.parent.data.original_image,
self.parent.data.original_image,
self.parent.data.original_image],
dtype=np.uint8)
elif self.parent.data.im_type == 'srgb':
self.display_image = utils.create_composite([self.parent.data.original_image[0, :, :],
self.parent.data.original_image[1, :, :],
self.parent.data.original_image[2, :, :]],
dtype=np.uint8)
self.disp_xdim, self.disp_ydim = np.shape(self.display_image)[0:2]
def loading_display(self):
plt.clf()
loading_text = "Images are loading, please wait... "
# Creates an image placeholder while the data is being loaded.
ax = self.fig.add_subplot(1, 1, 1, adjustable='datalim', frame_on=False)
ax.text(0.5, 0.5, loading_text, horizontalalignment='center', verticalalignment='center')
ax.axis('off')
# Updating the plots
self.fig.canvas.draw()
def initial_display(self):
plt.clf()
welcome_text = "No images have been loaded. Press <Initialize Image> to begin."
tds_text = "Training data file: \n {}".format(self.parent.data.tds_filename)
image_text = "Images found: \n"
if len(self.parent.data.available_images) == 0:
image_text += 'None'
else:
for im in self.parent.data.available_images:
image_text += im + '\n'
# Creates an image placeholder until the first image has been loaded.
ax = self.fig.add_subplot(2, 1, 1, adjustable='datalim', frame_on=False)
ax.text(0.5, 0.3, welcome_text, horizontalalignment='center', verticalalignment='bottom', weight='bold')
ax.axis('off')
ax2 = self.fig.add_subplot(2, 1, 2, adjustable='datalim', frame_on=False)
ax2.text(0.5, 1, tds_text, horizontalalignment='center', verticalalignment='center')
ax2.text(0.5, .9, image_text, horizontalalignment='center', verticalalignment='top')
ax2.axis('off')
# Updating the plots
self.fig.canvas.draw()
def update_images(self, segment_id):
# Clear the existing display
plt.clf()
current_seg = self.parent.data.segmented_image == segment_id # array of 0 or 1 where 1 = current segment
segment_pos = np.nonzero(current_seg) # returns the array position of the segment
zoom_size = 100
x_min = np.amin(segment_pos[0]) - zoom_size
x_max = np.amax(segment_pos[0]) + zoom_size
y_min = np.amin(segment_pos[1]) - zoom_size
y_max = np.amax(segment_pos[1]) + zoom_size
# Store the zoom window corner coordinates for reference in onclick()
# xMin and yMin are defined backwards
self.zoom_win_x = y_min
self.zoom_win_y = x_min
if x_min < 0:
x_min = 0
if x_max >= self.disp_xdim:
x_max = self.disp_xdim - 1
if y_min < 0:
y_min = 0
if y_max >= self.disp_ydim:
y_max = self.disp_ydim - 1
# Image 2 (Zoomed in image, no highlighted segment)
cropped_image = self.display_image[x_min:x_max, y_min:y_max]
# Image 3 (Zoomed in image, with segment highlight)
color_image = np.copy(self.display_image)
color_image[:, :, 0][current_seg] = 255
color_image[:, :, 2][current_seg] = 0
color_image = color_image[x_min:x_max, y_min:y_max]
# Text instructions
instructions = '''
Open Water: Surface areas that had zero ice cover
as well as those covered by an unconsolidated frazil
or grease ice. \n
Melt Pond: Surface areas with water covering ice.
Areas where meltwater is trapped in isolated patches
atop ice, and the optically similar submerged ice
near the edge of a floe. \n
Dark Ice:
Freezing season: Surfaces of thin ice that are
not snow covered, including nilas and young ice.
Melt season: ice covered by saturated slush,
but not completely submerged in water \n
Snow/Ice: Optically thick ice, and ice with a snow cover. \n
Shadow: Surfaces that are covered by a dark shadow.
\n
'''
# Plotting onto the GUI
ax = self.fig.add_subplot(2, 2, 1)
ax.imshow(color_image, interpolation='None', vmin=0, vmax=255)
ax.tick_params(axis='both',  # changes apply to both axes
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False,
right=False,
labelleft=False,
labelbottom=False)
ax.set_label('ax1')
ax = self.fig.add_subplot(2, 2, 2)
ax.imshow(cropped_image, interpolation='None', vmin=0, vmax=255)
ax.tick_params(axis='both',  # changes apply to both axes
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False,
right=False,
labelleft=False,
labelbottom=False)
ax.set_label('ax2')
ax = self.fig.add_subplot(2, 2, 3)
ax.imshow(self.display_image, interpolation='None', vmin=0, vmax=255)
ax.axvspan(y_min,
y_max,
1. - float(x_max) / self.disp_xdim,
1. - float(x_min) / self.disp_xdim,
color='red',
alpha=0.3)
ax.set_xlim([0, np.shape(self.display_image)[1]])
ax.tick_params(axis='both',  # changes apply to both axes
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False,
right=False,
labelleft=False,
labelbottom=False)
ax.set_label('ax3')
ax = self.fig.add_subplot(2, 2, 4, adjustable='datalim', frame_on=False)
ax.text(0.5, 0.5, instructions, horizontalalignment='center', verticalalignment='center')
ax.axis('off')
# Updating the plots
self.fig.canvas.draw()
class DataManager:
def __init__(self, available_images, tds_filename, username, im_type):
# Image and segment data (populated in load_image())
self.original_image = None
self.segmented_image = None
# Variable Values (populated in load_training_data())
self.label_vector = []
self.segment_list = []
self.feature_matrix = []
self.tracker = 0 # Number of segment sets added from the current image
self.im_index = 0 # Index for progressing through available images
# Global Static Values
self.tds_filename = tds_filename
self.username = username
self.im_type = im_type
self.available_images = available_images
# Image Static Value (populated in load_image())
self.wb_ref = None
self.br_ref = None
self.im_date = None
self.im_name = None
def load_next_image(self):
# Increment the image index
self.im_index += 1
# Loop im_index based on the available number of images
self.im_index = self.im_index % len(self.available_images)
# Load the new data
self._load_image()
def load_previous_image(self):
# If an image has already been loaded, and there is no previous data,
# prevent the user from using this button.
if self.get_num_labels() == 0 and self.im_name is not None:
return
# If labels exist find the correct image to load
if self.get_num_labels() != 0:
# If this does not find a match, im_index will default to its current value
for i in range(len(self.available_images)):
if self.get_current_segment()[0] in self.available_images[i]:
self.im_index = i
self._load_image()
def _load_image(self):
# Loads the optical and segmented image data from disk. Should only be called from
# load_next_image method.
full_image_name = self.available_images[self.im_index]
self.im_name = os.path.splitext(os.path.split(full_image_name)[1])[0]
src_ds = gdal.Open(full_image_name, gdal.GA_ReadOnly)
# Read the image date from the metadata
metadata = src_ds.GetMetadata()
self.im_date = pp.parse_metadata(metadata, self.im_type)
# Determine the datatype
src_dtype = gdal.GetDataTypeSize(src_ds.GetRasterBand(1).DataType)
# Calculate the reference points from the image histogram
lower, upper, wb_ref, br_ref = pp.histogram_threshold(src_ds, src_dtype)
self.wb_ref = np.array(wb_ref, dtype=c_uint8)
self.br_ref = np.array(br_ref, dtype=c_uint8)
# Load the image data
image_data = src_ds.ReadAsArray()
# Close the GDAL dataset
src_ds = None
# Rescale the input dataset using a histogram stretch
image_data = pp.rescale_band(image_data, lower, upper)
# Apply a white balance to the image
image_data = pp.white_balance(image_data, self.wb_ref.astype(np.float64), float(np.amax(self.wb_ref)))
# Convert the input data to c_uint8
self.original_image = np.ndarray.astype(image_data, c_uint8)
print("Creating segments on provided image...")
watershed_image = segment_image(image_data, image_type=self.im_type)
# Convert the segmented image to the c_uint32 datatype. This is needed for the
# Cython methods that calculate attributes of segments.
self.segmented_image = np.ndarray.astype(watershed_image, c_uint32)
# Clear these from memory explicitly
image_data = None
watershed_image = None
def load_training_data(self):
try:
with h5py.File(self.tds_filename, 'r') as data_file:
# Load the existing feature matrix and segment list if they exist,
# otherwise initialize an empty array for these lists.
if 'feature_matrix' in list(data_file.keys()):
self.feature_matrix = data_file['feature_matrix'][:].tolist()
else:
self.feature_matrix = []
if 'segment_list' in list(data_file.keys()):
# For loading files created in py2
self.segment_list = [[name[0].decode(), name[1].decode()] for name in data_file['segment_list']]
else:
self.segment_list = []
# Determine if this user has data already stored in the training set. If so,
# use the existing classifications. If not, start from the beginning.
# must use .tolist() because datasets in h5py files are numpy arrays, and we want
# these as python lists.
# [y1...yn] column vector where n : number of classified segments, y = classification
if self.username in list(data_file.keys()):
self.label_vector = data_file[self.username][:].tolist()
else:
self.label_vector = []
# If the file does not exist, create empty values
except OSError:
self.feature_matrix = []
self.segment_list = []
self.label_vector = []
def get_num_labels(self):
return len(self.label_vector)
def append_label(self, label):
self.tracker += 1
self.label_vector.append(label)
# Removes the last entry from label_vector
def remove_last_label(self):
self.label_vector.pop()
self.tracker -= 1
def get_num_segments(self):
return len(self.segment_list)
# The current segment is the next one that doesn't have an associated label
def get_current_segment(self):
return self.segment_list[len(self.label_vector)]
def add_single_segment(self, new_segment):
self.segment_list.append(new_segment)
# Trims all unclassified segments from segment_list by trimming it to
# the length of label_vector
def trim_segment_list(self):
self.segment_list = self.segment_list[:len(self.label_vector)]
# Add 10 randomly selected segments to the list of ones to classify
def add_segments(self):
segments_to_add = []
a = 0
# Select random x,y coordinates from the input image, and pick the segment where the random
# pixel lands. This makes the selected segments representative of the average surface
# distribution within the image. This still won't work if the image has a minority of any
# particular surface type.
while len(segments_to_add)<10:
a += 1
z, x, y = np.shape(self.original_image)
i = np.random.randint(x)
j = np.random.randint(y)
# Find the segment label at the random pixel
segment_id = self.segmented_image[i][j]
sp_size = np.sum(self.segmented_image == segment_id)
if sp_size >= 20:
# Check for a duplicate segment already in the tds
new_segment = [self.im_name,
"{}".format(segment_id)]
if new_segment not in self.segment_list and new_segment not in segments_to_add:
segments_to_add.append(new_segment)
print(("Attempts: {}".format(a)))
self.segment_list += segments_to_add
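    # For example, in an image whose surface is roughly 70% snow-covered ice,
    # about 7 of the 10 segments queued here will, on average, be snow
    # segments, so the training queue roughly mirrors the surface-type
    # distribution of the scene.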
def compute_attributes(self, segment_id):
# Create the attribute list for the labeled segment
feature_array = calc_attributes(self.original_image, self.segmented_image,
self.wb_ref, self.br_ref, self.im_date, segment_id, self.im_type)
# attribute_calculations returns a 2d array, but we only want the 1d list of features.
feature_array = feature_array[0]
return feature_array
def append_features(self, feature_array):
# If there are fewer features than labels, assume the new one should be appended
# to the end
if len(self.feature_matrix) == len(self.label_vector) - 1:
#Adding all of the features found for this watershed to the main matrix
self.feature_matrix.append(feature_array)
# Otherwise replace the existing features with the newly calculated ones.
# (Maybe just skip this in the future and assume they were calculated correctly before?)
else:
# old_feature_array = self.feature_matrix[len(self.label_vector) - 1]
print("Recalculated Feature.")
# print(("Old: {} {}".format(old_feature_array[0], old_feature_array[1])))
# print(("New: {} {}".format(feature_array[0], feature_array[1])))
self.feature_matrix[len(self.label_vector) - 1] = feature_array
class EventManager:
def __init__(self, parent):
self.parent = parent
self.is_active = False # Prevents events from happening while images are loading
def activate(self):
self.is_active = True
def deactivate(self):
self.is_active = False
def next_segment(self):
if not self.is_active:
return
# If all of the segments in the predefined list have been classified already,
# present the user with a random new segment.
if self.parent.data.get_num_labels() == self.parent.data.get_num_segments():
# (The segment_list == [] case should be covered by the check above.)
self.parent.data.add_segments()
# retrain the random forest model if the live predictor is active
if self.parent.live_predictor.is_active():
self.parent.live_predictor.retrain_model(self.parent.data.feature_matrix,
self.parent.data.label_vector)
# The current segment is the next one that doesn't have an associated label
current_segment = self.parent.data.get_current_segment()
segment_id = int(current_segment[1])
# Redraw the display with the new segment id
self.parent.image_display.update_images(segment_id)
def previous_segment(self):
if not self.is_active:
return
# Make sure this function returns null if there is no previous sp to go back to
if self.parent.data.get_num_labels() == 0:
return
else:
# Delete the last label in the list, then get the 'new' current segment
self.parent.data.remove_last_label()
current_segment = self.parent.data.get_current_segment()
self.parent.progress_bar.update_progress()
if current_segment[0] != self.parent.data.im_name:
self.previous_image()
return
segment_id = int(current_segment[1])
# Redraw the display with the new segment id
self.parent.image_display.update_images(segment_id)
def onclick(self, event):
if not self.is_active:
return
if event.inaxes is not None:
axes_properties = event.inaxes.properties()
segment_id = -1
x, y = 0, 0
# If the mouse click was in the overview image
if axes_properties['label'] == 'ax3':
x = int(event.xdata)
y = int(event.ydata)
segment_id = self.parent.data.segmented_image[y, x]
# Either of the top zoomed windows
if axes_properties['label'] == 'ax1' or axes_properties['label'] == 'ax2':
win_x = int(event.xdata)
win_y = int(event.ydata)
x = self.parent.image_display.zoom_win_x + win_x
y = self.parent.image_display.zoom_win_y + win_y
segment_id = self.parent.data.segmented_image[y, x]
# If user clicked on a valid location, add the segment that was clicked on to segment_list,
# then update the image render.
if segment_id >= 0:
print(("You clicked at ({}, {}) in {}".format(x, y, axes_properties['label'])))
print(("Segment id: {}".format(segment_id)))
new_segment = [self.parent.data.im_name,
"{}".format(segment_id)]
if new_segment not in self.parent.data.segment_list:
# Trim all unclassified segments
self.parent.data.trim_segment_list()
# Add the selected one as the next segment
self.parent.data.add_single_segment(new_segment)
# Get the new current segment and redraw display
segment_id = int(self.parent.data.get_current_segment()[1])
self.parent.image_display.update_images(segment_id)
else:
print("This segment has already been labeled")
def classify(self, key_press):
if not self.is_active:
return
# Assigning the highlighted segment a classification
segment_id = int(self.parent.data.get_current_segment()[1])
print("Segment ID: {}".format(segment_id))
# Note that we classified one more image
if key_press == "snow":
self.parent.data.append_label(1)
elif key_press == "gray":
self.parent.data.append_label(2)
elif key_press == "melt":
self.parent.data.append_label(3)
elif key_press == "water":
self.parent.data.append_label(4)
elif key_press == "shadow":
self.parent.data.append_label(5)
elif key_press == "unknown":
self.parent.data.append_label(6)
# Calculate the attributes for the current segment
feature_array = self.parent.data.compute_attributes(segment_id)
self.parent.data.append_features(feature_array)
# Printing some useful statistics
print("Assigned value: {} ({})".format(str(self.parent.data.label_vector[-1]), key_press))
if self.parent.live_predictor.is_active():
self.parent.live_predictor.print_prediction(feature_array)
print(("~"*80))
self.parent.progress_bar.update_progress()
self.next_segment()
# if len(self.feature_matrix) == len(self.label_vector)-1:
# #Adding all of the features found for this watershed to the main matrix
# self.feature_matrix.append(feature_array)
# else:
# old_feature_array = self.feature_matrix[len(self.label_vector)-1]
# print("Recalculated Feature.")
# print(("Old: {} {}".format(old_feature_array[0],old_feature_array[1])))
# print(("New: {} {}".format(feature_array[0], feature_array[1])))
# self.feature_matrix[len(self.label_vector)-1] = feature_array
def autorun(self):
if not self.is_active:
return
# In the future make this function a standalone window (instead of terminal output)??
# Prevent the user from accessing this if the predictor is inactive
if not self.parent.live_predictor.is_active():
print("Autorun functionality disabled")
return
# segment_id = int(self.segment_list[len(self.label_vector):][0][1])
segment_id = int(self.parent.data.get_current_segment()[1])
# Create the attribute list for the labeled segment
feature_array = self.parent.data.compute_attributes(segment_id)
# feature_array = calc_attributes(self.original_image, self.secondary_image,
# self.wb_ref, self.br_ref, self.im_date, segment_id, self.im_type)
print("~" * 80)
# This both prints the results of the prediction for the user to check, and also returns the
# predicted values for use here.
pred, proba = self.parent.live_predictor.print_prediction(feature_array)
if 0.90 < proba < 0.96:
timeout = 4 #6
print((PrintColor.BOLD + "Label if incorrect:" + PrintColor.END))
elif proba < .9:
timeout = 10 #12
print((PrintColor.BOLD + PrintColor.RED + "Label if incorrect:" + PrintColor.END))
else:
timeout = 0.5
# Prompt the user to change the classification if they don't agree with the
# predicted one. If no input is received, the predicted one is assumed to be correct.
rlist, _, _ = select([sys.stdin], [], [], timeout)
if rlist:
s = sys.stdin.readline()
try:
s = int(s)
except ValueError:
print("Ending autorun.")
return
if 0 <= s < 6:
label = s
print(("Assigning label {} instead.".format(label)))
else:
print("Ending autorun.")
return
else:
label = pred
print(("No input. Assigning label: {}".format(label)))
self.parent.data.append_label(label)
self.parent.data.append_features(feature_array)
self.parent.progress_bar.update_progress()
self.next_segment()
self.parent.after(100, self.autorun)
def save(self):
if self.parent.data.label_vector == []:
return
print("Saving...")
username = self.parent.data.username
prev_names = []
prev_data = []
try:
with h5py.File(self.parent.data.tds_filename, 'r') as infile:
# Compiles all of the user data that was in the previous training validation file so that
# it can be added to the new file as well. (Because erasing and recreating a .h5 is easier
# than altering an existing one)
for prev_user in list(infile.keys()):
if prev_user != 'feature_matrix' and prev_user != 'segment_list' and prev_user != username:
prev_names.append(prev_user)
prev_data.append(infile[prev_user][:])
infile.close()
except OSError:
pass
# overwrite the h5 dataset with the updated information
with h5py.File(self.parent.data.tds_filename, 'w') as outfile:
outfile.create_dataset('feature_matrix', data=self.parent.data.feature_matrix)
outfile.create_dataset(username, data=self.parent.data.label_vector)
segment_list = np.array(self.parent.data.segment_list, dtype=np.string_)
outfile.create_dataset('segment_list', data=segment_list)
for i in range(len(prev_names)):
outfile.create_dataset(prev_names[i], data=prev_data[i])
print("Done.")
def next_image(self):
if not self.is_active:
return
self.deactivate()
# Trim the unlabeled segments from segment list
self.parent.data.trim_segment_list()
# Save the existing data
self.save()
# Set the display to the loading screen
self.parent.after(10, self.parent.image_display.loading_display())
# Load the next image data
self.parent.data.load_next_image()
# Add the new data to the display class
self.parent.image_display.initialize_image()
# Update the display screen
# Go to the next segment (which will add additional segments to the queue and update the display)
self.parent.progress_bar.update_progress()
self.activate()
self.next_segment()
def previous_image(self):
self.deactivate()
# Set the display to the loading screen
self.parent.after(10, self.parent.image_display.loading_display())
# Load the previous image data
self.parent.data.load_previous_image()
# Add the new data to the display class
self.parent.image_display.initialize_image()
# Update the display screen
# Go to the next segment (which will add additional segments to the queue and update the display)
self.parent.progress_bar.update_progress()
self.activate()
self.next_segment()
def initialize_image(self):
if len(self.parent.data.available_images) == 0:
print("No images to load!")
return
# Check to make sure no data has been loaded
if self.parent.data.im_name is not None:
return
# Previous image does all the loading work we need for the first image
self.previous_image()
def quit_event(self):
# Exits the GUI, automatically saves progress
self.save()
self.parent.exit_gui()
class LivePredictor:
def __init__(self, active_state):
self.active_state = active_state
self.is_trained = False
self.rfc = RandomForestClassifier(n_estimators=100)
# True if LivePredictor is running, false otherwise
def is_active(self):
return self.active_state
def retrain_model(self, feature_matrix, label_vector):
if len(label_vector) >= 10:
self.rfc.fit(feature_matrix[:len(label_vector)], label_vector)
self.is_trained = True
def print_prediction(self, feature_array):
if self.is_trained:
pred = self.rfc.predict(feature_array.reshape(1, -1))[0]
pred_prob = self.rfc.predict_proba(feature_array.reshape(1, -1))[0]
pred_prob = np.amax(pred_prob)
print(("Predicted value: {}{}{} ({})".format(PrintColor.PURPLE, pred, PrintColor.END, pred_prob)))
return pred, pred_prob
else:
return 0, 0
class TrainingWindow(tk.Frame):
def __init__(self, parent, img_list, tds_filename, username, im_type, activate_autorun=False):
tk.Frame.__init__(self, parent)
self.parent = parent
self.parent.title("Training GUI")
# Create the controlling buttons and place them on the right side.
self.buttons = Buttons(self)
self.buttons.grid(column=1, row=1, sticky="N")
# Manager for all the GUI events (e.g. button presses)
self.event_manager = EventManager(self)
# Data manager object
self.data = DataManager(img_list, tds_filename, username, im_type)
self.data.load_training_data()
# Create the image display window
self.image_display = ImageDisplay(self)
self.image_display.grid(column=0, row=0, rowspan=2)
self.progress_bar = ProgressBar(self)
self.progress_bar.grid(column=1, row=0)
self.progress_bar.update_progress()
# Object for creating on the fly predictions and managing the auto_run method
self.live_predictor = LivePredictor(activate_autorun)
# Define keybindings
self.parent.bind('1', lambda e: self.event_manager.classify("snow"))
self.parent.bind('2', lambda e: self.event_manager.classify("gray"))
self.parent.bind('3', lambda e: self.event_manager.classify("melt"))
self.parent.bind('4', lambda e: self.event_manager.classify("water"))
self.parent.bind('5', lambda e: self.event_manager.classify("shadow"))
self.parent.bind('<Tab>', lambda e: self.event_manager.classify("unknown"))
self.parent.bind('<BackSpace>', lambda e: self.event_manager.previous_segment())
def exit_gui(self):
self.parent.quit()
self.parent.destroy()
def calc_attributes(original_image, secondary_image,
wb_ref, br_ref, im_date, segment_id, im_type):
feature_array = []
if im_type == 'pan':
feature_array = attr_calc.analyze_pan_image(original_image,
secondary_image,
im_date,
segment_id=segment_id)
if im_type == 'srgb':
feature_array = attr_calc.analyze_srgb_image(original_image,
secondary_image,
segment_id=segment_id)
if im_type == 'wv02_ms':
feature_array = attr_calc.analyze_ms_image(original_image,
secondary_image,
wb_ref,
br_ref,
segment_id=segment_id)
return feature_array
# Returns all of the unique images in segment_list
def get_required_images(segment_list):
image_list = []
for seg_id in segment_list:
if not seg_id[0] in image_list:
image_list.append(seg_id[0])
return image_list
def validate_tds_file(tds_filename, input_dir, image_type):
# Set the default tds filename if this was not entered
if tds_filename is None:
tds_filename = os.path.join(input_dir, image_type + "_training_data.h5")
elif os.path.isfile(tds_filename):
# If a real file was given, try opening it.
try:
data_file = h5py.File(tds_filename, 'r')
data_file.close()
except OSError:
print("Invalid data file.")
quit()
return tds_filename
# Finds all the unique images from the given directory
def scrape_dir(src_dir):
image_list = []
for ext in utils.valid_extensions:
raw_list = utils.get_image_paths(src_dir, keyword=ext)
for raw_im in raw_list:
image_list.append(raw_im)
# Save only the unique entries
image_list = list(set(image_list))
utils.remove_hidden(image_list)
return image_list
if __name__ == "__main__":
#### Set Up Arguments
parser = argparse.ArgumentParser()
parser.add_argument("input",
help="folder containing training images")
parser.add_argument("image_type", type=str, choices=['srgb','wv02_ms','pan'],
help="image type: 'srgb', 'wv02_ms', 'pan'")
parser.add_argument("--tds_file", type=str, default=None,
help='''Existing training dataset file. Will create a new one with this name if none exists.
default: <image_type>_training_data.h5''')
parser.add_argument("--username", type=str, default=None,
help='''username to associate with the training set.
default: image_type''')
parser.add_argument("-a", "--enable_autorun", action="store_true",
help='''Enables the use of the autorun function.''')
# Parse Arguments
args = parser.parse_args()
input_dir = os.path.abspath(args.input)
image_type = args.image_type
autorun_flag = args.enable_autorun
# Add the images in the provided folder to the image list
img_list = scrape_dir(input_dir)
tds_file = validate_tds_file(args.tds_file, input_dir, image_type)
if args.username is None:
user_name = image_type
else:
user_name = args.username
root = tk.Tk()
TrainingWindow(root, img_list, tds_file, user_name, image_type,
activate_autorun=autorun_flag).pack(side='top', fill='both', expand=True)
root.mainloop()
| mit |
HolgerPeters/scikit-learn | sklearn/metrics/tests/test_ranking.py | 46 | 41270 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
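# Worked example (sketch): with y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8], the positive scores are [0.35, 0.8] and the
# negative scores are [0.1, 0.4]; 3 of the 4 (pos, neg) pairs are correctly
# ordered, so _auc returns 3 / 4 = 0.75, matching roc_auc_score.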
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
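# Worked example (sketch): with y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8], the descending-score label order is
# [1, 0, 1, 0]; precision at the two relevant positions is 1/1 and 2/3,
# so this implementation returns (1 + 2/3) / 2 = 5/6.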
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve that starts at 0 and ends at 1,
# even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no positive sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no negative sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
# This test was expanded (added scaled_down) in response to github
# issue #3864 (and others), where overly aggressive rounding was causing
# problems for users with very small y_score values
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled_up = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_scaled_down = roc_auc_score(y_true, 1e-6 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled_up)
assert_equal(roc_auc, roc_auc_scaled_down)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled_up = average_precision_score(y_true, 100 * probas_pred)
pr_auc_scaled_down = average_precision_score(y_true, 1e-6 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled_up)
assert_equal(pr_auc, pr_auc_scaled_down)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for growing number of consecutive relevant
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works for various label space
# sizes: a basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for growing number of consecutive relevant label
for n_relevant in range(1, n_labels):
# Check for a bunch of position
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
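# Worked example (sketch of the closed form above): with n_labels = 4,
# n_relevant = 2 and pos = 1, the relevant labels sit at ranks 2 and 3, so the
# expected LRAP is (1/2 + 2/3) / 2 = 7/12, which equals
# sum((r + 1) / ((pos + r + 1) * n_relevant) for r in range(2)).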
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account,
# e.g. two labels tied at rank 1 ("ex aequo") are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the relevant labels ranked at least as well as this label
# (i.e. with a smaller or equal rank, the label itself included).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
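# Tie-handling sketch: for y_score[i] = [0.5, 0.5, 0.25], np.unique gives
# unique_rank = [0.25, 0.5] and inv_rank = [1, 1, 0], so rank = [1, 1, 2]
# before correction, and corr_rank turns both tied labels into rank 2
# (rank = [2, 2, 3]), matching the tie convention described in the comments
# above.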
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
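# Illustrative sketch (not part of the original test suite): coverage_error
# counts, per sample, how far down the score-sorted label list one must go
# to cover every relevant label, then averages over samples.  With the toy
# values below (assumptions for this example only), sample 0 needs 1 label
# (its single relevant label has the top score) and sample 1 needs 3 (its
# worst-ranked relevant label comes last), so the expected value is
# (1 + 3) / 2 = 2.
def _example_coverage_error_by_hand():
    y_true = [[0, 1, 0], [1, 1, 0]]
    y_score = [[0.1, 10., -3.], [0., 1., 3.]]
    assert_almost_equal(coverage_error(y_true, y_score), (1 + 3) / 2.)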
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
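# Illustrative sketch (not part of the original test suite): the ranking
# loss is, per sample, the fraction of (relevant, irrelevant) label pairs
# ordered incorrectly by the scores, averaged over samples.  With the toy
# values below (assumptions for this example only), sample 0 ranks its
# relevant label above both irrelevant ones (loss 0), while sample 1 ranks
# its irrelevant label above both relevant ones (loss 2 / 2 = 1), giving
# (0 + 1) / 2 = 0.5.
def _example_label_ranking_loss_by_hand():
    y_true = [[0, 1, 0], [1, 1, 0]]
    y_score = [[0.1, 10., -3.], [0., 1., 3.]]
    assert_almost_equal(label_ranking_loss(y_true, y_score), 0.5)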
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
dsm054/pandas | asv_bench/benchmarks/io/csv.py | 3 | 7375 | import random
import string
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Categorical, date_range, read_csv
from pandas.compat import cStringIO as StringIO
from ..pandas_vb_common import BaseIO
class ToCSV(BaseIO):
fname = '__test__.csv'
params = ['wide', 'long', 'mixed']
param_names = ['kind']
def setup(self, kind):
wide_frame = DataFrame(np.random.randn(3000, 30))
long_frame = DataFrame({'A': np.arange(50000),
'B': np.arange(50000) + 1.,
'C': np.arange(50000) + 2.,
'D': np.arange(50000) + 3.})
mixed_frame = DataFrame({'float': np.random.randn(5000),
'int': np.random.randn(5000).astype(int),
'bool': (np.arange(5000) % 2) == 0,
'datetime': date_range('2001',
freq='s',
periods=5000),
'object': ['foo'] * 5000})
mixed_frame.loc[30:500, 'float'] = np.nan
data = {'wide': wide_frame,
'long': long_frame,
'mixed': mixed_frame}
self.df = data[kind]
def time_frame(self, kind):
self.df.to_csv(self.fname)
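# Illustrative sketch (not part of the benchmark suite): asv normally runs
# these classes itself (``asv run``), but a benchmark can be sanity-checked
# by hand by instantiating it and calling ``setup`` followed by the timed
# method with the same parameter value.  The 'mixed' kind used here is just
# one of the declared params.
def _example_run_tocsv_by_hand():
    bench = ToCSV()
    bench.setup('mixed')       # builds the parametrised DataFrame
    bench.time_frame('mixed')  # the statement asv actually times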
class ToCSVDatetime(BaseIO):
fname = '__test__.csv'
def setup(self):
rng = date_range('1/1/2000', periods=1000)
self.data = DataFrame(rng, index=rng)
def time_frame_date_formatting(self):
self.data.to_csv(self.fname, date_format='%Y%m%d')
class StringIORewind(object):
def data(self, stringio_object):
stringio_object.seek(0)
return stringio_object
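# Illustrative sketch (not part of the benchmark suite): StringIORewind
# exists so the read_csv benchmarks can re-read the same in-memory CSV on
# every timing iteration -- ``data()`` just seeks the buffer back to the
# start.  A minimal use, with an assumed two-row CSV, would be:
def _example_stringio_rewind():
    helper = StringIORewind()
    buf = StringIO('a,b\n1,2\n')
    first = read_csv(helper.data(buf))
    second = read_csv(helper.data(buf))  # works because data() rewinds buf
    return first, second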
class ReadCSVDInferDatetimeFormat(StringIORewind):
params = ([True, False], ['custom', 'iso8601', 'ymd'])
param_names = ['infer_datetime_format', 'format']
def setup(self, infer_datetime_format, format):
rng = date_range('1/1/2000', periods=1000)
formats = {'custom': '%m/%d/%Y %H:%M:%S.%f',
'iso8601': '%Y-%m-%d %H:%M:%S',
'ymd': '%Y%m%d'}
dt_format = formats[format]
self.StringIO_input = StringIO('\n'.join(
rng.strftime(dt_format).tolist()))
def time_read_csv(self, infer_datetime_format, format):
read_csv(self.data(self.StringIO_input),
header=None, names=['foo'], parse_dates=['foo'],
infer_datetime_format=infer_datetime_format)
class ReadCSVSkipRows(BaseIO):
fname = '__test__.csv'
params = [None, 10000]
param_names = ['skiprows']
def setup(self, skiprows):
N = 20000
index = tm.makeStringIndex(N)
df = DataFrame({'float1': np.random.randn(N),
'float2': np.random.randn(N),
'string1': ['foo'] * N,
'bool1': [True] * N,
'int1': np.random.randint(0, N, size=N)},
index=index)
df.to_csv(self.fname)
def time_skipprows(self, skiprows):
read_csv(self.fname, skiprows=skiprows)
class ReadUint64Integers(StringIORewind):
def setup(self):
self.na_values = [2**63 + 500]
arr = np.arange(10000).astype('uint64') + 2**63
self.data1 = StringIO('\n'.join(arr.astype(str).tolist()))
arr = arr.astype(object)
arr[500] = -1
self.data2 = StringIO('\n'.join(arr.astype(str).tolist()))
def time_read_uint64(self):
read_csv(self.data(self.data1), header=None, names=['foo'])
def time_read_uint64_neg_values(self):
read_csv(self.data(self.data2), header=None, names=['foo'])
def time_read_uint64_na_values(self):
read_csv(self.data(self.data1), header=None, names=['foo'],
na_values=self.na_values)
class ReadCSVThousands(BaseIO):
fname = '__test__.csv'
params = ([',', '|'], [None, ','])
param_names = ['sep', 'thousands']
def setup(self, sep, thousands):
N = 10000
K = 8
data = np.random.randn(N, K) * np.random.randint(100, 10000, (N, K))
df = DataFrame(data)
if thousands is not None:
fmt = ':{}'.format(thousands)
fmt = '{' + fmt + '}'
df = df.applymap(lambda x: fmt.format(x))
df.to_csv(self.fname, sep=sep)
def time_thousands(self, sep, thousands):
read_csv(self.fname, sep=sep, thousands=thousands)
class ReadCSVComment(StringIORewind):
def setup(self):
data = ['A,B,C'] + (['1,2,3 # comment'] * 100000)
self.StringIO_input = StringIO('\n'.join(data))
def time_comment(self):
read_csv(self.data(self.StringIO_input), comment='#',
header=None, names=list('abc'))
class ReadCSVFloatPrecision(StringIORewind):
params = ([',', ';'], ['.', '_'], [None, 'high', 'round_trip'])
param_names = ['sep', 'decimal', 'float_precision']
def setup(self, sep, decimal, float_precision):
floats = [''.join(random.choice(string.digits) for _ in range(28))
for _ in range(15)]
rows = sep.join(['0{}'.format(decimal) + '{}'] * 3) + '\n'
data = rows * 5
data = data.format(*floats) * 200 # 1000 x 3 strings csv
self.StringIO_input = StringIO(data)
def time_read_csv(self, sep, decimal, float_precision):
read_csv(self.data(self.StringIO_input), sep=sep, header=None,
names=list('abc'), float_precision=float_precision)
def time_read_csv_python_engine(self, sep, decimal, float_precision):
read_csv(self.data(self.StringIO_input), sep=sep, header=None,
engine='python', float_precision=None, names=list('abc'))
class ReadCSVCategorical(BaseIO):
fname = '__test__.csv'
def setup(self):
N = 100000
group1 = ['aaaaaaaa', 'bbbbbbb', 'cccccccc', 'dddddddd', 'eeeeeeee']
df = DataFrame(np.random.choice(group1, (N, 3)), columns=list('abc'))
df.to_csv(self.fname, index=False)
def time_convert_post(self):
read_csv(self.fname).apply(Categorical)
def time_convert_direct(self):
read_csv(self.fname, dtype='category')
class ReadCSVParseDates(StringIORewind):
def setup(self):
data = """{},19:00:00,18:56:00,0.8100,2.8100,7.2000,0.0000,280.0000\n
{},20:00:00,19:56:00,0.0100,2.2100,7.2000,0.0000,260.0000\n
{},21:00:00,20:56:00,-0.5900,2.2100,5.7000,0.0000,280.0000\n
{},21:00:00,21:18:00,-0.9900,2.0100,3.6000,0.0000,270.0000\n
{},22:00:00,21:56:00,-0.5900,1.7100,5.1000,0.0000,290.0000\n
"""
two_cols = ['KORD,19990127'] * 5
data = data.format(*two_cols)
self.StringIO_input = StringIO(data)
def time_multiple_date(self):
read_csv(self.data(self.StringIO_input), sep=',', header=None,
names=list(string.digits[:9]),
parse_dates=[[1, 2], [1, 3]])
def time_baseline(self):
read_csv(self.data(self.StringIO_input), sep=',', header=None,
parse_dates=[1],
names=list(string.digits[:9]))
from ..pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
blaze/distributed | distributed/protocol/tests/test_pandas.py | 1 | 2399 | import numpy as np
import pandas as pd
import pytest
from dask.dataframe.utils import assert_eq
from distributed.protocol import serialize, deserialize, decompress
dfs = [
pd.DataFrame({}),
pd.DataFrame({"x": [1, 2, 3]}),
pd.DataFrame({"x": [1.0, 2.0, 3.0]}),
pd.DataFrame({0: [1, 2, 3]}),
pd.DataFrame({"x": [1.0, 2.0, 3.0], "y": [4.0, 5.0, 6.0]}),
pd.DataFrame({"x": [1.0, 2.0, 3.0]}, index=pd.Index([4, 5, 6], name="bar")),
pd.Series([1.0, 2.0, 3.0]),
pd.Series([1.0, 2.0, 3.0], name="foo"),
pd.Series([1.0, 2.0, 3.0], name="foo", index=[4, 5, 6]),
pd.Series([1.0, 2.0, 3.0], name="foo", index=pd.Index([4, 5, 6], name="bar")),
pd.DataFrame({"x": ["a", "b", "c"]}),
pd.DataFrame({"x": [b"a", b"b", b"c"]}),
pd.DataFrame({"x": pd.Categorical(["a", "b", "a"], ordered=True)}),
pd.DataFrame({"x": pd.Categorical(["a", "b", "a"], ordered=False)}),
pd.Index(pd.Categorical(["a"], categories=["a", "b"], ordered=True)),
pd.date_range("2000", periods=12, freq="B"),
pd.RangeIndex(10),
pd.DataFrame(
"a",
index=pd.Index(["a", "b", "c", "d"], name="a"),
columns=pd.Index(["A", "B", "C", "D"], name="columns"),
),
pd.DataFrame(
np.random.randn(10, 5), columns=list("ABCDE"), index=list("abcdefghij")
),
pd.DataFrame(
np.random.randn(10, 5), columns=list("ABCDE"), index=list("abcdefghij")
).where(lambda x: x > 0),
pd.DataFrame(
{
"a": [0.0, 0.1],
"B": [0.0, 1.0],
"C": ["a", "b"],
"D": pd.to_datetime(["2000", "2001"]),
}
),
pd.Series(["a", "b", "c"], index=["a", "b", "c"]),
pd.DataFrame(
np.random.randn(10, 5),
columns=list("ABCDE"),
index=pd.period_range("2000", periods=10, freq="B"),
),
pd.DataFrame(
np.random.randn(10, 5),
columns=list("ABCDE"),
index=pd.date_range("2000", periods=10, freq="B"),
),
pd.Series(
np.random.randn(10), name="a", index=pd.date_range("2000", periods=10, freq="B")
),
pd.Index(["סשםקה7ךשץא", "8טלכז6לרפל"]),
]
@pytest.mark.parametrize("df", dfs)
def test_dumps_serialize_numpy(df):
header, frames = serialize(df)
if "compression" in header:
frames = decompress(header, frames)
df2 = deserialize(header, frames)
assert_eq(df, df2)
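# Illustrative sketch (not part of the original tests): the round trip the
# parametrized test above performs, written out for a single frame.  The
# frame used here is an assumption chosen only for illustration.
def _example_roundtrip_single_frame():
    df = pd.DataFrame({"x": [1, 2, 3]})
    header, frames = serialize(df)
    if "compression" in header:
        frames = decompress(header, frames)
    assert_eq(df, deserialize(header, frames))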
| bsd-3-clause |
ericxk/MachineLearningExercise | ML_in_action/chapter7/adaboost.py | 1 | 6050 | from numpy import *
def loadSimpData():
datMat = matrix([[ 1. , 2.1],
[ 2. , 1.1],
[ 1.3, 1. ],
[ 1. , 1. ],
[ 2. , 1. ]])
classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
return datMat,classLabels
##Classify the data by comparing it with a threshold; samples on one side of the threshold are assigned to class -1 ('lt' means less than or equal)
def stumpClassify(dataMatrix,dimen,threshVal,threshIneq):#just classify the data
retArray = ones((shape(dataMatrix)[0],1))
if threshIneq == 'lt':
retArray[dataMatrix[:,dimen] <= threshVal] = -1.0
else:
retArray[dataMatrix[:,dimen] > threshVal] = -1.0
return retArray
##Iterate over all possible inputs to stumpClassify and find the best decision stump for the data set
##The first few lines just convert the inputs to matrix format
##D is the weight vector; bestStump is the (initially empty) dict that will hold the best stump found for this D
##numSteps sets how many steps are used to sweep each feature's range of values; minError is the minimum error rate
def buildStump(dataArr,classLabels,D):
dataMatrix = mat(dataArr); labelMat = mat(classLabels).T
m,n = shape(dataMatrix)
numSteps = 10.0; bestStump = {}; bestClasEst = mat(zeros((m,1)))
minError = inf #init error sum, to +infinity
for i in range(n):#loop over all dimensions
rangeMin = dataMatrix[:,i].min(); rangeMax = dataMatrix[:,i].max();
stepSize = (rangeMax-rangeMin)/numSteps
for j in range(-1,int(numSteps)+1):#loop over all range in current dimension
for inequal in ['lt', 'gt']: #go over less than and greater than
threshVal = (rangeMin + float(j) * stepSize)
predictedVals = stumpClassify(dataMatrix,i,threshVal,inequal)#call stump classify with i, j, lessThan
errArr = mat(ones((m,1)))
errArr[predictedVals == labelMat] = 0
weightedError = D.T*errArr #calc total error multiplied by D
#print "split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError)
if weightedError < minError:
minError = weightedError
bestClasEst = predictedVals.copy()
bestStump['dim'] = i
bestStump['thresh'] = threshVal
bestStump['ineq'] = inequal
return bestStump,minError,bestClasEst
##AdaBoost training procedure based on decision stumps (single-level decision trees)
def adaBoostTrainDS(dataArr,classLabels,numIt=40):
weakClassArr = []
m = shape(dataArr)[0]
D = mat(ones((m,1))/m) #init D to all equal
aggClassEst = mat(zeros((m,1)))
for i in range(numIt):
bestStump,error,classEst = buildStump(dataArr,classLabels,D)#build Stump
#print "D:",D.T
alpha = float(0.5*log((1.0-error)/max(error,1e-16)))#calc alpha, throw in max(error,eps) to account for error=0
bestStump['alpha'] = alpha
weakClassArr.append(bestStump) #store Stump Params in Array
#print "classEst: ",classEst.T
expon = multiply(-1*alpha*mat(classLabels).T,classEst) #exponent for D calc, getting messy
D = multiply(D,exp(expon)) #Calc New D for next iteration
D = D/D.sum()
#calc training error of all classifiers, if this is 0 quit for loop early (use break)
aggClassEst += alpha*classEst
#print "aggClassEst: ",aggClassEst.T
aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T,ones((m,1)))
errorRate = aggErrors.sum()/m
#print "total error: ",errorRate
if errorRate == 0.0: break
return weakClassArr,aggClassEst
dataMat,classLabels=loadSimpData()
print(adaBoostTrainDS(dataMat,classLabels,4))
def adaClassify(datToClass,classifierArr):
dataMatrix = mat(datToClass)#do stuff similar to last aggClassEst in adaBoostTrainDS
m = shape(dataMatrix)[0]
aggClassEst = mat(zeros((m,1)))
for i in range(len(classifierArr)):
classEst = stumpClassify(dataMatrix,classifierArr[i]['dim'],\
classifierArr[i]['thresh'],\
classifierArr[i]['ineq'])#call stump classify
aggClassEst += classifierArr[i]['alpha']*classEst
        print(aggClassEst)
return sign(aggClassEst)
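##Illustrative sketch (not part of the original chapter code): once weak
##classifiers have been trained with adaBoostTrainDS, adaClassify can label
##new points.  The two query points below are assumptions for this example.
def exampleClassifyNewPoints():
    datMat,classLabels = loadSimpData()
    classifierArr,aggClassEst = adaBoostTrainDS(datMat,classLabels,30)
    return adaClassify([[5,5],[0,0]],classifierArr)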
##General data-loading function that adapts to the number of features
def loadDataSet(fileName): #general function to parse tab -delimited floats
numFeat = len(open(fileName).readline().split('\t')) #get number of fields
dataMat = []; labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr =[]
curLine = line.strip().split('\t')
for i in range(numFeat-1):
lineArr.append(float(curLine[i]))
dataMat.append(lineArr)
labelMat.append(float(curLine[-1]))
return dataMat,labelMat
def plotROC(predStrengths, classLabels):
import matplotlib.pyplot as plt
cur = (1.0,1.0) #cursor
ySum = 0.0 #variable to calculate AUC
numPosClas = sum(array(classLabels)==1.0)
yStep = 1/float(numPosClas); xStep = 1/float(len(classLabels)-numPosClas)
sortedIndicies = predStrengths.argsort()#get sorted index, it's reverse
fig = plt.figure()
fig.clf()
ax = plt.subplot(111)
#loop through all the values, drawing a line segment at each point
for index in sortedIndicies.tolist()[0]:
if classLabels[index] == 1.0:
delX = 0; delY = yStep;
else:
delX = xStep; delY = 0;
ySum += cur[1]
#draw line from cur to (cur[0]-delX,cur[1]-delY)
ax.plot([cur[0],cur[0]-delX],[cur[1],cur[1]-delY], c='b')
cur = (cur[0]-delX,cur[1]-delY)
ax.plot([0,1],[0,1],'b--')
plt.xlabel('False positive rate'); plt.ylabel('True positive rate')
plt.title('ROC curve for AdaBoost horse colic detection system')
ax.axis([0,1,0,1])
plt.show()
print "the Area Under the Curve is: ",ySum*xStep | mit |
anirudhjayaraman/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 98 | 20870 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
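# Illustrative sketch (not part of the original tests): the three outputs
# of lars_path used throughout this module are the alphas at which the
# active set changes, the indices of the active features, and the
# coefficient path with one column per alpha (for method='lasso' the last
# column is the least-squares solution reached at alpha == 0, as the tests
# below check).
def _example_lars_path_outputs():
    alphas, active, coef_path = linear_model.lars_path(
        diabetes.data, diabetes.target, method='lasso')
    return alphas.shape, len(active), coef_path.shape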
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
    # now test the positive option for all the estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However for the middle part, the comparison of coefficient values
    # for a range of alphas, we had to make an adaptation. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
c-m/Licenta | src/data_loader.py | 1 | 5476 | # load datasets from files
import csv
import numpy as np
import os
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
DATA_PATH = '../data_sets/'
GRADE_FEATURES_FILE = 'note_aa.csv'
GRADE_LABELS_FILE = 'note_aa.data'
TRAIN_TO_TEST_RATIO = 0.8
class DatasetContainer(dict):
def __init__(self, **kwargs):
        super(DatasetContainer, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
pass
def load_grades():
"""Load and return student grades dataset from files
"""
program_path = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(program_path, DATA_PATH, GRADE_FEATURES_FILE)) as csv_file:
data = csv.reader(csv_file)
line = next(data)
n_examples = int(line[0])
n_features = int(line[1])
feature_w = float(line[2])
test_adjustment_factor = int(line[3])
feature_names = next(data)
data_set = np.zeros((n_examples, n_features))
discrete_labels_set = np.zeros(n_examples)
for i, ex in enumerate(data):
data_set[i] = np.array(ex[:-1], dtype=np.float)
discrete_labels_set[i] = np.array(ex[-1], dtype=np.int)
with open(os.path.join(program_path, DATA_PATH, GRADE_LABELS_FILE)) as csv_file:
data = csv.reader(csv_file)
continuous_labels_set = np.zeros((n_examples, 2))
for i, ex in enumerate(data):
continuous_labels_set[i] = np.array(ex, dtype=np.float)
n_training = int(np.ceil(n_examples*TRAIN_TO_TEST_RATIO))
n_test = n_examples - n_training
train_data_set = data_set[:n_training]
test_data_set = data_set[n_training:]
train_discrete_labels_set = discrete_labels_set[:n_training]
test_discrete_labels_set = discrete_labels_set[n_training:]
train_continuous_labels_set = continuous_labels_set[:n_training]
test_continuous_labels_set = continuous_labels_set[n_training:]
    return DatasetContainer(train_data=train_data_set, test_data=test_data_set,
train_discrete_labels=train_discrete_labels_set, test_discrete_labels=test_discrete_labels_set,
train_continuous_labels=train_continuous_labels_set, test_continuous_labels=test_continuous_labels_set,
feature_weight=feature_w, test_factor=test_adjustment_factor,
feature_names=feature_names)
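# Illustrative note (inferred from the parsing above, not from the original
# data files): note_aa.csv is expected to start with a header row
# "n_examples,n_features,feature_weight,test_factor", then a row of feature
# names, then one row per student with the feature values plus a final
# discrete grade; note_aa.data holds the matching pairs of continuous
# labels.  A hypothetical two-student note_aa.csv could look like:
#
#     2,3,0.5,2
#     hw1,t1,hw2
#     0.4,1.0,0.3,6
#     0.2,0.4,0.1,3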
def preprocess_data(data, poly_features=False):
"""Apply some preprocessing before using the dataset
"""
#scale test grades to the same interval as hw grades (0-0.5)
train_data = data['train_data']
test_data = data['test_data']
i = 0
for feature in data['feature_names']:
if feature[0] == 't':
train_data[:,i] /= data['test_factor']
test_data[:,i] /= data['test_factor']
i += 1
#PCA visualization
plot_pca = True
if plot_pca:
pca = PCA(n_components=2)
all_examples = np.vstack((train_data, test_data))
X_r = pca.fit(all_examples).transform(all_examples)
        print(all_examples)
        print(pca.components_)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
target_names = ['failed', 'passed']
y = np.hstack((data['train_discrete_labels'], data['test_discrete_labels']))
#transform final_grades for binary classification (failed/passed)
y[y < 5] = 0
y[y >= 5] = 1
for c, i, target_name in zip("rg", [0, 1], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of students dataset')
plt.show()
#scale the dataset to have the mean=0 and variance=1
scaler = StandardScaler()
scaler.fit(train_data)
train_data = scaler.transform(train_data)
#apply same transformation to test_data
test_data = scaler.transform(test_data)
if poly_features == True:
poly = PolynomialFeatures(degree=train_data.shape[1], interaction_only=True)
train_data = poly.fit_transform(train_data)
test_data = poly.fit_transform(test_data)
data['train_data'] = train_data
data['test_data'] = test_data
return data
def main():
students_data = load_grades()
#print students_data['test_data']
students_data = preprocess_data(students_data, poly_features=False)
#print students_data['test_data'][0]
sum_features = np.hstack((students_data['train_continuous_labels'][:,0], students_data['test_continuous_labels'][:,0]))
exam_grades = np.hstack((students_data['train_continuous_labels'][:,1], students_data['test_continuous_labels'][:,1]))
sum_features = sum_features - exam_grades
final_grades = np.hstack((students_data['train_discrete_labels'], students_data['test_discrete_labels']))
#transform final_grades for binary classification (failed/passed)
final_grades[final_grades < 5] = 0
final_grades[final_grades >= 5] = 1
#transform exam_grades to match the four classes (0-1,1-2,2-3,3-4)
exam_grades = np.ceil(exam_grades)
#Encode labels to values: 0,1,2,3
le = LabelEncoder()
le.fit(exam_grades)
exam_grades = le.transform(exam_grades)
plt.xlabel("sum(features) == semester points")
plt.ylabel("exam_grade")
plot_data = plt.plot(sum_features, final_grades, 'ro',
label = 'Final grades based on semester points\n0=failed, 1=passed')
plt.axis([2, 7, -1, 2])
plt.legend()
plt.show()
if __name__ == '__main__':
main()
| mit |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/core/shared_docs.py | 1 | 10730 | from typing import Dict
_shared_docs: Dict[str, str] = {}
_shared_docs[
"aggregate"
] = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a {klass} or when passed to {klass}.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
{axis}
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
{see_also}
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
{examples}"""
_shared_docs[
"compare"
] = """
Compare to another {klass} and show the differences.
.. versionadded:: 1.1.0
Parameters
----------
other : {klass}
Object to compare with.
align_axis : {{0 or 'index', 1 or 'columns'}}, default 1
Determine which axis to align the comparison on.
* 0, or 'index' : Resulting differences are stacked vertically
with rows drawn alternately from self and other.
* 1, or 'columns' : Resulting differences are aligned horizontally
with columns drawn alternately from self and other.
keep_shape : bool, default False
If true, all rows and columns are kept.
Otherwise, only the ones with different values are kept.
keep_equal : bool, default False
If true, the result keeps values that are equal.
Otherwise, equal values are shown as NaNs.
"""
_shared_docs[
"groupby"
] = """
Group %(klass)s using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
.. deprecated:: 1.1.0
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
dropna : bool, default True
If True, and if group keys contain NA values, NA values together
with row/column will be dropped.
If False, NA values will also be treated as the key in groups
.. versionadded:: 1.1.0
Returns
-------
%(klass)sGroupBy
Returns a groupby object that contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
"""
_shared_docs[
"melt"
] = """
Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or str, optional
If columns are a MultiIndex then use this level to melt.
ignore_index : bool, default True
If True, original index is ignored. If False, the original index is retained.
Index labels will be repeated as necessary.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
Unpivoted DataFrame.
See Also
--------
%(other)s : Identical method.
pivot_table : Create a spreadsheet-style pivot table as a DataFrame.
DataFrame.pivot : Return reshaped DataFrame organized
by given index / column values.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Examples
--------
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
Original index values can be kept around:
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'], ignore_index=False)
A variable value
0 a B 1
1 b B 3
2 c B 5
0 a C 2
1 b C 4
2 c C 6
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
"""
_shared_docs[
"transform"
] = """
Call ``func`` on self producing a {klass} with transformed values.
Produced {klass} will have same axis length as self.
Parameters
----------
func : function, str, list-like or dict-like
Function to use for transforming the data. If a function, must either
work when passed a {klass} or when passed to {klass}.apply. If func
is both list-like and dict-like, dict-like behavior takes precedence.
Accepted combinations are:
- function
- string function name
- list-like of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict-like of axis labels -> functions, function names or list-like of such.
{axis}
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
{klass}
A {klass} that must have the same length as self.
Raises
------
ValueError : If the returned {klass} has a different length than self.
See Also
--------
{klass}.agg : Only perform aggregating type operations.
{klass}.apply : Invoke function on a {klass}.
Examples
--------
>>> df = pd.DataFrame({{'A': range(3), 'B': range(1, 4)}})
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> df.transform(lambda x: x + 1)
A B
0 1 2
1 2 3
2 3 4
Even though the resulting {klass} must have the same length as the
input {klass}, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
You can call transform on a GroupBy object:
>>> df = pd.DataFrame({{
... "Date": [
... "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05",
... "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05"],
... "Data": [5, 8, 6, 1, 50, 100, 60, 120],
... }})
>>> df
Date Data
0 2015-05-08 5
1 2015-05-07 8
2 2015-05-06 6
3 2015-05-05 1
4 2015-05-08 50
5 2015-05-07 100
6 2015-05-06 60
7 2015-05-05 120
>>> df.groupby('Date')['Data'].transform('sum')
0 55
1 108
2 66
3 121
4 55
5 108
6 66
7 121
Name: Data, dtype: int64
>>> df = pd.DataFrame({{
... "c": [1, 1, 1, 2, 2, 2, 2],
... "type": ["m", "n", "o", "m", "m", "n", "n"]
... }})
>>> df
c type
0 1 m
1 1 n
2 1 o
3 2 m
4 2 m
5 2 n
6 2 n
>>> df['size'] = df.groupby('c')['type'].transform(len)
>>> df
c type size
0 1 m 3
1 1 n 3
2 1 o 3
3 2 m 4
4 2 m 4
5 2 n 4
6 2 n 4
"""
_shared_docs[
"storage_options"
] = """storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a non-fsspec URL.
See the fsspec and backend storage implementation docs for the set of
allowed keys and values."""
| gpl-2.0 |
raincoatrun/basemap | doc/users/figures/hurrtracks.py | 6 | 1695 | """
draw Atlantic Hurricane Tracks for storms that reached Cat 4 or 5.
The part of the track for which the storm was category 4 or 5 is shown in red.
ESRI shapefile data from http://nationalatlas.gov/mld/huralll.html
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Lambert Conformal Conic map.
m = Basemap(llcrnrlon=-100.,llcrnrlat=0.,urcrnrlon=-20.,urcrnrlat=57.,
projection='lcc',lat_1=20.,lat_2=40.,lon_0=-60.,
resolution ='l',area_thresh=1000.)
# read shapefile.
shp_info = m.readshapefile('../../../examples/huralll020','hurrtracks',drawbounds=False)
# find names of storms that reached Cat 4.
names = []
for shapedict in m.hurrtracks_info:
cat = shapedict['CATEGORY']
name = shapedict['NAME']
if cat in ['H4','H5'] and name not in names:
# only use named storms.
if name != 'NOT NAMED': names.append(name)
# plot tracks of those storms.
for shapedict,shape in zip(m.hurrtracks_info,m.hurrtracks):
name = shapedict['NAME']
cat = shapedict['CATEGORY']
if name in names:
xx,yy = zip(*shape)
# show part of track where storm > Cat 4 as thick red.
if cat in ['H4','H5']:
m.plot(xx,yy,linewidth=1.5,color='r')
elif cat in ['H1','H2','H3']:
m.plot(xx,yy,color='k')
# draw coastlines, meridians and parallels.
m.drawcoastlines()
m.drawcountries()
m.drawmapboundary(fill_color='#99ffff')
m.fillcontinents(color='#cc9966',lake_color='#99ffff')
m.drawparallels(np.arange(10,70,20),labels=[1,1,0,0])
m.drawmeridians(np.arange(-100,0,20),labels=[0,0,0,1])
plt.title('Atlantic Hurricane Tracks (Storms Reaching Category 4, 1851-2004)')
plt.show()
| gpl-2.0 |
pv/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
    # test X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
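# Illustrative note (not part of the original tests): each line of an
# svmlight/libsvm file has the form
#
#     <label> <index>:<value> <index>:<value> ...
#
# so a hypothetical line "1 2:2.5 10:-5.2 15:1.5" encodes label 1 with
# three explicit non-zero features; every other feature is implicitly
# zero, which is what the sparsity assertions above rely on.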
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
kashif/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 8 | 44274 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@raises(ValueError)
def test_sgd_bad_alpha_for_optimal_learning_rate(self):
# Check whether expected ValueError on bad alpha, i.e. 0
# since alpha is used to compute the optimal learning rate
self.factory(alpha=0, learning_rate="optimal")
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
        # Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very, very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give small weights to the class 1 samples
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_partial_fit_multiclass_average(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
clf.partial_fit(X2[third:], Y2[third:])
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground-truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
@ignore_warnings
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.predict([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
icrtiou/coursera-ML | helper/anomaly.py | 1 | 1667 | import numpy as np
from scipy import stats
from sklearn.metrics import f1_score, classification_report
# X data shape
# array([[ 13.04681517, 14.74115241],
# [ 13.40852019, 13.7632696 ],
# [ 14.19591481, 15.85318113],
# [ 14.91470077, 16.17425987],
# [ 13.57669961, 14.04284944]])
def select_threshold(X, Xval, yval):
"""use CV data to find the best epsilon
Returns:
e: best epsilon with the highest f-score
f-score: such best f-score
"""
# create multivariate model using training data
mu = X.mean(axis=0)
cov = np.cov(X.T)
multi_normal = stats.multivariate_normal(mu, cov)
    # this is key: use CV data for fine-tuning hyperparameters
pval = multi_normal.pdf(Xval)
# set up epsilon candidates
epsilon = np.linspace(np.min(pval), np.max(pval), num=10000)
# calculate f-score
fs = []
for e in epsilon:
y_pred = (pval <= e).astype('int')
fs.append(f1_score(yval, y_pred))
# find the best f-score
argmax_fs = np.argmax(fs)
return epsilon[argmax_fs], fs[argmax_fs]
def predict(X, Xval, e, Xtest, ytest):
"""with optimal epsilon, combine X, Xval and predict Xtest
Returns:
multi_normal: multivariate normal model
y_pred: prediction of test data
"""
Xdata = np.concatenate((X, Xval), axis=0)
mu = Xdata.mean(axis=0)
cov = np.cov(Xdata.T)
multi_normal = stats.multivariate_normal(mu, cov)
# calculate probability of test data
pval = multi_normal.pdf(Xtest)
y_pred = (pval <= e).astype('int')
print(classification_report(ytest, y_pred))
return multi_normal, y_pred
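# A minimal usage sketch added for illustration; it is not part of the original
# exercise. The synthetic arrays and *_demo names below are assumptions; in the
# course assignment X, Xval, yval, Xtest and ytest come from the provided data
# files instead. Note that select_threshold sweeps 10000 epsilon candidates, so
# this takes a few seconds.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.normal(size=(300, 2))       # "normal" training points
    Xval_demo = rng.normal(size=(100, 2))    # cross-validation points
    # call a point anomalous if either coordinate is far from the origin
    yval_demo = (np.abs(Xval_demo).max(axis=1) > 2.5).astype('int')
    Xtest_demo = rng.normal(size=(100, 2))
    ytest_demo = (np.abs(Xtest_demo).max(axis=1) > 2.5).astype('int')
    e, fscore = select_threshold(X_demo, Xval_demo, yval_demo)
    print('best epsilon: {}, f-score: {}'.format(e, fscore))
    predict(X_demo, Xval_demo, e, Xtest_demo, ytest_demo)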
| mit |
joernhees/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 56 | 13916 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_metric_params():
    # Tests that DBSCAN works with the metric_params argument.
eps = 0.8
min_samples = 10
p = 1
# Compute DBSCAN with metric_params arg
db = DBSCAN(metric='minkowski', metric_params={'p': p}, eps=eps,
min_samples=min_samples, algorithm='ball_tree').fit(X)
core_sample_1, labels_1 = db.core_sample_indices_, db.labels_
# Test that sample labels are the same as passing Minkowski 'p' directly
db = DBSCAN(metric='minkowski', eps=eps, min_samples=min_samples,
algorithm='ball_tree', p=p).fit(X)
core_sample_2, labels_2 = db.core_sample_indices_, db.labels_
assert_array_equal(core_sample_1, core_sample_2)
assert_array_equal(labels_1, labels_2)
# Minkowski with p=1 should be equivalent to Manhattan distance
db = DBSCAN(metric='manhattan', eps=eps, min_samples=min_samples,
algorithm='ball_tree').fit(X)
core_sample_3, labels_3 = db.core_sample_indices_, db.labels_
assert_array_equal(core_sample_1, core_sample_3)
assert_array_equal(labels_1, labels_3)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
def test_dbscan_precomputed_metric_with_initial_rows_zero():
    # sample matrix with the first two rows all zero
ar = np.array([
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.3],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1],
[0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0]
])
matrix = sparse.csr_matrix(ar)
labels = DBSCAN(eps=0.2, metric='precomputed',
min_samples=2).fit(matrix).labels_
assert_array_equal(labels, [-1, -1, 0, 0, 0, 1, 1])
| bsd-3-clause |
yichiliao/yichiliao.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
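# For illustration only (this example is not shipped with the repository), a minimal
# `talks.tsv` with the required columns might look like the following, where the
# fields are separated by tabs:
#
#     title	type	url_slug	venue	date	location	talk_url	description
#     Example talk	Talk	example-talk	Hypothetical University	2014-03-01	Berkeley, CA, USA		An illustrative description.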
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
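# As a purely illustrative sketch (not generated from real data), for a hypothetical
# row with title "Example talk", url_slug "example-talk", date 2014-03-01, venue
# "Hypothetical University" and location "Berkeley, CA, USA", the loop below would
# emit a file named 2014-03-01-example-talk.md that starts with:
#
#     ---
#     title: "Example talk"
#     collection: talks
#     type: "Talk"
#     permalink: /talks/2014-03-01-example-talk
#     venue: "Hypothetical University"
#     date: 2014-03-01
#     location: "Berkeley, CA, USA"
#     ---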
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    if len(str(item.date)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit |
jlegendary/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
               title='Error: mean squared error\n on non-corrupt test data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
scottpurdy/nupic.fluent | fluent/models/classification_model.py | 1 | 7033 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import pandas
from collections import Counter
class ClassificationModel(object):
"""
Base class for NLP models of classification tasks. When inheriting from this
class please take note of which methods MUST be overridden, as documented
below.
The Model superclass implements:
    - evaluateTrialResults() calculates result stats
- evaluateResults() calculates result stats for a list of trial results
- printTrialReport() prints classifications of an evaluation trial
- printFinalReport() prints evaluation metrics and confusion matrix
- densifyPattern() returns a binary SDR vector for a given bitmap
Methods/properties that must be implemented by subclasses:
- encodePattern()
- trainModel()
- testModel()
"""
def __init__(self, verbosity=1):
self.verbosity = verbosity
def classifyRandomly(self, labels):
"""Return accuracy of random classifications for the labels."""
randomLabels = numpy.random.randint(0, labels.max(), labels.shape)
return (randomLabels == labels).sum() / float(labels.shape[0])
  def evaluateTrialResults(self, classifications, references, idx): ## TODO: evaluation metrics for multiple classifications
"""
Calculate statistics for the predicted classifications against the actual.
@param classifications (list) Two lists: (0) predictions and (1)
actual classifications. Items in
the predictions list are lists of
ints or None, and items in actual
classifications list are ints.
@param references (list) Classification label strings.
@return (tuple) Returns a 2-item tuple w/ the
accuracy (float) and confusion
matrix (numpy array).
"""
if len(classifications[0]) != len(classifications[1]):
raise ValueError("Classification lists must have same length.")
if self.verbosity > 0:
self._printTrialReport(classifications, references, idx)
actual = numpy.array(classifications[1])
predicted = numpy.array([c[0] for c in classifications[0]]) ## TODO: see above; this forces evaluation metrics to consider only the first predicted classification
accuracy = (actual == predicted).sum() / float(len(actual))
# Calculate confusion matrix.
total = len(references)
cm = numpy.zeros((total, total+1))
for i, p in enumerate(predicted):
if p:
cm[actual[i]][p] += 1
else:
# No predicted label, so increment the "(none)" column.
cm[actual[i]][total] += 1
cm = numpy.vstack((cm, numpy.sum(cm, axis=0)))
cm = numpy.hstack((cm, numpy.sum(cm, axis=1).reshape(total+1,1)))
cm = pandas.DataFrame(
data=cm,
columns=references+["(none)"]+["Actual Totals"],
index=references+["Prediction Totals"])
return (accuracy, cm)
def evaluateFinalResults(self, intermResults):
"""
Cumulative statistics for the outputs of evaluateTrialResults().
@param intermResults (list) List of returned results from
evaluateTrialResults().
@return (list) Returns a dictionary with entries
for max, mean, and min accuracies,
and the mean confusion matrix.
"""
accuracy = []
cm = numpy.zeros((intermResults[0][1].shape))
# Find mean, max, and min values for the metrics.
for result in intermResults:
accuracy.append(result[0])
cm = numpy.add(cm, result[1])
## TODO: add rows for Precision and Recall
results = {"max_accuracy":max(accuracy),
"mean_accuracy":sum(accuracy)/float(len(accuracy)),
"min_accuracy":min(accuracy),
"total_cm":cm}
if self.verbosity > 0:
self._printFinalReport(results)
return results
@staticmethod
def _printTrialReport(labels, refs, idx):
"""Print columns for sample #, actual label, and predicted label."""
template = "{0:10}|{1:30}|{2:30}"
print "Evaluation results for this fold:"
print template.format("#", "Actual", "Predicted")
for i in xrange(len(labels[0])):
if labels[0][i][0] == None:
print template.format(idx[i], refs[labels[1][i]], "(none)")
else:
print template.format(
idx[i], refs[labels[1][i]], [refs[l] for l in labels[0][i]])
@staticmethod
def _printFinalReport(results): ## TODO: pprint
"""Prints results as returned by evaluateResults()."""
print "---------- RESULTS ----------"
print "max, mean, min accuracies = "
print "{0:.3f}, {1:.3f}, {2:.3f}".format(
results["max_accuracy"], results["mean_accuracy"], results["min_accuracy"])
print "total confusion matrix =\n", results["total_cm"]
def _densifyPattern(self, bitmap):
"""Return a numpy array of 0s and 1s to represent the input bitmap."""
densePattern = numpy.zeros(self.n)
densePattern[bitmap] = 1.0
return densePattern
def _winningLabels(self, labels, n=3):
"""
Returns the most frequent item in the input list of labels. If there are
ties for the most frequent item, the x most frequent are returned,
where x<=n.
"""
labelCount = Counter(labels).most_common()
maxCount = 0
for c in labelCount: ## TODO: better way to do this?
if c[1] > maxCount:
maxCount = c[1]
winners = [c[0] for c in labelCount if c[1]==maxCount]
return winners if len(winners) <= n else winners[:n]
def encodePattern(self, pattern):
raise NotImplementedError
def resetModel(self):
raise NotImplementedError
def trainModel(self, sample, label):
raise NotImplementedError
def testModel(self, sample):
raise NotImplementedError
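# A minimal sketch (not part of the original module) of the subclass contract
# described in the class docstring; the bodies below are placeholders only,
# not a real encoder or classifier.
#
# class ExampleModel(ClassificationModel):
#   def encodePattern(self, pattern):
#     return bitmapForSample          # sparse bitmap (list of active indices)
#   def resetModel(self):
#     self.patterns = []
#   def trainModel(self, sample, label):
#     self.patterns.append((self.encodePattern(sample), label))
#   def testModel(self, sample):
#     return self._winningLabels([lbl for _, lbl in self.patterns], n=3)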
| gpl-3.0 |
abelcarreras/aiida_extensions | workflows/tools/plot_phonon_info.py | 1 | 3164 | from aiida import load_dbenv
load_dbenv()
from aiida.orm import load_node, load_workflow
from aiida.orm import Code, DataFactory
import matplotlib.pyplot as plt
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
ArrayData = DataFactory('array')
KpointsData = DataFactory('array.kpoints')
import numpy as np
# Set WorkflowPhonon PK number
########################
wf = load_workflow(9905)
########################
# Phonon Band structure
bs = wf.get_result('band_structure')
for i, freq in enumerate(bs.get_array('frequencies')):
plt.plot(bs.get_array('q_path')[i], freq, color='r')
plt.figure(1)
plt.axes().get_xaxis().set_ticks([])
plt.ylabel('Frequency [THz]')
plt.xlabel('Wave vector')
plt.xlim([0, bs.get_array('q_path')[-1][-1]])
plt.axhline(y=0, color='k', ls='dashed')
plt.suptitle('Phonon band structure')
if 'labels' in bs.get_arraynames():
plt.rcParams.update({'mathtext.default': 'regular' })
labels = bs.get_array('labels')
labels_e = []
x_labels = []
for i, freq in enumerate(bs.get_array('q_path')):
if labels[i][0] == labels[i-1][1]:
labels_e.append('$'+labels[i][0].replace('GAMMA', '\Gamma')+'$')
else:
labels_e.append('$'+labels[i-1][1].replace('GAMMA', '\Gamma')+'/'+labels[i][0].replace('GAMMA', '\Gamma')+'$')
x_labels.append(bs.get_array('q_path')[i][0])
x_labels.append(bs.get_array('q_path')[-1][-1])
labels_e.append('$'+labels[-1][1].replace('GAMMA', '\Gamma')+'$')
labels_e[0]='$'+labels[0][0].replace('GAMMA', '\Gamma')+'$'
plt.xticks(x_labels, labels_e, rotation='horizontal')
# plt.show()
# Phonon density of states
dos = wf.get_result('dos')
frequency = dos.get_array('frequency')
total_dos = dos.get_array('total_dos')
partial_dos = dos.get_array('partial_dos')
partial_symbols = dos.get_array('partial_symbols')
# Check atom equivalences
delete_list = []
for i, dos_i in enumerate(partial_dos):
for j, dos_j in enumerate(partial_dos):
if i < j:
if np.allclose(dos_i, dos_j, rtol=1, atol=1e-8) and partial_symbols[i] == partial_symbols[j]:
dos_i += dos_j
delete_list.append(j)
partial_dos = np.delete(partial_dos, delete_list, 0)
partial_symbols = np.delete(partial_symbols, delete_list)
plt.figure(2)
plt.suptitle('Phonon density of states')
plt.ylabel('Density')
plt.xlabel('Frequency [THz]')
plt.ylim([0, np.max(total_dos)*1.1])
plt.plot(frequency, total_dos, label='Total DOS')
for i, dos in enumerate(partial_dos):
plt.plot(frequency, dos, label='{}'.format(partial_symbols[i]))
plt.legend()
#plt.show()
# Thermal properties
thermal = wf.get_result('thermal_properties')
free_energy = thermal.get_array('free_energy')
entropy = thermal.get_array('entropy')
temperature = thermal.get_array('temperature')
cv = thermal.get_array('cv')
plt.figure(3)
plt.xlabel('Temperature [K]')
plt.suptitle('Thermal properties (per unit cell)')
plt.plot(temperature, free_energy, label='Free energy (KJ/mol)')
plt.plot(temperature, entropy, label='entropy (KJ/mol)')
plt.plot(temperature, cv, label='Cv (J/mol)')
plt.legend()
plt.show()
| mit |
0x90/skybluetero | plotter.py | 3 | 4319 | # Copyright (c) 2009 Emiliano Pastorino <[email protected]>
#
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import matplotlib
matplotlib.use('GTKAgg')
matplotlib.interactive(True)
import matplotlib.pyplot as pyplot
import matplotlib.pylab as pylab
class Plotter():
def __init__(self,xval,yval,tags,title):
self.colors = ('#ff0000','#00ff00','#0000ff','#ffff00','#ff00ff','#00ffff','#800000','#008000','#000080','#808000','#800080','#008080','#ff8000','#ff0080','#00ff80','#ff00c0','#ffc000','#c0ff00','#00ffc0','#00c0ff')
self.xval = xval
self.yval = yval
self.tags = tags
self.title = title
def simpleplot(self):
fig = pyplot.figure()
j=0
lineas=[]
for i in self.yval:
linea = pyplot.plot(self.xval,i,self.colors[j])
lineas.append(linea[0])
j=j+1
if j>19:
j = 0
pyplot.legend(lineas,self.tags,'best')
pyplot.xlabel('Time (s)')
pyplot.ylabel('Airtime consumption (%)')
pyplot.title(self.title)
pyplot.grid(True)
pyplot.show()
def stackareaplot(self):
fig = pyplot.figure()
lineas=[]
stack = []
k=0
for i in self.yval:
stack.append(i)
m=0
for j in i:
if k > 0:
stack[k][m]=stack[k][m]+stack[k-1][m]
m = m+1
k=k+1
j=0
xss=[]
yss=[]
used_colors=[]
for i in stack:
linea = pyplot.plot(self.xval,i,self.colors[j])
used_colors.append(self.colors[j])
lineas.append(linea[0])
xs,ys = pylab.poly_between(self.xval,0,i)
xss.append(xs)
yss.append(ys)
j=j+1
if j>19:
j = 0
j=0
k=-1
used_colors_inv = used_colors[::-1]
for i in yss:
pylab.fill(xss[0], yss[k], used_colors_inv[j])
j=j+1
k=k-1
pyplot.legend(lineas,self.tags,'best')
pyplot.title(self.title)
pyplot.xlabel('Time (s)')
pyplot.ylabel('Airtime consumption (%)')
pyplot.grid(True)
pyplot.show()
class Test():
def simpleplot(self):
plt = Plotter([0,20],[[0,1],[0,2],[0,3],[0,4],[0,5],[0,6],[0,7],[0,8],[0,9],[0,10],[0,11],[0,12],[0,13],[0,14],[0,15],[0,16],[0,17],[0,18],[0,19],[0,20]],['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t'],'hola')
plt.simpleplot()
# def stackareaplot(self):
# plt = Plotter([0,20],[[0,1],[0,2],[0,3],[0,4],[0,5],[0,6],[0,7],[0,8],[0,9],[0,10],[0,11],[0,12],[0,13],[0,14],[0,15],[0,16],[0,17],[0,18],[0,19],[0,20]],['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t'],'hola')
# plt.stackareaplot()
def stackareaplot(self):
plt = Plotter([0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300],[[0.010666335555555581, 0.0088586274074074108, 0.0059840918518518455, 0.00031296111111111114, 0.0086995377777777903, 0.010214910185185212, 0.0044641161111111079, 0.038744648888889052, 0.021350007407407605, 0.002331960185185185, 0.0013770194444444447, 0.00067598759259259258, 8.3303703703703709e-06, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0.010666335555555581, 0.0088586274074074108, 0.0059840918518518455, 0.00031296111111111114, 0.0086995377777777903, 0.010214910185185212, 0.0044641161111111079, 0.038744648888889052, 0.021350007407407605, 0.002331960185185185, 0.0013770194444444447, 0.00067598759259259258, 8.3303703703703709e-06, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],['a','b'],'hola')
plt.stackareaplot()
| mit |
Edouard360/text-mining-challenge | main.py | 1 | 4387 | """
Train a link-prediction model for the citation-network challenge: build graph- and
text-based features from the node information, fit a classifier on the training set,
and write a submission file with the predicted citation links.
"""
from time import localtime, strftime
import pandas as pd
from sklearn import metrics
from classifier import Classifier
from featureEngineering.FeatureExporter import FeatureExporter
from featureEngineering.FeatureImporter import FeatureImporter
from tools import random_sample
time_sub = strftime("%Y-%m-%d %H:%M:%S", localtime()).replace(' ', '__')
train_df = pd.read_csv("data/training_set.txt", sep=" ", header=None)
train_df.columns = ["source", "target", "label"]
test_df = pd.read_csv("data/testing_set.txt", sep=" ", header=None)
test_df.columns = ["source", "target"]
node_information_df = pd.read_csv("data/node_information.csv", sep=",", header=None)
node_information_df.columns = ["ID", "year", "title", "authors", "journalName", "abstract"]
node_information_df = node_information_df.reset_index().set_index("ID")
node_information_df["authors"].fillna("", inplace=True)
node_information_df["journalName"].fillna("", inplace=True)
df_dict = dict()
training_set_percentage = 0.05
df_dict["train"] = {
"filename": 'training_set.txt',
"df": random_sample(train_df, p=training_set_percentage)
}
testing_on_train = True
early_stopping = False
features = ["graphAuthors", "graphArticle", 'original', "similarity", "journal", "lsa"]
# features = []
verbose = True
freq = 10000
# By uncommenting the line below, you can tune the parameters used for building the graph
parameters = {}
# parameters = {"percentile":95,"metric":"degrees"}
if testing_on_train:
df_dict["test"] = {
"filename": 'testing_training_set.txt',
"df": random_sample(train_df, p=0.05, seed=43)
}
else:
df_dict["test"] = {
"filename": 'testing_set.txt',
"df": test_df
}
exporter = FeatureExporter(verbose=verbose, freq=freq)
for key, value in df_dict.items():
if not FeatureImporter.check(value["filename"], features=features, **parameters):
for feature in features:
if not FeatureImporter.check(value["filename"], features=[feature], **parameters):
print("Exporting for " + key + " the feature " + feature)
exporter.computeFeature(value["df"], node_information_df, feature, **parameters)
exporter.exportTo(value["filename"], feature, **parameters)
training_features = FeatureImporter.importFromFile(df_dict["train"]["filename"], features=features, **parameters)
# training_features = training_features[:,5].reshape(-1,1)
# training_features_1 = training_features[:,0:2].reshape(-1,2)
# training_features_2 = training_features[:,3:]
# training_features = np.concatenate((training_features_1,training_features_2),axis = 1)
testing_features = FeatureImporter.importFromFile(df_dict["test"]["filename"], features=features, **parameters)
# testing_features = testing_features[:,5].reshape(-1,1)
# testing_features_1 = testing_features[:,0:2].reshape(-1,2)
# testing_features_2 = testing_features[:,3:]
# testing_features = np.concatenate((testing_features_1,testing_features_2),axis = 1)
labels = df_dict["train"]["df"]["label"].values
classifier = Classifier()
# classifier = LogisticRegression()
# classifier = RandomForestClassifier(n_estimators=100, random_state=42)
if testing_on_train:
labels_true = df_dict["test"]["df"]["label"].values
if not early_stopping:
classifier.fit(training_features, labels)
labels_pred = classifier.predict(testing_features)
print("Features : ", features)
if hasattr(classifier, 'name'):
print("Classifier : ", classifier.name)
else:
print("Classifier : ", str(classifier))
print("f1 score is %f | %.2f of training set" % (
metrics.f1_score(labels_true, labels_pred), training_set_percentage))
else:
plot_curves = False
eval_set = [(training_features, labels),
(testing_features, labels_true)]
if plot_curves:
classifier.plotlearningcurves(eval_set)
else:
classifier.early_stop(eval_set)
else:
classifier.fit(training_features, labels)
labels_pred = classifier.predict(testing_features)
prediction_df = pd.DataFrame(columns=["id", "category"], dtype=int)
prediction_df["id"] = range(len(labels_pred))
prediction_df["category"] = labels_pred
prediction_df.to_csv("submissions/improved_predictions_of_" + time_sub + ".csv", index=None)
| apache-2.0 |
tbereau/espresso | samples/python/electrophoresis.py | 1 | 8509 | #
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
from espressomd import code_info
from espressomd import thermostat
from espressomd import integrate
from espressomd import interactions
from espressomd import electrostatics
import sys
import numpy as np
import cPickle as pickle
import os
print(code_info.features())
# Seed
#############################################################
np.random.seed(42)
# System parameters
#############################################################
system = espressomd.System()
system.time_step = 0.01
system.skin = 0.4
system.box_l = [100, 100, 100]
system.periodic = [1,1,1]
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# system.cell_system.set_n_square(use_verlet_lists=False)
system.max_num_cells = 2744
# Non-bonded interactions
###############################################################
# WCA between monomers
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1, sigma=1,
cutoff=2**(1. / 6), shift="auto")
# WCA counterions - polymer
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1, sigma=1,
cutoff=2**(1. / 6), shift="auto")
# WCA coions - polymer
system.non_bonded_inter[0, 2].lennard_jones.set_params(
epsilon=1, sigma=1,
cutoff=2**(1. / 6), shift="auto")
# WCA between ions
system.non_bonded_inter[1, 2].lennard_jones.set_params(
epsilon=1, sigma=1,
cutoff=2**(1. / 6), shift="auto")
# Bonded interactions
################################################################
# fene = interactions.FeneBond(k=10, d_r_max=2)
# system.bonded_inter.add(fene)
harmonic = interactions.HarmonicBond(k=10, r_0=2)
harmonicangle = interactions.Angle_Harmonic(bend=10, phi0=np.pi)
system.bonded_inter.add(harmonic)
system.bonded_inter.add(harmonicangle)
# Create Monomer beads and bonds
#########################################################################################
n_monomers = 20
init_polymer_pos=np.dstack((np.arange(n_monomers),np.zeros(n_monomers),np.zeros(n_monomers)))[0]+np.array([system.box_l[0]/2-n_monomers/2, system.box_l[1]/2, system.box_l[2]/2])
system.part.add(id=np.arange(n_monomers), pos=init_polymer_pos)
system.part[:-1].add_bond((harmonic, np.arange(n_monomers)[1:]))
system.part[1:-1].add_bond((harmonicangle, np.arange(n_monomers)[:-2], np.arange(n_monomers)[2:]))
# Particle creation with loops:
# for i in range(n_monomers):
# if i > 0:
# system.part[i].add_bond((harmonic, i - 1))
# for i in range(1,n_monomers-1):
# system.part[i].add_bond((harmonicangle,i - 1, i + 1))
system.part[:n_monomers].q = -np.ones(n_monomers)
# Create counterions
###################################################################
system.part.add(pos=np.random.random((n_monomers,3)) * system.box_l,
q=np.ones(n_monomers),
type=np.ones(n_monomers))
# Create ions
###############################################################
n_ions = 100
system.part.add(pos=np.random.random((n_ions,3)) * system.box_l,
q=np.hstack((np.ones(n_ions/2),-np.ones(n_ions/2))),
type=np.hstack((np.ones(n_ions/2),2*np.ones(n_ions/2))))
# Alternatively, assign charges to the particles after particle creation:
# system.part[2*n_monomers:2*n_monomers+n_ions/2] = np.ones(n_ions/2)
# system.part[2*n_monomers+n_ions/2:] = -np.ones(n_ions/2)
print("types:", system.part[:].type)
print("")
print("Q_tot:", np.sum(system.part[:].q))
#############################################################
# Warmup #
#############################################################
system.non_bonded_inter.set_force_cap(10)
for i in range(1000):
sys.stdout.write("\rWarmup: %03i"%i)
sys.stdout.flush()
integrate.integrate(1)
system.non_bonded_inter.set_force_cap(10*i)
system.non_bonded_inter.set_force_cap(0)
print("\nWarmup finished!\n")
#############################################################
# Sampling #
#############################################################
#
# Activate electrostatics with checkpoint example
#############################################################
read_checkpoint = False
# Load checkpointed p3m class
if os.path.isfile("p3m_checkpoint") and read_checkpoint == True:
print("reading p3m from file")
p3m = pickle.load(open("p3m_checkpoint","r"))
else:
p3m = electrostatics.P3M(bjerrum_length=1.0, accuracy=1e-2)
print("Tuning P3M")
system.actors.add(p3m)
# Checkpoint AFTER tuning (adding method to actors)
pickle.dump(p3m,open("p3m_checkpoint","w"),-1)
print("P3M parameters:\n")
p3m_params = p3m.get_params()
for key in p3m_params.keys():
print("{} = {}".format(key, p3m_params[key]))
print(system.actors)
# Apply external Force
#############################################################
n_part = system.n_part
system.part[:].ext_force = np.dstack((system.part[:].q * np.ones(n_part), np.zeros(n_part), np.zeros(n_part)))[0]
# print(system.part[:].ext_force)
# Activate LB
############################################################
# lbf = lb.LBF(dens=1, tau=0.01, visc=1, fric=1, agrid=1)
# system.actors.add(lbf)
# Data arrays
v_list = []
pos_list = []
# Sampling Loop
for i in range(4000):
sys.stdout.write("\rSampling: %04i"%i)
sys.stdout.flush()
integrate.integrate(1)
v_list.append(system.part[:n_monomers].v)
pos_list.append(system.part[:n_monomers].pos)
# other observales:
print("\nSampling finished!\n")
# Data evaluation
############################################################
# Convert data to numpy arrays
# shape = [time_step, monomer, coordinate]!
v_list = np.array(v_list)
pos_list = np.array(pos_list)
# Calculate COM and COM velocity
COM = pos_list.sum(axis=1)/n_monomers
COM_v = (COM[1:] - COM[:-1])/system.time_step
# Calculate the Mobility mu = v/E
##################################
mu = COM_v.mean()/1.0
print("MOBILITY", mu)
# Calculate the Persistence length
# fits better for longer sampling
##################################
# this calculation method requires
# numpy 1.10 or higher
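# The curve_fit call below assumes the standard worm-like-chain relation
#   <cos(theta(s))> ~ exp(-s / l_p)
# where s is the contour length between bonds and l_p the persistence length.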
if float(np.version.version.split(".")[1]) >= 10:
from scipy.optimize import curve_fit
from numpy.linalg import norm
# First get bond vectors
bond_vec = pos_list[:,1:,:] - pos_list[:,:-1,:]
bond_abs = norm(bond_vec, axis=2, keepdims=True)
bond_abs_avg = bond_abs.mean(axis=0)[:,0]
c_length = bond_abs_avg
for i in range(1,len(bond_abs_avg)):
c_length[i] += c_length[i-1]
bv_norm = bond_vec / bond_abs
bv_zero = np.empty_like(bv_norm)
for i in range(bv_zero.shape[1]):
bv_zero[:,i,:] = bv_norm[:,0,:]
# Calculate <cos(theta)>
cos_theta = (bv_zero*bv_norm).sum(axis=2).mean(axis=0)
def decay(x,lp):
return np.exp(-x/lp)
fit,_ = curve_fit(decay, c_length, cos_theta)
    print(c_length.shape, cos_theta.shape)
    print("PERSISTENCE LENGTH", fit[0])
# Plot Results
############################################################
import matplotlib.pyplot as pp
direction = ["x", "y", "z"]
fig1=pp.figure()
ax=fig1.add_subplot(111)
for i in range(3):
ax.plot(COM[:-500,i], label="COM pos %s" %direction[i])
ax.legend(loc="best")
ax.set_xlabel("time_step")
ax.set_ylabel("r")
fig2=pp.figure()
ax=fig2.add_subplot(111)
for i in range(3):
ax.plot(COM_v[:-500,i], label="COM v %s" %direction[i])
ax.legend(loc="best")
ax.set_xlabel("time_step")
ax.set_ylabel("v")
if float(np.version.version.split(".")[1]) >= 10:
fig3=pp.figure()
ax=fig3.add_subplot(111)
ax.plot(c_length, cos_theta, label="sim data")
ax.plot(c_length, decay(c_length, fit[0]), label="fit")
ax.legend(loc="best")
ax.set_xlabel("contour length")
ax.set_ylabel("<cos(theta)>")
pp.show()
print("\nJob finished!\n")
| gpl-3.0 |
bennlich/scikit-image | doc/ext/notebook.py | 44 | 3042 | __all__ = ['python_to_notebook', 'Notebook']
import json
import copy
import warnings
# Skeleton notebook in JSON format
skeleton_nb = """{
"metadata": {
"name":""
},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "code",
"collapsed": false,
"input": [
"%matplotlib inline"
],
"language": "python",
"metadata": {},
"outputs": []
}
],
"metadata": {}
}
]
}"""
class Notebook(object):
"""
Notebook object for building an IPython notebook cell-by-cell.
"""
def __init__(self):
# cell type code
self.cell_code = {
'cell_type': 'code',
'collapsed': False,
'input': [
'# Code Goes Here'
],
'language': 'python',
'metadata': {},
'outputs': []
}
# cell type markdown
self.cell_md = {
'cell_type': 'markdown',
'metadata': {},
'source': [
'Markdown Goes Here'
]
}
self.template = json.loads(skeleton_nb)
self.cell_type = {'input': self.cell_code, 'source': self.cell_md}
self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}
def add_cell(self, value, cell_type='code'):
"""Add a notebook cell.
Parameters
----------
value : str
Cell content.
cell_type : {'code', 'markdown'}
Type of content (default is 'code').
"""
if cell_type in ['markdown', 'code']:
key = self.valuetype_to_celltype[cell_type]
cells = self.template['worksheets'][0]['cells']
cells.append(copy.deepcopy(self.cell_type[key]))
# assign value to the last cell
cells[-1][key] = value
else:
warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)
def json(self):
"""Return a JSON representation of the notebook.
Returns
-------
str
JSON notebook.
"""
return json.dumps(self.template, indent=2)
def test_notebook_basic():
nb = Notebook()
assert(json.loads(nb.json()) == json.loads(skeleton_nb))
def test_notebook_add():
nb = Notebook()
str1 = 'hello world'
str2 = 'f = lambda x: x * x'
nb.add_cell(str1, cell_type='markdown')
nb.add_cell(str2, cell_type='code')
d = json.loads(nb.json())
cells = d['worksheets'][0]['cells']
values = [c['input'] if c['cell_type'] == 'code' else c['source']
for c in cells]
assert values[1] == str1
assert values[2] == str2
assert cells[1]['cell_type'] == 'markdown'
assert cells[2]['cell_type'] == 'code'
if __name__ == "__main__":
import numpy.testing as npt
npt.run_module_suite()
| bsd-3-clause |
Newlife005/nestle | examples/plot_line.py | 5 | 1484 | """
====
Line
====
Example of fitting a straight line to some data.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import corner
import nestle
np.random.seed(0)
def model(theta, x):
m, c = theta
return m*x + c
# Generate some data
theta_true = [0.5, 10.0]
N = 50
x = np.sort(10*np.random.rand(N))
y = model(theta_true, x)
yerr = 0.1+0.5*np.random.rand(N)
y += yerr * np.random.randn(N)
# The likelihood function:
def loglike(theta):
return -0.5*(np.sum((y-model(theta, x))**2/yerr**2))
# Defines a flat prior in 0 < m < 1, 0 < b < 100:
def prior_transform(theta):
return np.array([1., 100.]) * theta
# Run nested sampling
res = nestle.sample(loglike, prior_transform, 2, method='single',
npoints=1000)
print(res.summary())
# weighted average and covariance:
p, cov = nestle.mean_and_cov(res.samples, res.weights)
print("m = {0:5.2f} +/- {1:5.2f}".format(p[0], np.sqrt(cov[0, 0])))
print("b = {0:5.2f} +/- {1:5.2f}".format(p[1], np.sqrt(cov[1, 1])))
plt.figure()
plt.errorbar(x, y, yerr=yerr, capsize=0, fmt='k.', ecolor='.7')
plt.plot([0., 10.], model(p, np.array([0., 10.])), c='k')
plt.show()
###############################################################################
# Plot samples to see the full posterior surface.
fig = corner.corner(res.samples, weights=res.weights, labels=['m', 'b'],
range=[0.99999, 0.99999], truths=theta_true, bins=30)
plt.show()
| mit |
mblondel/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
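    # The main loop applies Lee & Seung's multiplicative updates, with eps
    # guarding against division by zero:
    #   H <- H * (W^T V) / (W^T W H + eps)
    #   W <- W * (V H^T) / (W (H H^T) + eps)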
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
thebucknerlife/caravel | caravel/forms.py | 1 | 25665 | """Contains the logic to create cohesive forms on the explore view"""
from wtforms import (
Form, SelectMultipleField, SelectField, TextField, TextAreaField,
BooleanField, IntegerField, HiddenField)
from wtforms import validators, widgets
from copy import copy
from caravel import app
from collections import OrderedDict
config = app.config
class BetterBooleanField(BooleanField):
    """Fixes the HTML checkbox to distinguish absent from unchecked
    (a plain checkbox doesn't distinguish False from NULL/missing).
    If the value is unchecked, this hidden <input> fills in a False value.
"""
def __call__(self, **kwargs):
html = super(BetterBooleanField, self).__call__(**kwargs)
html += u'<input type="hidden" name="{}" value="false">'.format(self.name)
return widgets.HTMLString(html)
class SelectMultipleSortableField(SelectMultipleField):
    """Works along with select2sortable to preserve the sort order"""
def iter_choices(self):
d = OrderedDict()
for value, label in self.choices:
selected = self.data is not None and self.coerce(value) in self.data
d[value] = (value, label, selected)
if self.data:
for value in self.data:
if value:
yield d.pop(value)
while d:
yield d.popitem(last=False)[1]
class FreeFormSelect(widgets.Select):
"""A WTF widget that allows for free form entry"""
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
if self.multiple:
kwargs['multiple'] = True
html = ['<select %s>' % widgets.html_params(name=field.name, **kwargs)]
found = False
for val, label, selected in field.iter_choices():
html.append(self.render_option(val, label, selected))
if field.data and val == field.data:
found = True
if not found:
html.insert(1, self.render_option(field.data, field.data, True))
html.append('</select>')
return widgets.HTMLString(''.join(html))
class FreeFormSelectField(SelectField):
"""A WTF SelectField that allows for free form input"""
widget = FreeFormSelect()
def pre_validate(self, form):
return
class OmgWtForm(Form):
"""Caravelification of the WTForm Form object"""
fieldsets = {}
css_classes = dict()
def get_field(self, fieldname):
return getattr(self, fieldname)
    def field_css_classes(self, fieldname):
        if fieldname in self.css_classes:
            return " ".join(self.css_classes[fieldname])
        return ""
class FormFactory(object):
"""Used to create the forms in the explore view dynamically"""
series_limits = [0, 5, 10, 25, 50, 100, 500]
fieltype_class = {
SelectField: 'select2',
SelectMultipleField: 'select2',
FreeFormSelectField: 'select2_freeform',
SelectMultipleSortableField: 'select2Sortable',
}
def __init__(self, viz):
self.viz = viz
from caravel.viz import viz_types
viz = self.viz
datasource = viz.datasource
default_metric = datasource.metrics_combo[0][0]
default_groupby = datasource.groupby_column_names[0]
group_by_choices = [(s, s) for s in datasource.groupby_column_names]
# Pool of all the fields that can be used in Caravel
self.field_dict = {
'viz_type': SelectField(
'Viz',
default='table',
choices=[(k, v.verbose_name) for k, v in viz_types.items()],
description="The type of visualization to display"),
'metrics': SelectMultipleSortableField(
'Metrics', choices=datasource.metrics_combo,
default=[default_metric],
description="One or many metrics to display"),
'metric': SelectField(
'Metric', choices=datasource.metrics_combo,
default=default_metric,
description="Chose the metric"),
'stacked_style': SelectField(
'Chart Style', choices=self.choicify(
['stack', 'stream', 'expand']),
default='stack',
description=""),
'linear_color_scheme': SelectField(
'Color Scheme', choices=self.choicify([
'fire', 'blue_white_yellow', 'white_black',
'black_white']),
default='fire',
description=""),
'normalize_across': SelectField(
'Normalize Across', choices=self.choicify([
'heatmap', 'x', 'y']),
default='heatmap',
                description=(
                    "Color will be rendered based on a ratio "
                    "of the cell against the sum across this "
                    "criterion")),
'canvas_image_rendering': SelectField(
'Rendering', choices=(
('pixelated', 'pixelated (Sharp)'),
('auto', 'auto (Smooth)'),
),
default='pixelated',
description=(
"image-rendering CSS attribute of the canvas object that "
"defines how the browser scales up the image")),
'xscale_interval': SelectField(
'XScale Interval', choices=self.choicify(range(1, 50)),
default='1',
                description=(
                    "Number of steps to take between ticks when "
                    "printing the x scale")),
'yscale_interval': SelectField(
'YScale Interval', choices=self.choicify(range(1, 50)),
default='1',
                description=(
                    "Number of steps to take between ticks when "
                    "printing the y scale")),
'bar_stacked': BetterBooleanField(
'Stacked Bars',
default=False,
description=""),
'secondary_metric': SelectField(
'Color Metric', choices=datasource.metrics_combo,
default=default_metric,
description="A metric to use for color"),
'country_fieldtype': SelectField(
'Country Field Type',
default='cca2',
choices=(
('name', 'Full name'),
('cioc', 'code International Olympic Committee (cioc)'),
('cca2', 'code ISO 3166-1 alpha-2 (cca2)'),
('cca3', 'code ISO 3166-1 alpha-3 (cca3)'),
),
description=(
"The country code standard that Caravel should expect "
"to find in the [country] column")),
'groupby': SelectMultipleSortableField(
'Group by',
choices=self.choicify(datasource.groupby_column_names),
description="One or many fields to group by"),
'columns': SelectMultipleSortableField(
'Columns',
choices=self.choicify(datasource.groupby_column_names),
description="One or many fields to pivot as columns"),
'all_columns': SelectMultipleSortableField(
'Columns',
choices=self.choicify(datasource.column_names),
description="Columns to display"),
'all_columns_x': SelectField(
'X',
choices=self.choicify(datasource.column_names),
description="Columns to display"),
'all_columns_y': SelectField(
'Y',
choices=self.choicify(datasource.column_names),
description="Columns to display"),
'granularity': FreeFormSelectField(
'Time Granularity', default="one day",
choices=self.choicify([
'all',
'5 seconds',
'30 seconds',
'1 minute',
'5 minutes',
'1 hour',
'6 hour',
'1 day',
'7 days',
]),
description=(
"The time granularity for the visualization. Note that you "
"can type and use simple natural language as in '10 seconds', "
"'1 day' or '56 weeks'")),
'link_length': FreeFormSelectField(
'Link Length', default="200",
choices=self.choicify([
'10',
'25',
'50',
'75',
'100',
'150',
'200',
'250',
]),
description="Link length in the force layout"),
'charge': FreeFormSelectField(
'Charge', default="-500",
choices=self.choicify([
'-50',
'-75',
'-100',
'-150',
'-200',
'-250',
'-500',
'-1000',
'-2500',
'-5000',
]),
description="Charge in the force layout"),
'granularity_sqla': SelectField(
'Time Column',
default=datasource.main_dttm_col or datasource.any_dttm_col,
choices=self.choicify(datasource.dttm_cols),
                description=(
                    "The time column for the visualization. Note that you "
                    "can define an arbitrary expression that returns a DATETIME "
                    "column in the table editor. Also note that the "
                    "filter below is applied against this column or "
                    "expression")),
'resample_rule': FreeFormSelectField(
'Resample Rule', default='',
choices=self.choicify(('1T', '1H', '1D', '7D', '1M', '1AS')),
description=("Pandas resample rule")),
'resample_how': FreeFormSelectField(
'Resample How', default='',
choices=self.choicify(('', 'mean', 'sum', 'median')),
description=("Pandas resample how")),
'resample_fillmethod': FreeFormSelectField(
'Resample Fill Method', default='',
choices=self.choicify(('', 'ffill', 'bfill')),
description=("Pandas resample fill method")),
'since': FreeFormSelectField(
'Since', default="7 days ago",
choices=self.choicify([
'1 hour ago',
'12 hours ago',
'1 day ago',
'7 days ago',
'28 days ago',
'90 days ago',
'1 year ago'
]),
description=(
"Timestamp from filter. This supports free form typing and "
"natural language as in '1 day ago', '28 days' or '3 years'")),
'until': FreeFormSelectField(
'Until', default="now",
choices=self.choicify([
'now',
'1 day ago',
'7 days ago',
'28 days ago',
'90 days ago',
'1 year ago'])
),
'max_bubble_size': FreeFormSelectField(
'Max Bubble Size', default="25",
choices=self.choicify([
'5',
'10',
'15',
'25',
'50',
'75',
'100',
])
),
'row_limit':
FreeFormSelectField(
'Row limit',
default=config.get("ROW_LIMIT"),
choices=self.choicify(
[10, 50, 100, 250, 500, 1000, 5000, 10000, 50000])),
'limit':
FreeFormSelectField(
'Series limit',
choices=self.choicify(self.series_limits),
default=50,
description=(
"Limits the number of time series that get displayed")),
'rolling_type': SelectField(
'Rolling',
default='None',
choices=[(s, s) for s in ['None', 'mean', 'sum', 'std', 'cumsum']],
description=(
"Defines a rolling window function to apply, works along "
"with the [Periods] text box")),
'rolling_periods': IntegerField(
'Periods',
validators=[validators.optional()],
description=(
"Defines the size of the rolling window function, "
"relative to the time granularity selected")),
'series': SelectField(
'Series', choices=group_by_choices,
default=default_groupby,
                description=(
                    "Defines the grouping of entities. "
                    "Each series is shown as a specific color on the chart and "
                    "has a legend toggle")),
'entity': SelectField(
'Entity', choices=group_by_choices,
default=default_groupby,
                description="This defines the element to be plotted on the chart"),
'x': SelectField(
'X Axis', choices=datasource.metrics_combo,
default=default_metric,
description="Metric assigned to the [X] axis"),
'y': SelectField(
'Y Axis', choices=datasource.metrics_combo,
default=default_metric,
description="Metric assigned to the [Y] axis"),
'size': SelectField(
'Bubble Size',
default=default_metric,
choices=datasource.metrics_combo),
'url': TextField(
'URL', default='www.airbnb.com',),
'where': TextField(
'Custom WHERE clause', default='',
                description=(
                    "The text in this box gets included in your query's WHERE "
                    "clause, as an AND to other criteria. You can include "
                    "complex expressions, parentheses and anything else "
                    "supported by the backend it is directed towards.")),
'having': TextField(
'Custom HAVING clause', default='',
                description=(
                    "The text in this box gets included in your query's HAVING"
                    " clause, as an AND to other criteria. You can include "
                    "complex expressions, parentheses and anything else "
                    "supported by the backend it is directed towards.")),
'compare_lag': TextField(
'Comparison Period Lag',
description=(
"Based on granularity, number of time periods to "
"compare against")),
'compare_suffix': TextField(
'Comparison suffix',
description="Suffix to apply after the percentage display"),
'x_axis_format': FreeFormSelectField(
'X axis format',
default='smart_date',
choices=[
                    ('smart_date', 'Adaptive formatting'),
("%m/%d/%Y", '"%m/%d/%Y" | 01/14/2019'),
("%Y-%m-%d", '"%Y-%m-%d" | 2019-01-14'),
("%Y-%m-%d %H:%M:%S",
'"%Y-%m-%d %H:%M:%S" | 2019-01-14 01:32:10'),
("%H:%M:%S", '"%H:%M:%S" | 01:32:10'),
],
            description="D3 format syntax for the x axis "
"https://github.com/mbostock/\n"
"d3/wiki/Formatting"),
'y_axis_format': FreeFormSelectField(
'Y axis format',
default='.3s',
choices=[
('.3s', '".3s" | 12.3k'),
('.3%', '".3%" | 1234543.210%'),
('.4r', '".4r" | 12350'),
('.3f', '".3f" | 12345.432'),
('+,', '"+," | +12,345.4321'),
('$,.2f', '"$,.2f" | $12,345.43'),
],
description="D3 format syntax for y axis "
"https://github.com/mbostock/\n"
"d3/wiki/Formatting"),
'markup_type': SelectField(
"Markup Type",
choices=self.choicify(['markdown', 'html']),
default="markdown",
description="Pick your favorite markup language"),
'rotation': SelectField(
"Rotation",
choices=[(s, s) for s in ['random', 'flat', 'square']],
default="random",
description="Rotation to apply to words in the cloud"),
'line_interpolation': SelectField(
"Line Style",
choices=self.choicify([
'linear', 'basis', 'cardinal', 'monotone',
'step-before', 'step-after']),
default='linear',
description="Line interpolation as defined by d3.js"),
'code': TextAreaField(
"Code", description="Put your code here", default=''),
'pandas_aggfunc': SelectField(
"Aggregation function",
choices=self.choicify([
'sum', 'mean', 'min', 'max', 'median', 'stdev', 'var']),
default='sum',
description=(
"Aggregate function to apply when pivoting and "
"computing the total rows and columns")),
'size_from': TextField(
"Font Size From",
default="20",
description="Font size for the smallest value in the list"),
'size_to': TextField(
"Font Size To",
default="150",
description="Font size for the biggest value in the list"),
'show_brush': BetterBooleanField(
"Range Filter", default=False,
description=(
"Whether to display the time range interactive selector")),
'show_datatable': BetterBooleanField(
"Data Table", default=False,
description="Whether to display the interactive data table"),
'include_search': BetterBooleanField(
"Search Box", default=False,
description=(
"Whether to include a client side search box")),
'show_bubbles': BetterBooleanField(
"Show Bubbles", default=False,
description=(
"Whether to display bubbles on top of countries")),
'show_legend': BetterBooleanField(
"Legend", default=True,
description="Whether to display the legend (toggles)"),
'x_axis_showminmax': BetterBooleanField(
"X bounds", default=True,
description=(
"Whether to display the min and max values of the X axis")),
'rich_tooltip': BetterBooleanField(
"Rich Tooltip", default=True,
description=(
"The rich tooltip shows a list of all series for that"
" point in time")),
'y_axis_zero': BetterBooleanField(
"Y Axis Zero", default=False,
description=(
"Force the Y axis to start at 0 instead of the minimum "
"value")),
'y_log_scale': BetterBooleanField(
"Y Log", default=False,
description="Use a log scale for the Y axis"),
'x_log_scale': BetterBooleanField(
"X Log", default=False,
description="Use a log scale for the X axis"),
'donut': BetterBooleanField(
"Donut", default=False,
description="Do you want a donut or a pie?"),
'contribution': BetterBooleanField(
"Contribution", default=False,
description="Compute the contribution to the total"),
'num_period_compare': IntegerField(
"Period Ratio", default=None,
validators=[validators.optional()],
                description=(
                    "[integer] Number of periods to compare against; "
                    "this is relative to the granularity selected")),
'time_compare': TextField(
"Time Shift",
default="",
                description=(
                    "Overlay a timeseries from a "
                    "relative time period. Expects relative time delta "
                    "in natural language (example: 24 hours, 7 days, "
                    "56 weeks, 365 days)")),
}
@staticmethod
def choicify(l):
return [("{}".format(obj), "{}".format(obj)) for obj in l]
def get_form(self):
"""Returns a form object based on the viz/datasource/context"""
viz = self.viz
field_css_classes = {}
for name, obj in self.field_dict.items():
field_css_classes[name] = ['form-control']
s = self.fieltype_class.get(obj.field_class)
if s:
field_css_classes[name] += [s]
for field in ('show_brush', 'show_legend', 'rich_tooltip'):
field_css_classes[field] += ['input-sm']
class QueryForm(OmgWtForm):
"""The dynamic form object used for the explore view"""
fieldsets = copy(viz.fieldsets)
css_classes = field_css_classes
standalone = HiddenField()
async = HiddenField()
force = HiddenField()
extra_filters = HiddenField()
json = HiddenField()
slice_id = HiddenField()
slice_name = HiddenField()
previous_viz_type = HiddenField(default=viz.viz_type)
collapsed_fieldsets = HiddenField()
viz_type = self.field_dict.get('viz_type')
filter_cols = viz.datasource.filterable_column_names or ['']
for i in range(10):
setattr(QueryForm, 'flt_col_' + str(i), SelectField(
'Filter 1',
default=filter_cols[0],
choices=self.choicify(filter_cols)))
setattr(QueryForm, 'flt_op_' + str(i), SelectField(
'Filter 1',
default='in',
choices=self.choicify(['in', 'not in'])))
setattr(
QueryForm, 'flt_eq_' + str(i),
TextField("Super", default=''))
for field in viz.flat_form_fields():
setattr(QueryForm, field, self.field_dict[field])
def add_to_form(attrs):
for attr in attrs:
setattr(QueryForm, attr, self.field_dict[attr])
# datasource type specific form elements
if viz.datasource.__class__.__name__ == 'SqlaTable':
QueryForm.fieldsets += ({
'label': 'SQL',
'fields': ['where', 'having'],
'description': (
"This section exposes ways to include snippets of "
"SQL in your query"),
},)
add_to_form(('where', 'having'))
grains = viz.datasource.database.grains()
if not viz.datasource.any_dttm_col:
return QueryForm
if grains:
time_fields = ('granularity_sqla', 'time_grain_sqla')
self.field_dict['time_grain_sqla'] = SelectField(
'Time Grain',
choices=self.choicify((grain.name for grain in grains)),
default="Time Column",
                description=(
                    "The time granularity for the visualization. This "
                    "applies a date transformation to alter "
                    "your time column and defines a new time granularity. "
                    "The options here are defined on a per database "
                    "engine basis in the Caravel source code"))
add_to_form(time_fields)
field_css_classes['time_grain_sqla'] = ['form-control', 'select2']
field_css_classes['granularity_sqla'] = ['form-control', 'select2']
else:
time_fields = 'granularity_sqla'
add_to_form((time_fields, ))
else:
time_fields = 'granularity'
add_to_form(('granularity',))
field_css_classes['granularity'] = ['form-control', 'select2']
add_to_form(('since', 'until'))
QueryForm.fieldsets = ({
'label': 'Time',
'fields': (
time_fields,
('since', 'until'),
),
'description': "Time related form attributes",
},) + tuple(QueryForm.fieldsets)
return QueryForm
| apache-2.0 |
dingocuster/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
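# Illustrative sketch (added for clarity, not part of the original suite; the
# helper name is hypothetical): the mocks above model the minimal estimator
# contract Pipeline relies on -- fit() plus get_params()/set_params() -- which
# is what makes the 'step__param' addressing exercised below possible.
def _example_step_param_addressing():
    transf = TransfT()
    pipe = Pipeline([('transf', transf), ('clf', T())])
    # 'transf__a' is split on '__' and forwarded as transf.set_params(a=42).
    pipe.set_params(transf__a=42)
    return transf.a  # -> 42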
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
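# Illustrative sketch (added for clarity, not part of the original suite; the
# helper name is hypothetical and it reuses the imports at the top of this
# module): FeatureUnion fits every transformer on the same input and stacks
# their outputs column-wise, so the output width is the sum of the widths.
def _example_feature_union_width():
    iris = load_iris()
    union = FeatureUnion([("svd", TruncatedSVD(n_components=2)),
                          ("select", SelectKBest(k=1))])
    # 2 SVD components + 1 selected column -> 3 output columns.
    return union.fit_transform(iris.data, iris.target).shape  # (150, 3)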
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
nvoron23/statsmodels | statsmodels/datasets/heart/data.py | 25 | 1858 | """Heart Transplant Data, Miller 1976"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """???"""
TITLE = """Transplant Survival Data"""
SOURCE = """ Miller, R. (1976). Least squares regression with censored dara. Biometrica, 63 (3). 449-464.
"""
DESCRSHORT = """Survival times after receiving a heart transplant"""
DESCRLONG = """This data contains the survival time after receiving a heart transplant, the age of the patient and whether or not the survival time was censored.
"""
NOTE = """::
Number of Observations - 69
Number of Variables - 3
Variable name definitions::
death - Days after surgery until death
age - age at the time of surgery
censored - indicates if an observation is censored. 1 is uncensored
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
dset = du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float)
dset.censors = dset.exog[:,0]
dset.exog = dset.exog[:,1]
return dset
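# Illustrative usage sketch (not part of the original module), assuming the
# statsmodels.datasets.heart package layout above:
#
#   from statsmodels.datasets import heart
#   data = heart.load()
#   data.endog     # survival time in days after surgery
#   data.censors   # censoring indicator (1 = uncensored)
#   data.exog      # age at the time of surgery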
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/heart.csv', 'rb'),
delimiter=",", names = True, dtype=float)
return data
| bsd-3-clause |
caisq/tensorflow | tensorflow/python/estimator/canned/baseline_test.py | 11 | 54918 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for baseline.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator.canned import baseline
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
BIAS_NAME = 'baseline/bias'
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
rtol = ops.convert_to_tensor(rtol, name='rtol')
return check_ops.assert_less(
rdiff,
rtol,
data=('Condition expected =~ actual did not hold element-wise:'
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
def save_variables_to_ckpt(model_dir):
init_all_op = [variables.global_variables_initializer()]
with tf_session.Session() as sess:
sess.run(init_all_op)
saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
def sorted_key_dict(unsorted_dict):
return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}
def sigmoid(x):
return 1 / (1 + np.exp(-1.0 * x))
def _baseline_regressor_fn(*args, **kwargs):
return baseline.BaselineRegressor(*args, **kwargs)
def _baseline_classifier_fn(*args, **kwargs):
return baseline.BaselineClassifier(*args, **kwargs)
# Tests for Baseline Regressor.
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaselineRegressorEvaluationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(
input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)
# Logit is bias = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(
input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
baseline_regressor = _baseline_regressor_fn(
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
    # Average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
label_dim = 2
with ops.Graph().as_default():
variables.Variable([46.0, 58.0], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = baseline_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
# Logit is bias which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaselineRegressorPredictTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with ops.Graph().as_default():
variables.Variable([.2], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = baseline_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
    # The baseline ignores the input x; the prediction is just the bias = .2
self.assertAllClose([[.2]], predicted_scores)
def testMultiDim(self):
"""Tests predict when all variables are multi-dimenstional."""
batch_size = 2
label_dimension = 3
with ops.Graph().as_default():
variables.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = baseline_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = bias, shape=[batch_size, label_dimension]
self.assertAllClose([[0.2, 0.4, 0.6], [0.2, 0.4, 0.6]],
predicted_scores)
class BaselineRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
    # a baseline regressor only fits a bias term (roughly the label mean)
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1 dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum[:label_dimension])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaselineRegressorTrainingTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s:0' % BIAS_NAME
]
def _minimize(loss, global_step=None, var_list=None):
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
label_dimension,
expected_global_step,
expected_bias=None):
shapes = {
name: shape
for (name, shape) in checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(self._model_dir,
ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([label_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
checkpoint_utils.load_variable(self._model_dir,
BIAS_NAME))
def testFromScratchWithDefaultOptimizer(self):
# Create BaselineRegressor.
label = 5.
age = 17
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(label_dimension=1, expected_global_step=num_steps)
def testTrainWithOneDimLabel(self):
label_dimension = 1
batch_size = 20
est = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(label_dimension=1, expected_global_step=200)
def testTrainWithOneDimWeight(self):
label_dimension = 1
batch_size = 20
est = _baseline_regressor_fn(
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1,
'w': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(label_dimension=1, expected_global_step=200)
def testFromScratch(self):
# Create BaselineRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = self._mock_optimizer(expected_loss=25.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=num_steps,
expected_bias=[0.])
def testFromCheckpoint(self):
# Create initial checkpoint.
bias = 7.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
    # logits = bias = 7.
# loss = (logits - label)^2 = (7 - 5)^2 = 4
mock_optimizer = self._mock_optimizer(expected_loss=4.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=initial_global_step + num_steps,
expected_bias=[bias])
def testFromCheckpointMultiBatch(self):
# Create initial checkpoint.
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias
# logits[0] = 5.
# logits[1] = 5.
# loss = sum(logits - label)^2 = (5 - 5)^2 + (5 - 3)^2 = 4
mock_optimizer = self._mock_optimizer(expected_loss=4.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
# Tests for Baseline Classifier.
class BaselineClassifierTrainingTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(
expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return distribute_lib.increment_var(global_step)
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
return distribute_lib.increment_var(global_step)
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(
self, n_classes, expected_global_step, expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
shapes = {
name: shape for (name, shape) in
checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
checkpoint_utils.load_variable(
self._model_dir, ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
checkpoint_utils.load_variable(
self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
    # For the binary classifier:
    # loss = sigmoid_cross_entropy(logits, label) where logits = 0 (the bias
    # starts at zero) and label = 1, so
    # loss = 1 * -log ( sigmoid(logits) ) = 0.69315
    # For the multi class classifier:
    # loss = cross_entropy(logits, label) where logits are all 0s (the bias
    # starts at zero) and label = 1, so
    # loss = 1 * -log ( 1.0 / n_classes )
    # Since the logits are identical across classes, the formula
    # 1 * -log ( 1.0 / n_classes ) covers both the binary and multi class cases.
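    # For example, with all-zero logits: -log(1/2) ~= 0.6931 for n_classes=2
    # and -log(1/4) ~= 1.3863 for n_classes=4.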
mock_optimizer = self._mock_optimizer(
expected_loss=-1 * math.log(1.0/n_classes))
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = bias = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = bias and label = 1
# so, loss = 1 * -log ( softmax(logits)[1] )
if n_classes == 2:
expected_loss = 1.3133
else:
logits = bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[label])
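      # With every per-class bias equal to -1 the softmax is uniform, so this
      # reduces to -log(1/n_classes) (~= 1.3863 for n_classes=4).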
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
bias = [-1.0]
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17, 18.5]
    # For the binary case the bias has shape (1,); for the multi class case it
    # has shape (n_classes,). Every entry is initialized to -1.0 in the
    # checkpoint below.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = bias
# logits[0] = -1.
# logits[1] = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
# loss[1] = (1 - 0) * -log ( 1- sigmoid(-1) ) = 0.3132
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = bias and label = [1, 0]
# so, loss = 1 * -log ( softmax(logits)[label] )
if n_classes == 2:
expected_loss = (1.3133 + 0.3132)
else:
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
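      # Broadcasting the length-n_classes bias against np.ones((2, 1)) yields
      # one identical row of logits per example, shape (2, n_classes).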
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': (age)}, (label)),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
class BaselineClassifierEvaluationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_evaluation_for_simple_data(self, n_classes):
label = 1
age = 1.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1)
if n_classes == 2:
# Binary classes: loss = -log(sigmoid(-1)) = 1.3133
# Prediction = sigmoid(-1) = 0.2689
expected_metrics = {
metric_keys.MetricKeys.LOSS: 1.3133,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: 1.3133,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
metric_keys.MetricKeys.LABEL_MEAN: 1.,
metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 1.,
}
else:
# Multi classes: loss = 1 * -log ( softmax(logits)[label] )
logits = bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[label])
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=2)
def test_multi_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=4)
def _test_evaluation_batch(self, n_classes):
"""Tests evaluation for batch_size==2."""
label = [1, 0]
age = [17., 18.]
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., -1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
# Prediction = sigmoid(-1) = 0.2689
expected_loss = 1.3133 + 0.3132
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.5,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
metric_keys.MetricKeys.LABEL_MEAN: 0.5,
metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
metric_keys.MetricKeys.AUC: 0.5,
metric_keys.MetricKeys.AUC_PR: 0.75,
}
else:
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.5,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=2)
def test_multi_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=4)
def _test_evaluation_weights(self, n_classes):
"""Tests evaluation with weights."""
label = [1, 0]
age = [17., 18.]
weights = [1., 2.]
    # For the binary case the bias has shape (1,); for the multi class case it
    # has shape (n_classes,). Every entry is initialized to -1.0 in the
    # checkpoint below.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., -1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
# weights = [1., 2.]
expected_loss = 1.3133 * 1. + 0.3132 * 2.
loss_mean = expected_loss / (1.0 + 2.0)
label_mean = np.average(label, weights=weights)
logits = [-1, -1]
logistics = sigmoid(np.array(logits))
predictions_mean = np.average(logistics, weights=weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
metric_keys.MetricKeys.LABEL_MEAN: label_mean,
metric_keys.MetricKeys.ACCURACY_BASELINE: (
max(label_mean, 1-label_mean)),
metric_keys.MetricKeys.AUC: 0.5,
metric_keys.MetricKeys.AUC_PR: 2. / (1. + 2.),
}
else:
# Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
loss_mean = np.average([expected_loss_0, expected_loss_1],
weights=weights)
expected_loss = loss_mean * np.sum(weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=2)
def test_multi_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=4)
class BaselineClassifierPredictTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
"""Tests predict when all variables are one-dimensional."""
age = 1.
bias = [10.0] if n_classes == 2 else [10.0] * n_classes
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
label_vocabulary=label_vocabulary,
n_classes=n_classes,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([[age]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = list(est.predict(input_fn=predict_input_fn))
if n_classes == 2:
scalar_logits = bias[0]
two_classes_logits = [0, scalar_logits]
two_classes_logits_exp = np.exp(two_classes_logits)
softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
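      # This test expects the single binary logit z to be scored as the pair
      # [0, z]; softmax([0, z])[1] equals sigmoid(z), so the 'probabilities'
      # and 'logistic' entries below are consistent.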
expected_predictions = {
'class_ids': [1],
'classes': [label_output_fn(1)],
'logistic': [sigmoid(np.array(scalar_logits))],
'logits': [scalar_logits],
'probabilities': softmax,
}
else:
onedim_logits = np.array(bias)
class_ids = onedim_logits.argmax()
logits_exp = np.exp(onedim_logits)
softmax = logits_exp / logits_exp.sum()
expected_predictions = {
'class_ids': [class_ids],
'classes': [label_output_fn(class_ids)],
'logits': onedim_logits,
'probabilities': softmax,
}
self.assertEqual(1, len(predictions))
# assertAllClose cannot handle byte type.
self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
expected_predictions.pop('classes')
predictions[0].pop('classes')
self.assertAllClose(sorted_key_dict(expected_predictions),
sorted_key_dict(predictions[0]))
def testBinaryClassesWithoutLabelVocabulary(self):
n_classes = 2
self._testPredictions(n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testBinaryClassesWithLabelVocabulary(self):
n_classes = 2
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testMultiClassesWithoutLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testMultiClassesWithLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
class BaselineClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
    # a baseline classifier only fits per-class bias terms
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1-dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size, dtype=np.int64)
serialized_examples = []
for x, y in zip(data, target):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=x)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[y])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
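    # Each serialized tf.Example above carries a float feature 'x' of length
    # input_dimension and an int64 label 'y'; parse_example in the input
    # functions below turns the serialized strings back into dense tensors
    # according to feature_spec.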
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=4)
# Tests for Baseline logit_fn.
class BaselineLogitFnTest(test.TestCase):
def test_basic_logit_correctness(self):
"""baseline_logit_fn simply returns the bias variable."""
with ops.Graph().as_default():
logit_fn = baseline._baseline_logit_fn_builder(num_outputs=2)
logits = logit_fn(features={'age': [[23.], [31.]]})
with variable_scope.variable_scope('baseline', reuse=True):
bias_var = variable_scope.get_variable('bias')
with tf_session.Session() as sess:
sess.run([variables.global_variables_initializer()])
self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
sess.run(bias_var.assign([10., 5.]))
self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
if __name__ == '__main__':
test.main()
| apache-2.0 |
michigraber/scikit-learn | sklearn/linear_model/ridge.py | 89 | 39360 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
        # Only one penalty: we can solve the multi-target problem in one go.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
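# In _solve_svd above each singular value s of X contributes a factor
# s / (s**2 + alpha) instead of the pseudo-inverse factor 1 / s, so
# directions with small singular values are damped the most; as alpha -> 0
# the result approaches the ordinary least-squares solution.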
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
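# Usage sketch for _rescale_data (illustrative numbers): with
# sample_weight = [4., 1.] the first row of X and y is multiplied by
# sqrt(4.) = 2. and the second row is left unchanged, so ordinary least
# squares on the rescaled data is equivalent to weighted least squares on
# the original data.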
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
        The 'cholesky', 'sparse_cg' and 'lsqr' solvers support both dense
        and sparse data; the 'svd' solver requires dense input.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
        # cholesky if X is dense (or sample weights are given),
        # sparse_cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
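# Minimal usage sketch for ridge_regression (illustrative, in the doctest
# style used elsewhere in this module):
#
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(10, 3)
#   >>> y = rng.randn(10)
#   >>> coef = ridge_regression(X, y, alpha=1.0)
#   >>> coef.shape
#   (3,)
#
# The intercept is not fitted here; the Ridge estimator below centers the
# data and restores the intercept itself.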
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
        The 'cholesky', 'sparse_cg' and 'lsqr' solvers support both dense
        and sparse data; the 'svd' solver requires dense input.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
        scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
        a dedicated regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions, but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them.
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
            fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
            'auto' : use svd if n_samples > n_features and X is dense,
                     otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
            'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
zfrenchee/pandas | pandas/core/computation/expressions.py | 1 | 7066 | """
Expressions
-----------
Offer fast expression evaluation through numexpr
"""
import warnings
import numpy as np
from pandas.core.common import _values_from_object
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.core.config import get_option
if _NUMEXPR_INSTALLED:
import numexpr as ne
_TEST_MODE = None
_TEST_RESULT = None
_USE_NUMEXPR = _NUMEXPR_INSTALLED
_evaluate = None
_where = None
# the set of dtypes that we will allow to be passed to numexpr
_ALLOWED_DTYPES = {
'evaluate': set(['int64', 'int32', 'float64', 'float32', 'bool']),
'where': set(['int64', 'float64', 'bool'])
}
# the minimum number of elements at which we will use numexpr
_MIN_ELEMENTS = 10000
def set_use_numexpr(v=True):
# set/unset to use numexpr
global _USE_NUMEXPR
if _NUMEXPR_INSTALLED:
_USE_NUMEXPR = v
# choose what we are going to do
global _evaluate, _where
if not _USE_NUMEXPR:
_evaluate = _evaluate_standard
_where = _where_standard
else:
_evaluate = _evaluate_numexpr
_where = _where_numexpr
def set_numexpr_threads(n=None):
# if we are using numexpr, set the threads to n
# otherwise reset
if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
if n is None:
n = ne.detect_number_of_cores()
ne.set_num_threads(n)
def _evaluate_standard(op, op_str, a, b, **eval_kwargs):
""" standard evaluation """
if _TEST_MODE:
_store_test_result(False)
with np.errstate(all='ignore'):
return op(a, b)
def _can_use_numexpr(op, op_str, a, b, dtype_check):
""" return a boolean if we WILL be using numexpr """
if op_str is not None:
# required min elements (otherwise we are adding overhead)
if np.prod(a.shape) > _MIN_ELEMENTS:
# check for dtype compatibility
dtypes = set()
for o in [a, b]:
if hasattr(o, 'get_dtype_counts'):
s = o.get_dtype_counts()
if len(s) > 1:
return False
dtypes |= set(s.index)
elif isinstance(o, np.ndarray):
dtypes |= set([o.dtype.name])
# allowed are a superset
if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
return True
return False
def _evaluate_numexpr(op, op_str, a, b, truediv=True,
reversed=False, **eval_kwargs):
result = None
if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
try:
# we were originally called by a reversed op
# method
if reversed:
a, b = b, a
a_value = getattr(a, "values", a)
b_value = getattr(b, "values", b)
result = ne.evaluate('a_value {op} b_value'.format(op=op_str),
local_dict={'a_value': a_value,
'b_value': b_value},
casting='safe', truediv=truediv,
**eval_kwargs)
except ValueError as detail:
if 'unknown type object' in str(detail):
pass
if _TEST_MODE:
_store_test_result(result is not None)
if result is None:
result = _evaluate_standard(op, op_str, a, b)
return result
def _where_standard(cond, a, b):
return np.where(_values_from_object(cond), _values_from_object(a),
_values_from_object(b))
def _where_numexpr(cond, a, b):
result = None
if _can_use_numexpr(None, 'where', a, b, 'where'):
try:
cond_value = getattr(cond, 'values', cond)
a_value = getattr(a, 'values', a)
b_value = getattr(b, 'values', b)
result = ne.evaluate('where(cond_value, a_value, b_value)',
local_dict={'cond_value': cond_value,
'a_value': a_value,
'b_value': b_value},
casting='safe')
except ValueError as detail:
if 'unknown type object' in str(detail):
pass
except Exception as detail:
raise TypeError(str(detail))
if result is None:
result = _where_standard(cond, a, b)
return result
# turn myself on
set_use_numexpr(get_option('compute.use_numexpr'))
def _has_bool_dtype(x):
try:
return x.dtype == bool
except AttributeError:
try:
return 'bool' in x.dtypes
except AttributeError:
return isinstance(x, (bool, np.bool_))
def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('/', '//', '**')),
unsupported=None):
if unsupported is None:
unsupported = {'+': '|', '*': '&', '-': '^'}
if _has_bool_dtype(a) and _has_bool_dtype(b):
if op_str in unsupported:
warnings.warn("evaluating in Python space because the {op!r} "
"operator is not supported by numexpr for "
"the bool dtype, use {alt_op!r} instead"
.format(op=op_str, alt_op=unsupported[op_str]))
return False
if op_str in not_allowed:
raise NotImplementedError("operator {op!r} not implemented for "
"bool dtypes".format(op=op_str))
return True
def evaluate(op, op_str, a, b, use_numexpr=True,
**eval_kwargs):
""" evaluate and return the expression of the op on a and b
Parameters
----------
    op : the actual operator (a callable implementing the operation)
op_str: the string version of the op
a : left operand
b : right operand
use_numexpr : whether to try to use numexpr (default True)
"""
use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
if use_numexpr:
return _evaluate(op, op_str, a, b, **eval_kwargs)
return _evaluate_standard(op, op_str, a, b)
def where(cond, a, b, use_numexpr=True):
""" evaluate the where condition cond on a and b
Parameters
----------
cond : a boolean array
a : return if cond is True
b : return if cond is False
use_numexpr : whether to try to use numexpr (default True)
"""
if use_numexpr:
return _where(cond, a, b)
return _where_standard(cond, a, b)
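# Usage sketch (illustrative; both helpers fall back to plain numpy when
# numexpr is missing, the operands are too small, or a dtype is not
# supported):
#
#   >>> import operator
#   >>> a = np.random.randn(100000)
#   >>> b = np.random.randn(100000)
#   >>> out = evaluate(operator.add, '+', a, b)   # numexpr path if available
#   >>> picked = where(a > 0, a, b)               # element-wise selection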
def set_test_mode(v=True):
"""
Keeps track of whether numexpr was used. Stores an additional ``True``
for every successful use of evaluate with numexpr since the last
``get_test_result``
"""
global _TEST_MODE, _TEST_RESULT
_TEST_MODE = v
_TEST_RESULT = []
def _store_test_result(used_numexpr):
global _TEST_RESULT
if used_numexpr:
_TEST_RESULT.append(used_numexpr)
def get_test_result():
"""get test result and reset test_results"""
global _TEST_RESULT
res = _TEST_RESULT
_TEST_RESULT = []
return res
| bsd-3-clause |
jseabold/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
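# The svmlight / libsvm text format exercised above stores one sample per
# line:
#   <label> [qid:<qid>] <index>:<value> <index>:<value> ...
# e.g. "3 qid:1 1:0.53 2:0.12" encodes target 3, query id 1 and two non-zero
# features.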
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
russel1237/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tests/formats/test_printing.py | 8 | 4905 | # -*- coding: utf-8 -*-
import nose
from pandas import compat
import pandas.formats.printing as printing
import pandas.formats.format as fmt
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_adjoin():
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
assert (adjoined == expected)
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = printing.pprint_thing(b, quote_strings=True)
tm.assert_equal(res, repr(b))
res = printing.pprint_thing(b, quote_strings=False)
tm.assert_equal(res, b)
class TestFormattBase(tm.TestCase):
def test_adjoin(self):
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
self.assertEqual(adjoined, expected)
def test_adjoin_unicode(self):
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'], ['ggg', 'hhh', u'いいい']]
expected = u'あ dd ggg\nb ええ hhh\nc ff いいい'
adjoined = printing.adjoin(2, *data)
self.assertEqual(adjoined, expected)
adj = fmt.EastAsianTextAdjustment()
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(2, *data)
self.assertEqual(adjoined, expected)
cols = adjoined.split('\n')
self.assertEqual(adj.len(cols[0]), 13)
self.assertEqual(adj.len(cols[1]), 13)
self.assertEqual(adj.len(cols[2]), 16)
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(7, *data)
self.assertEqual(adjoined, expected)
cols = adjoined.split('\n')
self.assertEqual(adj.len(cols[0]), 23)
self.assertEqual(adj.len(cols[1]), 23)
self.assertEqual(adj.len(cols[2]), 26)
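    # In the assertions above each fullwidth character counts as width 2
    # under EastAsianTextAdjustment, which is why the adjoined columns are
    # wider than their plain len() would suggest.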
def test_justify(self):
adj = fmt.EastAsianTextAdjustment()
def just(x, *args, **kwargs):
# wrapper to test single str
return adj.justify([x], *args, **kwargs)[0]
self.assertEqual(just('abc', 5, mode='left'), 'abc ')
self.assertEqual(just('abc', 5, mode='center'), ' abc ')
self.assertEqual(just('abc', 5, mode='right'), ' abc')
self.assertEqual(just(u'abc', 5, mode='left'), 'abc ')
self.assertEqual(just(u'abc', 5, mode='center'), ' abc ')
self.assertEqual(just(u'abc', 5, mode='right'), ' abc')
self.assertEqual(just(u'パンダ', 5, mode='left'), u'パンダ')
self.assertEqual(just(u'パンダ', 5, mode='center'), u'パンダ')
self.assertEqual(just(u'パンダ', 5, mode='right'), u'パンダ')
        self.assertEqual(just(u'パンダ', 10, mode='left'), u'パンダ    ')
        self.assertEqual(just(u'パンダ', 10, mode='center'), u'  パンダ  ')
        self.assertEqual(just(u'パンダ', 10, mode='right'), u'    パンダ')
def test_east_asian_len(self):
adj = fmt.EastAsianTextAdjustment()
self.assertEqual(adj.len('abc'), 3)
self.assertEqual(adj.len(u'abc'), 3)
self.assertEqual(adj.len(u'パンダ'), 6)
        self.assertEqual(adj.len(u'ﾊﾟﾝﾀﾞ'), 5)
self.assertEqual(adj.len(u'パンダpanda'), 11)
        self.assertEqual(adj.len(u'ﾊﾟﾝﾀﾞpanda'), 10)
def test_ambiguous_width(self):
adj = fmt.EastAsianTextAdjustment()
self.assertEqual(adj.len(u'¡¡ab'), 4)
with cf.option_context('display.unicode.ambiguous_as_wide', True):
adj = fmt.EastAsianTextAdjustment()
self.assertEqual(adj.len(u'¡¡ab'), 6)
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'],
['ggg', u'¡¡ab', u'いいい']]
expected = u'あ dd ggg \nb ええ ¡¡ab\nc ff いいい'
adjoined = adj.adjoin(2, *data)
self.assertEqual(adjoined, expected)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# raise nose.SkipTest
# with tm.stdin_encoding(encoding=None):
# result = printing.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
costypetrisor/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 127 | 25365 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
    # Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
    # Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
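    # without feature bootstrapping every estimator is given all features,
    # so the number of unique feature indices equals the feature count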
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
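    # sampling features with replacement makes duplicates very likely, so
    # each estimator ends up with fewer unique features than the full count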
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
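    # the fitted ensemble must return identical probabilities regardless of
    # the n_jobs setting used at prediction time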
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
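    """Always predicts the first class found in y; fit() takes no sample_weight."""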
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    # Test that a warm-started second fit with a smaller n_estimators raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
| bsd-3-clause |
haphaeu/yoshimi | Qt/MotionCurves/orbit.py | 1 | 5357 | # -*- coding: utf-8 -*-
"""
Ctrl+E to clear screen.
Created on Tue Aug 16 15:44:46 2016
@author: rarossi
"""
from PyQt4 import QtGui, QtCore
import numpy as np
import scipy.interpolate as itp
from math import sin, cos
from matplotlib import pyplot as plt
from time import sleep
class Window(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
# make sure mouse motion is captured by mouseMoveEvent
# otherwise only drag is captured
self.setMouseTracking(True)
self.earth_orbit = []
self.scaled_earth_orbit = []
self.sun_pos = np.array((0, 0))
self.scaled_sun_pos = 0
self.r_earth = 6378.137e3 # m
self.r_sun = 695700e3 # m
self.scaled_r_earth = 0
self.scaled_r_sun = 0
self.get_earth_orbit()
self.scale_things()
def resizeEvent(self, ev):
print('resize')
self.scale_things()
#self.update()
QtGui.QWidget.resizeEvent(self, ev)
def mousePressEvent(self, event):
pt = event.pos()
self.update()
QtCore.QCoreApplication.processEvents()
def mouseMoveEvent(self, ev):
pt = ev.pos()
for p in self.earth_orbit:
if max(abs(p[0] - pt.x()), abs(p[1] - pt.y())) < 10:
self.hit = p
break
else:
self.hit = False
self.update()
def paintEvent(self, e):
qp = QtGui.QPainter()
qp.begin(self)
self.drawLines(qp)
qp.end()
def drawLines(self, qp):
if len(self.scaled_earth_orbit) > 0:
qp.setPen(QtCore.Qt.red)
for i in range(len(self.scaled_earth_orbit)-1):
qp.drawLine(self.scaled_earth_orbit[i][0], self.scaled_earth_orbit[i][1],
self.scaled_earth_orbit[i+1][0], self.scaled_earth_orbit[i+1][1])
# draw the sun
qp.drawEllipse(*self.scaled_sun_pos, 2*self.scaled_r_sun, 2*self.scaled_r_sun)
def keyPressEvent(self, e):
if (e.modifiers() & QtCore.Qt.ControlModifier):
            if e.key() == QtCore.Qt.Key_E:  # clear (Ctrl+E)
self.points = []
self.splx, self.sply = [], []
self.update()
QtCore.QCoreApplication.processEvents()
def get_earth_orbit(self, year_resolution=365):
"""Solver Earth's orbit motion using classic Newton mechanics.
The Earth is subject to an acceleration pointing towards the sun.
The initial conditions, the Earth is put at the Aphelion with
maximum orbital speed.
https://nssdc.gsfc.nasa.gov/planetary/factsheet/earthfact.html
https://nssdc.gsfc.nasa.gov/planetary/factsheet/sunfact.html
"""
# Constants
G = 6.674e-11 # N*(m/kg)^2
m_earth = 5.9723e24 # kg
m_sun = 1988500e24 # kg
aphelion = 147.09e9 # m
max_orbital_speed = 30.29e3 # m/s
# earth starts at the aphelion
# sun is at (0, 0)
pos_earth = np.array((-aphelion, 0)) # m
vel_earth = np.array((0, max_orbital_speed)) # m/s
dt = 365.256*24*3600/(year_resolution-1) # s
t = 0 # s
self.earth_orbit = np.zeros((year_resolution, 2))
self.earth_orbit[0] = pos_earth
for i in range(1, year_resolution):
t += dt
r = pos_earth.dot(pos_earth)**0.5 # distance earth-sun
u = -pos_earth/r # unit vector pointing towards the sun
force = G * m_earth * m_sun / r**2 * u
accel = force / m_earth
vel_earth += accel*dt
pos_earth += vel_earth*dt
self.earth_orbit[i] = pos_earth
with open('orbit.txt', 'w') as pf:
pf.write(np.array2string(self.earth_orbit))
def scale_things(self):
pad = np.array((10, 10))
canvas_size = np.array([self.size().width(), self.size().height()]) - 2*pad
shift = pad + canvas_size/2
orbit_range = self.earth_orbit.max(axis=0) - self.earth_orbit.min(axis=0)
scale = max(orbit_range / canvas_size)
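        # use the larger of the two axis ratios so the whole orbit fits in
        # the canvas while keeping the x/y aspect ratio equal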
self.scaled_earth_orbit = self.earth_orbit / scale + shift
self.scaled_r_sun = self.r_sun / scale
self.scaled_r_earth = self.r_earth / scale
self.scaled_sun_pos = self.sun_pos / scale + shift
print('canvas_size', canvas_size)
print('orbit_range', orbit_range)
print('scale', scale)
# Deprecated
#def kepler(M, e, tol=1e-4, max_iters=100, verbose=False, out_stats=False):
# """Solves Kepler's Equation.
#
# M: mean anomaly
# e: eccentricity
#
# Return:
#
# E: eccentric anomaly
#
# https://en.wikipedia.org/wiki/Kepler%27s_equation
# """
# E = M
# err = 9.9
# i = 0
# if verbose:
# print('iter\t E\t err')
# while abs(err) > tol and i < max_iters:
# # Newton method to solve trancendental equation
# err = E-e*sin(E)-M / (1 - e*cos(E))
# E -= err
# i += 1
# if verbose:
# print('%02d\t%.8f\t%.8f' % (i, E, err))
# if out_stats:
# return E, err, i
# else:
# return E
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = Window()
window.resize(640, 480)
window.show()
sys.exit(app.exec_())
| lgpl-3.0 |
benanne/morb | examples/example_mnist_labeled.py | 1 | 6200 | import morb
from morb import rbms, stats, updaters, trainers, monitors, units, parameters
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
plt.ion()
from utils import generate_data, get_context, one_hot
# DEBUGGING
from theano import ProfileMode
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# load data
print ">> Loading dataset..."
f = gzip.open('datasets/mnist.pkl.gz','rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
train_set_x, train_set_y = train_set
valid_set_x, valid_set_y = valid_set
test_set_x, test_set_y = test_set
# convert labels to one hot representation
train_set_y_oh = one_hot(np.atleast_2d(train_set_y).T)
valid_set_y_oh = one_hot(np.atleast_2d(valid_set_y).T)
test_set_y_oh = one_hot(np.atleast_2d(test_set_y).T)
# dim 0 = minibatches, dim 1 = units, dim 2 = states
train_set_y_oh = train_set_y_oh.reshape((train_set_y_oh.shape[0], 1, train_set_y_oh.shape[1]))
valid_set_y_oh = valid_set_y_oh.reshape((valid_set_y_oh.shape[0], 1, valid_set_y_oh.shape[1]))
test_set_y_oh = test_set_y_oh.reshape((test_set_y_oh.shape[0], 1, test_set_y_oh.shape[1]))
# make the sets a bit smaller for testing purposes
train_set_x = train_set_x[:10000]
train_set_y_oh = train_set_y_oh[:10000]
valid_set_x = valid_set_x[:1000]
valid_set_y_oh = valid_set_y_oh[:1000]
n_visible = train_set_x.shape[1]
n_hidden = 100
n_states = train_set_y_oh.shape[2]
print ">> Constructing RBM..."
rbm = rbms.BinaryBinaryRBM(n_visible, n_hidden)
# add softmax unit for context
rbm.s = units.SoftmaxUnits(rbm, name='s')
# link context and hiddens
initial_Ws = np.asarray( np.random.uniform(
low = -4*np.sqrt(6./(n_hidden+1+n_states)),
high = 4*np.sqrt(6./(n_hidden+1+n_states)),
size = (1, n_states, n_hidden)),
dtype = theano.config.floatX)
rbm.Ws = parameters.AdvancedProdParameters(rbm, [rbm.s, rbm.h], [2, 1], theano.shared(value = initial_Ws, name='Ws'), name='Ws')
initial_vmap = { rbm.v: T.matrix('v'), rbm.s: T.tensor3('s') }
# try to calculate weight updates using CD-1 stats
print ">> Constructing contrastive divergence updaters..."
s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h], context_units=[rbm.s], k=1, mean_field_for_stats=[rbm.v], mean_field_for_gibbs=[rbm.v])
# s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v, rbm.s], hidden_units=[rbm.h], k=1, mean_field_for_stats=[rbm.v], mean_field_for_gibbs=[rbm.v])
umap = {}
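# each RBM parameter is updated by a small step (learning rate 0.001)
# along the CD-1 gradient estimate computed above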
for var in rbm.variables:
pu = var + 0.001 * updaters.CDUpdater(rbm, var, s)
umap[var] = pu
print ">> Compiling functions..."
t = trainers.MinibatchTrainer(rbm, umap)
m = monitors.reconstruction_mse(s, rbm.v)
m_data = s['data'][rbm.v]
m_model = s['model'][rbm.v]
e_data = rbm.energy(s['data']).mean()
e_model = rbm.energy(s['model']).mean()
train = t.compile_function(initial_vmap, mb_size=100, monitors=[m, e_data, e_model], name='train', mode=mode)
evaluate = t.compile_function(initial_vmap, mb_size=100, monitors=[m, m_data, m_model, e_data, e_model], name='evaluate', train=False, mode=mode)
def plot_data(d):
plt.figure(5)
plt.clf()
plt.imshow(d.reshape((28,28)), interpolation='gaussian')
plt.draw()
def sample_evolution(start, cls, ns=100): # start = start data
sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)
data = start
plot_data(data)
label = one_hot(np.atleast_2d(cls), dim=10)
label = label.reshape((label.shape[0], 1, label.shape[1]))
while True:
for k in range(ns):
for x in sample({ rbm.v: data, rbm.s: label }): # draw a new sample
data = x[0]
plot_data(data)
# TRAINING
epochs = 200
print ">> Training for %d epochs..." % epochs
mses_train_so_far = []
mses_valid_so_far = []
edata_train_so_far = []
emodel_train_so_far = []
edata_so_far = []
emodel_so_far = []
for epoch in range(epochs):
monitoring_data_train = [(cost, energy_data, energy_model) for cost, energy_data, energy_model in train({ rbm.v: train_set_x, rbm.s: train_set_y_oh })]
mses_train, edata_train_list, emodel_train_list = zip(*monitoring_data_train)
mse_train = np.mean(mses_train)
edata_train = np.mean(edata_train_list)
emodel_train = np.mean(emodel_train_list)
monitoring_data = [(cost, data, model, energy_data, energy_model) for cost, data, model, energy_data, energy_model in evaluate({ rbm.v: valid_set_x, rbm.s: valid_set_y_oh })]
mses_valid, vdata, vmodel, edata, emodel = zip(*monitoring_data)
mse_valid = np.mean(mses_valid)
edata_valid = np.mean(edata)
emodel_valid = np.mean(emodel)
# plotting
mses_train_so_far.append(mse_train)
mses_valid_so_far.append(mse_valid)
edata_so_far.append(edata_valid)
emodel_so_far.append(emodel_valid)
edata_train_so_far.append(edata_train)
emodel_train_so_far.append(emodel_train)
plt.figure(1)
plt.clf()
plt.plot(mses_train_so_far, label='train')
plt.plot(mses_valid_so_far, label='validation')
plt.title("MSE")
plt.legend()
plt.draw()
plt.figure(4)
plt.clf()
plt.plot(edata_so_far, label='validation / data')
plt.plot(emodel_so_far, label='validation / model')
plt.plot(edata_train_so_far, label='train / data')
plt.plot(emodel_train_so_far, label='train / model')
plt.title("energy")
plt.legend()
plt.draw()
# plot some samples
plt.figure(2)
plt.clf()
plt.imshow(vdata[0][0].reshape((28, 28)))
plt.draw()
plt.figure(3)
plt.clf()
plt.imshow(vmodel[0][0].reshape((28, 28)))
plt.draw()
print "Epoch %d" % epoch
print "training set: MSE = %.6f, data energy = %.2f, model energy = %.2f" % (mse_train, edata_train, emodel_train)
print "validation set: MSE = %.6f, data energy = %.2f, model energy = %.2f" % (mse_valid, edata_valid, emodel_valid)
| gpl-3.0 |
SedFoam/sedfoam | tutorials/Py/plot_tuto1DBedLoadCLB.py | 1 | 3893 | import subprocess
import sys
import numpy as np
import fluidfoam
from pylab import matplotlib, mpl, figure, subplot, savefig, show
import matplotlib.gridspec as gridspec
from analytic_coulomb2D import analytic_coulomb2D
#
# Change fontsize
#
matplotlib.rcParams.update({'font.size': 20})
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['lines.markersize'] = 5
mpl.rcParams['lines.markeredgewidth'] = 1
#
# Change subplot sizes
#
gs = gridspec.GridSpec(1, 3)
gs.update(left=0.1, right=0.95, top=0.95,
bottom=0.1, wspace=0.125, hspace=0.25)
#
# Figure size
#
figwidth = 18
figheight = 9
#
#
#
zmin = 0.
zmax = 0.065
#
# compute the analytical solution in dimensionless form
#
nx = 200
xex = np.linspace(0, 1., nx)
# dimensionless parameters
mus = 0.32
phi0 = 0.6
eta_e = (1. + 2.5 * phi0)
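# effective viscosity factor; the (1 + 2.5*phi) form matches Einstein's
# dilute-suspension correction (assumed interpretation)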
# dimensional parameters
D = 0.065
etaf = 0.27
rho_f = 1070.
rho_p = 1190.
drho = rho_p - rho_f
g = 9.81
hp = 0.03225 / D
print("hp=" + str(hp))
# pressure gradient
dpdx = -100. / (drho * g)
# Compute the analytical solution
alphaex = np.ones(nx) * phi0
toto = np.where(xex[:] > hp)
alphaex[toto] = 0.
pex = np.zeros(nx)
for i in range(nx):
if alphaex[nx - i - 1] > 0.:
pex[nx - i - 1] = pex[nx - i] + alphaex[nx - i] * \
(xex[nx - i] - xex[nx - i - 1])
[uex, hc] = analytic_coulomb2D(nx, xex, dpdx, hp, mus, phi0, eta_e)
duxmax = 0.
nuex = np.zeros(nx)
for i in range(nx - 1):
duexdz = (uex[i] - uex[i - 1]) / (xex[i] - xex[i - 1])
duxmax = max([duxmax, duexdz])
nuex[i] = mus * pex[i] / (rho_p * (np.abs(duexdz) + 1e-6))
#
# dimensional form
#
U0 = drho * g * D**2 / etaf
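# characteristic velocity scale used to re-dimensionalise the analytical profile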
uex = uex * U0
xex = xex * D
pex = pex * drho * g * D
print("max(uex)=" + str(np.max(uex)) + " m/s")
#########################################
# Loading OpenFoam results
#########################################
#
case = '1DBedLoad'
basepath = '../'
# basepath='../../'
sol = basepath + case + '/'
try:
proc = subprocess.Popen(
['foamListTimes', '-latestTime', '-case', sol], stdout=subprocess.PIPE)
except OSError:
    print("foamListTimes : command not found")
    print("Have you loaded the OpenFOAM environment?")
sys.exit(0)
output = proc.stdout.read()
tread = output.decode().rstrip().split('\n')[0]
Nx = 1
Ny = 200
Nz = 1
eps_file = sol + case + '.eps'
#########################################
# Reading SedFoam results
#########################################
X, Y, Z = fluidfoam.readmesh(sol)
alpha = fluidfoam.readscalar(sol, tread, 'alpha_a')
Ua = fluidfoam.readvector(sol, tread, 'Ua')
Ub = fluidfoam.readvector(sol, tread, 'Ub')
pff = fluidfoam.readscalar(sol, tread, 'pff')
tau = fluidfoam.readtensor(sol, tread, 'Taua')[3]
#p = fluidfoam.readscalar(sol, tread, 'p')
Ny = np.size(Y)
U = np.zeros(Ny)
U = alpha[:] * Ua[0, :] + (1 - alpha[:]) * Ub[0, :]
print("max(Ub)=" + str(np.amax(Ub)) + " m/s")
#########################################
# figure 1
#########################################
figure(num=1, figsize=(figwidth, figheight),
dpi=60, facecolor='w', edgecolor='w')
ax1 = subplot(gs[0, 0])
l11, = ax1.plot(alpha[:], Y[:], '-r')
l1, = ax1.plot(alphaex[:], xex[:], '--k')
ax1.set_ylabel('y (m)')
ax1.set_xlabel(r'$\alpha$')
ax1.set_xlim(0, np.max(np.max(alpha)) * 1.1)
ax1.set_ylim(zmin, zmax)
ax2 = subplot(gs[0, 1])
l21, = ax2.plot(U[:], Y[:], '-r')
l2, = ax2.plot(uex[:], xex[:], '--k')
ax2.set_xlabel('u ($m/s$)')
ax2.set_xlim(0, np.max(uex) * 1.1)
ax2.set_ylim(zmin, zmax)
ax2.set_yticklabels([''])
ax3 = subplot(gs[0, 2])
l31, = ax3.plot(pff[:], Y[:], '-r')
l3, = ax3.plot(pex[:], xex[:], '--k')
ax3.set_xlabel('p ($N/m^2$)')
ax3.set_xlim(0, np.max(pex) * 1.1)
ax3.set_ylim(zmin, zmax)
ax3.set_yticklabels([''])
savefig('Figures/res1_tuto2.png', facecolor='w', edgecolor='w', format='png')
show(block=True)
# Fix Python 2.x.
try: input = raw_input
except NameError: pass
toto = input("Hit a key to close the figure")
| gpl-2.0 |
davidbrazdil/nacl | site_scons/site_tools/naclsdk.py | 1 | 26632 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""NaCl SDK tool SCons."""
import __builtin__
import re
import os
import shutil
import sys
import SCons.Scanner
import SCons.Script
import subprocess
import tempfile
NACL_TOOL_MAP = {
'arm': {
'32': {
'tooldir': 'arm-nacl',
'as_flag': '',
'cc_flag': '',
'ld_flag': '',
},
},
'x86': {
'32': {
'tooldir': 'i686-nacl',
'other_libdir': 'lib32',
'as_flag': '--32',
'cc_flag': '-m32',
'ld_flag': ' -melf_i386_nacl',
},
'64': {
'tooldir': 'x86_64-nacl',
'other_libdir': 'lib64',
'as_flag': '--64',
'cc_flag': '-m64',
'ld_flag': ' -melf_x86_64_nacl',
},
},
}
def _StubOutEnvToolsForBuiltElsewhere(env):
"""Stub out all tools so that they point to 'true'.
  Some machines have their code built by another machine, so they run 'true'
  instead of the usual build tools.
Args:
env: The SCons environment in question.
"""
assert(env.Bit('built_elsewhere'))
env.Replace(CC='true', CXX='true', LINK='true', AR='true',
RANLIB='true', AS='true', ASPP='true', LD='true',
STRIP='true')
def _SetEnvForNativeSdk(env, sdk_path):
"""Initialize environment according to target architecture."""
bin_path = os.path.join(sdk_path, 'bin')
# NOTE: attempts to eliminate this PATH setting and use
# absolute path have been futile
env.PrependENVPath('PATH', bin_path)
tool_prefix = None
tool_map = NACL_TOOL_MAP[env['TARGET_ARCHITECTURE']]
subarch_spec = tool_map[env['TARGET_SUBARCH']]
tooldir = subarch_spec['tooldir']
# We need to pass it extra options for the subarch we are building.
as_mode_flag = subarch_spec['as_flag']
cc_mode_flag = subarch_spec['cc_flag']
ld_mode_flag = subarch_spec['ld_flag']
if os.path.exists(os.path.join(sdk_path, tooldir)):
# The tooldir for the build target exists.
# The tools there do the right thing without special options.
tool_prefix = tooldir
libdir = os.path.join(tooldir, 'lib')
else:
# We're building for a target for which there is no matching tooldir.
# For example, for x86-32 when only <sdk_path>/x86_64-nacl/ exists.
# Find a tooldir for a different subarch that does exist.
others_map = tool_map.copy()
del others_map[env['TARGET_SUBARCH']]
for subarch, tool_spec in others_map.iteritems():
tooldir = tool_spec['tooldir']
if os.path.exists(os.path.join(sdk_path, tooldir)):
# OK, this is the other subarch to use as tooldir.
tool_prefix = tooldir
# The lib directory may have an alternate name, i.e.
# 'lib32' in the x86_64-nacl tooldir.
libdir = os.path.join(tooldir, subarch_spec.get('other_libdir', 'lib'))
break
if tool_prefix is None:
raise Exception("Cannot find a toolchain for %s in %s" %
(env['TARGET_FULLARCH'], sdk_path))
env.Replace(# Replace header and lib paths.
# where to put nacl extra sdk headers
# TODO(robertm): switch to using the mechanism that
# passes arguments to scons
NACL_SDK_INCLUDE='%s/%s/include' % (sdk_path, tool_prefix),
# where to find/put nacl generic extra sdk libraries
NACL_SDK_LIB='%s/%s' % (sdk_path, libdir),
# Replace the normal unix tools with the NaCl ones.
CC=os.path.join(bin_path, '%s-gcc' % tool_prefix),
CXX=os.path.join(bin_path, '%s-g++' % tool_prefix),
AR=os.path.join(bin_path, '%s-ar' % tool_prefix),
AS=os.path.join(bin_path, '%s-as' % tool_prefix),
ASPP=os.path.join(bin_path, '%s-gcc' % tool_prefix),
GDB=os.path.join(bin_path, '%s-gdb' % tool_prefix),
# NOTE: use g++ for linking so we can handle C AND C++.
LINK=os.path.join(bin_path, '%s-g++' % tool_prefix),
# Grrr... and sometimes we really need ld.
LD=os.path.join(bin_path, '%s-ld' % tool_prefix) + ld_mode_flag,
RANLIB=os.path.join(bin_path, '%s-ranlib' % tool_prefix),
NM=os.path.join(bin_path, '%s-nm' % tool_prefix),
OBJDUMP=os.path.join(bin_path, '%s-objdump' % tool_prefix),
STRIP=os.path.join(bin_path, '%s-strip' % tool_prefix),
ADDR2LINE=os.path.join(bin_path, '%s-addr2line' % tool_prefix),
BASE_LINKFLAGS=[cc_mode_flag],
BASE_CFLAGS=[cc_mode_flag],
BASE_CXXFLAGS=[cc_mode_flag],
BASE_ASFLAGS=[as_mode_flag],
BASE_ASPPFLAGS=[cc_mode_flag],
CFLAGS=['-std=gnu99'],
CCFLAGS=['-O3',
'-Werror',
'-Wall',
'-Wno-variadic-macros',
'-Wswitch-enum',
'-g',
'-fno-stack-protector',
'-fdiagnostics-show-option',
'-pedantic',
'-D__linux__',
],
ASFLAGS=[],
)
# NaClSdk environment seems to be inherited from the host environment.
# On Linux host, this probably makes sense. On Windows and Mac, this
# introduces nothing except problems.
# For now, simply override the environment settings as in
# <scons>/engine/SCons/Platform/posix.py
env.Replace(LIBPREFIX='lib',
LIBSUFFIX='.a',
SHLIBPREFIX='$LIBPREFIX',
SHLIBSUFFIX='.so',
LIBPREFIXES=['$LIBPREFIX'],
LIBSUFFIXES=['$LIBSUFFIX', '$SHLIBSUFFIX'],
)
# Force -fPIC when compiling for shared libraries.
env.AppendUnique(SHCCFLAGS=['-fPIC'],
)
def _SetEnvForPnacl(env, root):
# All the PNaCl tools require Python to be in the PATH.
arch = env['TARGET_FULLARCH']
assert arch in ['arm', 'mips32', 'x86-32', 'x86-64']
if env.Bit('pnacl_unsandboxed'):
if env.Bit('host_linux'):
arch = '%s-linux' % arch
elif env.Bit('host_mac'):
arch = '%s-mac' % arch
if env.Bit('nonsfi_nacl'):
arch += '-nonsfi'
arch_flag = ' -arch %s' % arch
if env.Bit('pnacl_generate_pexe'):
ld_arch_flag = ''
else:
ld_arch_flag = arch_flag
translator_root = os.path.join(os.path.dirname(root), 'pnacl_translator')
binprefix = os.path.join(root, 'bin', 'pnacl-')
binext = ''
if env.Bit('host_windows'):
binext = '.bat'
pnacl_ar = binprefix + 'ar' + binext
pnacl_as = binprefix + 'as' + binext
pnacl_nm = binprefix + 'nm' + binext
pnacl_ranlib = binprefix + 'ranlib' + binext
# Use the standalone sandboxed translator in sbtc mode
if env.Bit('use_sandboxed_translator'):
pnacl_translate = os.path.join(translator_root, 'bin',
'pnacl-translate' + binext)
else:
pnacl_translate = binprefix + 'translate' + binext
pnacl_cc = binprefix + 'clang' + binext
pnacl_cxx = binprefix + 'clang++' + binext
pnacl_ld = binprefix + 'ld' + binext
pnacl_disass = binprefix + 'dis' + binext
pnacl_finalize = binprefix + 'finalize' + binext
pnacl_strip = binprefix + 'strip' + binext
# NOTE: XXX_flags start with space for easy concatenation
# The flags generated here get baked into the commands (CC, CXX, LINK)
# instead of CFLAGS etc to keep them from getting blown away by some
# tests. Don't add flags here unless they always need to be preserved.
pnacl_cxx_flags = ''
pnacl_cc_flags = ' -std=gnu99'
pnacl_ld_flags = ' ' + ' '.join(env['PNACL_BCLDFLAGS'])
pnacl_translate_flags = ''
if env.Bit('nacl_pic'):
pnacl_cc_flags += ' -fPIC'
pnacl_cxx_flags += ' -fPIC'
# NOTE: this is a special hack for the pnacl backend which
# does more than linking
pnacl_ld_flags += ' -fPIC'
pnacl_translate_flags += ' -fPIC'
if env.Bit('use_sandboxed_translator'):
sb_flags = ' --pnacl-sb'
pnacl_ld_flags += sb_flags
pnacl_translate_flags += sb_flags
if env.Bit('x86_64_zero_based_sandbox'):
pnacl_translate_flags += ' -sfi-zero-based-sandbox'
env.Replace(# Replace header and lib paths.
NACL_SDK_INCLUDE=os.path.join(root, 'usr', 'include'),
NACL_SDK_LIB=os.path.join(root, 'lib'),
# Remove arch-specific flags (if any)
BASE_LINKFLAGS='',
BASE_CFLAGS='',
BASE_CXXFLAGS='',
BASE_ASFLAGS='',
BASE_ASPPFLAGS='',
# Replace the normal unix tools with the PNaCl ones.
CC=pnacl_cc + pnacl_cc_flags,
CXX=pnacl_cxx + pnacl_cxx_flags,
ASPP=pnacl_cc + pnacl_cc_flags,
LIBPREFIX="lib",
SHLIBPREFIX="lib",
SHLIBSUFFIX=".so",
OBJSUFFIX=".bc",
LINK=pnacl_cxx + ld_arch_flag + pnacl_ld_flags,
# Although we are currently forced to produce native output
# for LINK, we are free to produce bitcode for SHLINK
# (SharedLibrary linking) because scons doesn't do anything
# with shared libraries except use them with the toolchain.
SHLINK=pnacl_cxx + ld_arch_flag + pnacl_ld_flags,
LD=pnacl_ld,
AR=pnacl_ar,
AS=pnacl_as + ld_arch_flag,
RANLIB=pnacl_ranlib,
DISASS=pnacl_disass,
OBJDUMP=pnacl_disass,
STRIP=pnacl_strip,
TRANSLATE=pnacl_translate + arch_flag + pnacl_translate_flags,
PNACLFINALIZE=pnacl_finalize,
)
if env.Bit('built_elsewhere'):
def FakeInstall(dest, source, env):
print 'Not installing', dest
_StubOutEnvToolsForBuiltElsewhere(env)
env.Replace(INSTALL=FakeInstall)
if env.Bit('translate_in_build_step'):
env.Replace(TRANSLATE='true')
env.Replace(PNACLFINALIZE='true')
def PNaClForceNative(env):
assert(env.Bit('bitcode'))
if env.Bit('pnacl_generate_pexe'):
env.Replace(CC='NO-NATIVE-CC-INVOCATION-ALLOWED',
CXX='NO-NATIVE-CXX-INVOCATION-ALLOWED')
return
env.Replace(OBJSUFFIX='.o',
SHLIBSUFFIX='.so')
arch_flag = ' -arch ${TARGET_FULLARCH}'
cc_flags = ' --pnacl-allow-native --pnacl-allow-translate'
env.Append(CC=arch_flag + cc_flags,
CXX=arch_flag + cc_flags,
ASPP=arch_flag + cc_flags,
LINK=cc_flags) # Already has -arch
env['LD'] = 'NO-NATIVE-LD-INVOCATION-ALLOWED'
env['SHLINK'] = '${LINK}'
if env.Bit('built_elsewhere'):
_StubOutEnvToolsForBuiltElsewhere(env)
# Get an environment for nacl-gcc when in PNaCl mode.
def PNaClGetNNaClEnv(env):
assert(env.Bit('bitcode'))
assert(not env.Bit('target_mips32'))
# This is kind of a hack. We clone the environment,
# clear the bitcode bit, and then reload naclsdk.py
native_env = env.Clone()
native_env.ClearBits('bitcode')
if env.Bit('built_elsewhere'):
_StubOutEnvToolsForBuiltElsewhere(env)
else:
native_env = native_env.Clone(tools=['naclsdk'])
if native_env.Bit('pnacl_generate_pexe'):
native_env.Replace(CC='NO-NATIVE-CC-INVOCATION-ALLOWED',
CXX='NO-NATIVE-CXX-INVOCATION-ALLOWED')
else:
# These are unfortunately clobbered by running Tool.
native_env.Replace(EXTRA_CFLAGS=env['EXTRA_CFLAGS'],
EXTRA_CXXFLAGS=env['EXTRA_CXXFLAGS'],
CCFLAGS=env['CCFLAGS'],
CFLAGS=env['CFLAGS'],
CXXFLAGS=env['CXXFLAGS'])
return native_env
# This adds architecture specific defines for the target architecture.
# These are normally omitted by PNaCl.
# For example: __i686__, __arm__, __mips__, __x86_64__
def AddBiasForPNaCl(env, temporarily_allow=True):
assert(env.Bit('bitcode'))
# re: the temporarily_allow flag -- that is for:
# BUG= http://code.google.com/p/nativeclient/issues/detail?id=1248
if env.Bit('pnacl_generate_pexe') and not temporarily_allow:
env.Replace(CC='NO-NATIVE-CC-INVOCATION-ALLOWED',
CXX='NO-NATIVE-CXX-INVOCATION-ALLOWED')
return
if env.Bit('target_arm'):
env.AppendUnique(CCFLAGS=['--pnacl-arm-bias'],
ASPPFLAGS=['--pnacl-arm-bias'])
elif env.Bit('target_x86_32'):
env.AppendUnique(CCFLAGS=['--pnacl-i686-bias'],
ASPPFLAGS=['--pnacl-i686-bias'])
elif env.Bit('target_x86_64'):
env.AppendUnique(CCFLAGS=['--pnacl-x86_64-bias'],
ASPPFLAGS=['--pnacl-x86_64-bias'])
elif env.Bit('target_mips32'):
env.AppendUnique(CCFLAGS=['--pnacl-mips-bias'],
ASPPFLAGS=['--pnacl-mips-bias'])
else:
raise Exception("Unknown architecture!")
def ValidateSdk(env):
checkables = ['${NACL_SDK_INCLUDE}/stdio.h']
for c in checkables:
if os.path.exists(env.subst(c)):
continue
# Windows build does not use cygwin and so can not see nacl subdirectory
# if it's cygwin's symlink - check for /include instead...
if os.path.exists(re.sub(r'(nacl64|nacl)/include/([^/]*)$',
r'include/\2',
env.subst(c))):
continue
# TODO(pasko): remove the legacy header presence test below.
if os.path.exists(re.sub(r'nacl/include/([^/]*)$',
r'nacl64/include/\1',
env.subst(c))):
continue
message = env.subst('''
ERROR: NativeClient toolchain does not seem to be present!
Missing: %s
Configuration is:
NACL_SDK_INCLUDE=${NACL_SDK_INCLUDE}
NACL_SDK_LIB=${NACL_SDK_LIB}
CC=${CC}
CXX=${CXX}
AR=${AR}
AS=${AS}
ASPP=${ASPP}
LINK=${LINK}
RANLIB=${RANLIB}
Run: gclient runhooks --force or build the SDK yourself.
''' % c)
sys.stderr.write(message + "\n\n")
sys.exit(-1)
def ScanLinkerScript(node, env, libpath):
"""SCons scanner for linker script files.
This handles trivial linker scripts like those used for libc.so and libppapi.a.
These scripts just indicate more input files to be linked in, so we want
to produce dependencies on them.
A typical such linker script looks like:
/* Some comments. */
INPUT ( foo.a libbar.a libbaz.a )
or:
/* GNU ld script
Use the shared library, but some functions are only in
the static library, so try that secondarily. */
OUTPUT_FORMAT(elf64-x86-64)
GROUP ( /lib/libc.so.6 /usr/lib/libc_nonshared.a
AS_NEEDED ( /lib/ld-linux-x86-64.so.2 ) )
"""
contents = node.get_text_contents()
if contents.startswith('!<arch>\n') or contents.startswith('\177ELF'):
# An archive or ELF file is not a linker script.
return []
comment_pattern = re.compile(r'/\*.*?\*/', re.DOTALL | re.MULTILINE)
def remove_comments(text):
return re.sub(comment_pattern, '', text)
tokens = remove_comments(contents).split()
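  # Walk the token stream: skip linker-script keywords and parentheses,
  # and treat every remaining token as a library/input file name.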
libs = []
while tokens:
token = tokens.pop()
if token.startswith('OUTPUT_FORMAT('):
pass
elif token == 'OUTPUT_FORMAT':
# Swallow the next three tokens: '(', 'xyz', ')'
del tokens[0:2]
elif token in ['(', ')', 'INPUT', 'GROUP', 'AS_NEEDED']:
pass
else:
libs.append(token)
# Find those items in the library path, ignoring ones we fail to find.
found = [SCons.Node.FS.find_file(lib, libpath) for lib in libs]
return [lib for lib in found if lib is not None]
# This is a modified copy of the class TempFileMunge in
# third_party/scons-2.0.1/engine/SCons/Platform/__init__.py.
# It differs in using quote_for_at_file (below) in place of
# SCons.Subst.quote_spaces.
class NaClTempFileMunge(object):
"""A callable class. You can set an Environment variable to this,
then call it with a string argument, then it will perform temporary
file substitution on it. This is used to circumvent the long command
line limitation.
Example usage:
env["TEMPFILE"] = TempFileMunge
env["LINKCOM"] = "${TEMPFILE('$LINK $TARGET $SOURCES')}"
By default, the name of the temporary file used begins with a
  prefix of '@'. This may be configured for other tool chains by
setting '$TEMPFILEPREFIX'.
env["TEMPFILEPREFIX"] = '-@' # diab compiler
env["TEMPFILEPREFIX"] = '-via' # arm tool chain
"""
def __init__(self, cmd):
self.cmd = cmd
def __call__(self, target, source, env, for_signature):
if for_signature:
# If we're being called for signature calculation, it's
# because we're being called by the string expansion in
# Subst.py, which has the logic to strip any $( $) that
# may be in the command line we squirreled away. So we
# just return the raw command line and let the upper
# string substitution layers do their thing.
return self.cmd
# Now we're actually being called because someone is actually
# going to try to execute the command, so we have to do our
# own expansion.
cmd = env.subst_list(self.cmd, SCons.Subst.SUBST_CMD, target, source)[0]
try:
maxline = int(env.subst('$MAXLINELENGTH'))
except ValueError:
maxline = 2048
length = 0
for c in cmd:
length += len(c)
if length <= maxline:
return self.cmd
# We do a normpath because mktemp() has what appears to be
# a bug in Windows that will use a forward slash as a path
# delimiter. Windows's link mistakes that for a command line
# switch and barfs.
#
# We use the .lnk suffix for the benefit of the Phar Lap
# linkloc linker, which likes to append an .lnk suffix if
# none is given.
(fd, tmp) = tempfile.mkstemp('.lnk', text=True)
native_tmp = SCons.Util.get_native_path(os.path.normpath(tmp))
if env['SHELL'] and env['SHELL'] == 'sh':
# The sh shell will try to escape the backslashes in the
# path, so unescape them.
native_tmp = native_tmp.replace('\\', r'\\\\')
# In Cygwin, we want to use rm to delete the temporary
# file, because del does not exist in the sh shell.
rm = env.Detect('rm') or 'del'
else:
# Don't use 'rm' if the shell is not sh, because rm won't
# work with the Windows shells (cmd.exe or command.com) or
# Windows path names.
rm = 'del'
prefix = env.subst('$TEMPFILEPREFIX')
if not prefix:
prefix = '@'
# The @file is sometimes handled by a GNU tool itself, using
# the libiberty/argv.c code, and sometimes handled implicitly
# by Cygwin before the tool's own main even sees it. These
# two treat the contents differently, so there is no single
# perfect way to quote. The libiberty @file code uses a very
# regular scheme: a \ in any context is always swallowed and
# quotes the next character, whatever it is; '...' or "..."
# quote whitespace in ... and the outer quotes are swallowed.
# The Cygwin @file code uses a vaguely similar scheme, but its
# treatment of \ is much less consistent: a \ outside a quoted
# string is never stripped, and a \ inside a quoted string is
# only stripped when it quoted something (Cygwin's definition
# of "something" here is nontrivial). In our uses the only
# appearances of \ we expect are in Windows-style file names.
# Fortunately, an extra doubling of \\ that doesn't get
# stripped is harmless in the middle of a file name.
def quote_for_at_file(s):
s = str(s)
if ' ' in s or '\t' in s:
return '"' + re.sub('([ \t"])', r'\\\1', s) + '"'
return s.replace('\\', '\\\\')
args = list(map(quote_for_at_file, cmd[1:]))
os.write(fd, " ".join(args) + "\n")
os.close(fd)
# XXX Using the SCons.Action.print_actions value directly
# like this is bogus, but expedient. This class should
# really be rewritten as an Action that defines the
# __call__() and strfunction() methods and lets the
# normal action-execution logic handle whether or not to
# print/execute the action. The problem, though, is all
# of that is decided before we execute this method as
# part of expanding the $TEMPFILE construction variable.
# Consequently, refactoring this will have to wait until
# we get more flexible with allowing Actions to exist
# independently and get strung together arbitrarily like
# Ant tasks. In the meantime, it's going to be more
# user-friendly to not let obsession with architectural
# purity get in the way of just being helpful, so we'll
# reach into SCons.Action directly.
if SCons.Action.print_actions:
print("Using tempfile "+native_tmp+" for command line:\n"+
str(cmd[0]) + " " + " ".join(args))
return [ cmd[0], prefix + native_tmp + '\n' + rm, native_tmp ]
def generate(env):
"""SCons entry point for this tool.
Args:
env: The SCons environment in question.
NOTE: SCons requires the use of this name, which fails lint.
"""
# make these methods to the top level scons file
env.AddMethod(ValidateSdk)
env.AddMethod(AddBiasForPNaCl)
env.AddMethod(PNaClForceNative)
env.AddMethod(PNaClGetNNaClEnv)
# Invoke the various unix tools that the NativeClient SDK resembles.
env.Tool('g++')
env.Tool('gcc')
env.Tool('gnulink')
env.Tool('ar')
env.Tool('as')
if env.Bit('pnacl_generate_pexe'):
suffix = '.nonfinal.pexe'
else:
suffix = '.nexe'
env.Replace(
COMPONENT_LINKFLAGS=[''],
COMPONENT_LIBRARY_LINK_SUFFIXES=['.pso', '.so', '.a'],
_RPATH='',
COMPONENT_LIBRARY_DEBUG_SUFFIXES=[],
PROGSUFFIX=suffix,
# adding BASE_ AND EXTRA_ flags to common command lines
# The suggested usage pattern is:
# BASE_XXXFLAGS can only be set in this file
# EXTRA_XXXFLAGS can only be set in a ComponentXXX call
# NOTE: we also have EXTRA_LIBS which is handles separately in
# site_scons/site_tools/component_builders.py
# NOTE: the command lines were gleaned from:
# * ../third_party/scons-2.0.1/engine/SCons/Tool/cc.py
# * ../third_party/scons-2.0.1/engine/SCons/Tool/c++.py
# * etc.
CCCOM='$CC $BASE_CFLAGS $CFLAGS $EXTRA_CFLAGS ' +
'$CCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
SHCCCOM='$SHCC $BASE_CFLAGS $SHCFLAGS $EXTRA_CFLAGS ' +
'$SHCCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
CXXCOM='$CXX $BASE_CXXFLAGS $CXXFLAGS $EXTRA_CXXFLAGS ' +
'$CCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
SHCXXCOM='$SHCXX $BASE_CXXFLAGS $SHCXXFLAGS $EXTRA_CXXFLAGS ' +
'$SHCCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
LINKCOM='$LINK $BASE_LINKFLAGS $LINKFLAGS $EXTRA_LINKFLAGS ' +
'$SOURCES $_LIBDIRFLAGS $_LIBFLAGS -o $TARGET',
SHLINKCOM='$SHLINK $BASE_LINKFLAGS $SHLINKFLAGS $EXTRA_LINKFLAGS ' +
'$SOURCES $_LIBDIRFLAGS $_LIBFLAGS -o $TARGET',
ASCOM='$AS $BASE_ASFLAGS $ASFLAGS $EXTRA_ASFLAGS -o $TARGET $SOURCES',
ASPPCOM='$ASPP $BASE_ASPPFLAGS $ASPPFLAGS $EXTRA_ASPPFLAGS ' +
'$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES',
# Strip doesn't seem to be a first-class citizen in SCons country,
# so we have to add these *COM, *COMSTR manually.
# Note: it appears we cannot add this in component_setup.py
STRIPFLAGS=['--strip-all'],
STRIPCOM='${STRIP} ${STRIPFLAGS}',
TRANSLATECOM='${TRANSLATE} ${TRANSLATEFLAGS} ${SOURCES} -o ${TARGET}',
PNACLFINALIZEFLAGS=[],
PNACLFINALIZECOM='${PNACLFINALIZE} ${PNACLFINALIZEFLAGS} ' +
'${SOURCES} -o ${TARGET}',
)
# Windows has a small limit on the command line size. The linking and AR
# commands can get quite large. So bring in the SCons machinery to put
# most of a command line into a temporary file and pass it with
# @filename, which works with gcc.
if env['PLATFORM'] in ['win32', 'cygwin']:
env['TEMPFILE'] = NaClTempFileMunge
for com in ['LINKCOM', 'SHLINKCOM', 'ARCOM']:
env[com] = "${TEMPFILE('%s')}" % env[com]
# Get root of the SDK.
root = env.GetToolchainDir()
# if bitcode=1 use pnacl toolchain
if env.Bit('bitcode'):
_SetEnvForPnacl(env, root)
# Get GDB from the nacl-gcc toolchain even when using PNaCl.
# TODO(mseaborn): We really want the nacl-gdb binary to be in a
# separate tarball from the nacl-gcc toolchain, then this step
# will not be necessary.
# See http://code.google.com/p/nativeclient/issues/detail?id=2773
if env.Bit('target_x86'):
temp_env = env.Clone()
temp_env.ClearBits('bitcode')
temp_root = temp_env.GetToolchainDir()
_SetEnvForNativeSdk(temp_env, temp_root)
env.Replace(GDB=temp_env['GDB'])
elif env.Bit('built_elsewhere'):
_StubOutEnvToolsForBuiltElsewhere(env)
else:
_SetEnvForNativeSdk(env, root)
env.Prepend(LIBPATH='${NACL_SDK_LIB}')
# Install our scanner for (potential) linker scripts.
# It applies to "source" files ending in .a or .so.
# Dependency files it produces are to be found in ${LIBPATH}.
# It is applied recursively to those dependencies in case
# some of them are linker scripts too.
ldscript_scanner = SCons.Scanner.Base(
function=ScanLinkerScript,
skeys=['.a', '.so', '.pso'],
path_function=SCons.Scanner.FindPathDirs('LIBPATH'),
recursive=True
)
env.Append(SCANNERS=ldscript_scanner)
# Scons tests can check this version number to decide whether to
# enable tests for toolchain bug fixes or new features. See
# description in pnacl/build.sh.
if 'toolchain_feature_version' in SCons.Script.ARGUMENTS:
version = int(SCons.Script.ARGUMENTS['toolchain_feature_version'])
else:
version_file = os.path.join(root, 'FEATURE_VERSION')
# There is no pnacl_newlib toolchain on ARM, only a pnacl_translator, so
# use that if necessary. Otherwise use it if we are doing sandboxed
# translation.
if not os.path.exists(version_file) or env.Bit('use_sandboxed_translator'):
version_file = os.path.join(os.path.dirname(root), 'pnacl_translator',
'FEATURE_VERSION')
if os.path.exists(version_file):
with open(version_file, 'r') as fh:
version = int(fh.read())
else:
version = 0
env.Replace(TOOLCHAIN_FEATURE_VERSION=version)
| bsd-3-clause |
google-research/neural-structural-optimization | setup.py | 1 | 1236 | # Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import setuptools
INSTALL_REQUIRES = [
'absl-py',
'apache-beam',
'autograd',
'nlopt',
'numpy',
'matplotlib',
'Pillow',
'scipy',
'scikit-image',
'seaborn',
'xarray',
]
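# dataclasses is in the standard library from Python 3.7 on; older
# interpreters need the PyPI backport.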
if sys.version_info[:2] < (3, 7):
INSTALL_REQUIRES.append('dataclasses')
setuptools.setup(
name='neural-structural-optimization',
version='0.0.0',
license='Apache 2.0',
author='Google LLC',
author_email='[email protected]',
install_requires=INSTALL_REQUIRES,
url='https://github.com/google-research/neural-structural-optimization',
packages=setuptools.find_packages(),
python_requires='>=3.6')
| apache-2.0 |
adrn/SuperFreq | superfreq/tests/test_simple.py | 1 | 6697 | # coding: utf-8
""" Simple unit tests of SuperFreq """
# Third-party
from astropy.utils import isiterable
import numpy as np
# Project
from ..naff import SuperFreq
def test_cy_naff():
"""
This checks the Cython frequency determination function. We construct a simple
time series with known frequencies and amplitudes and just verify that the
strongest frequency pulled out by NAFF is correct.
"""
from .._naff import naff_frequency
t = np.linspace(0., 300., 12000)
true_ws = 2*np.pi*np.array([0.581, 0.73])
true_as = np.array([5*(np.cos(np.radians(15.)) + 1j*np.sin(np.radians(15.))),
1.8*(np.cos(np.radians(85.)) + 1j*np.sin(np.radians(85.)))])
for p in range(1,4+1): # try different filter exponents, p
ff = SuperFreq(t, p=p)
for sign in [1.,-1.]: # make sure we recover the correct sign of the frequency
true_omegas = true_ws * sign
f = np.sum(true_as[None] * np.exp(1j * true_omegas[None] * t[:,None]), axis=1)
ww = naff_frequency(true_omegas[0], ff.tz, ff.chi,
np.ascontiguousarray(f.real),
np.ascontiguousarray(f.imag),
ff.T)
np.testing.assert_allclose(ww, true_omegas[0], atol=1E-8)
'''
def test_cy_naff_scaling():
"""
This plots how the accuracy in frequency, angle, and phase recovery scales with
a) the number of timesteps
b) the length of the time window
"""
from .._naff import naff_frequency
true_periods = np.array([1.556, 1.7211])
true_ws = 2*np.pi/true_periods
true_as = np.array([5*(np.cos(np.radians(15.)) + 1j*np.sin(np.radians(15.))),
1.8*(np.cos(np.radians(85.)) + 1j*np.sin(np.radians(85.)))])
length_grid = np.round(2**np.arange(4,12+1,0.1)).astype(int)
size_grid = np.round(2**np.arange(4,12+1,0.1)).astype(int)
shp = (length_grid.size, size_grid.size)
xgrid,ygrid = np.meshgrid(length_grid, size_grid)
grid = np.vstack((np.ravel(xgrid), np.ravel(ygrid))).T
p = 1
rel_errs = []
for length,size in grid:
print(length, size)
t = np.linspace(0., length, size)
ff = SuperFreq(t, p=p)
f = np.sum(true_as[None] * np.exp(1j * true_ws[None] * t[:,None]), axis=1)
ww = naff_frequency(true_ws[0], ff.tz, ff.chi,
np.ascontiguousarray(f.real),
np.ascontiguousarray(f.imag),
ff.T)
rel_errs.append(np.abs(true_ws[0] - ww) / true_ws[0])
rel_errs = np.array(rel_errs)
print(rel_errs.shape, shp)
# --
import matplotlib.pyplot as pl
l_xgrid, l_ygrid = np.log2(xgrid), np.log2(ygrid)
dx = l_xgrid[0,1]-l_xgrid[0,0]
dy = l_ygrid[1,0]-l_ygrid[0,0]
# pl.pcolor(np.log2(xgrid), np.log2(ygrid),
# np.log2(rel_errs.reshape(xgrid.shape)), cmap='viridis')
pl.imshow(np.log10(rel_errs.reshape(xgrid.shape)), cmap='viridis', interpolation='nearest',
extent=[l_xgrid.min()-dx/2, l_xgrid.max()+dx/2,
l_ygrid.min()-dy/2, l_ygrid.max()+dy/2],
origin='bottom', vmin=-12, vmax=-1)
pl.xlabel('Window length')
pl.ylabel('Num. timesteps')
pl.colorbar()
pl.gca().set_aspect('equal')
pl.show()
'''
class SimpleBase(object):
""" Need to define:
self.amp
self.omega
self.p
in subclass setup().
"""
def setup(self):
self.A = np.sqrt(self.amp.imag**2 + self.amp.real**2)
self.phi = np.arctan2(self.amp.imag, self.amp.real)
def make_f(self, t):
a = self.amp
w = self.omega
return np.sum(a[None] * np.exp(1j * w[None] * t[:,None]), axis=1)
def test_freq_recovery(self):
# define a bunch of arrays of times to make sure SuperFreq isn't
# sensitive to the times
ts = [np.linspace(0., 150., 12000),
np.linspace(0., 150., 24414),
np.linspace(0., 150., 42104),
np.linspace(150., 300., 12000),
np.linspace(150., 300., 24414),
np.linspace(150., 300., 42104),
np.linspace(0., 150., 12000) + 50*(2*np.pi/self.omega[0])]
for i,t in enumerate(ts):
print(i, t.min(), t.max(), len(t))
f = self.make_f(t)
nfreq = len(self.omega)
if not isiterable(self.p):
ps = [self.p]
else:
ps = self.p
for p in ps:
print(i, p)
# create SuperFreq object for this time array
sf = SuperFreq(t, p=p)
# solve for the frequencies
w,amp,phi = sf.frecoder(f[:sf.n], break_condition=1E-5)
np.testing.assert_allclose(self.omega, w[:nfreq], rtol=1E-7)
np.testing.assert_allclose(self.A, amp[:nfreq], rtol=1E-5)
np.testing.assert_allclose(self.phi, phi[:nfreq], rtol=1E-3)
def test_rolling_window(self):
ts = [np.linspace(0.+dd, 100.+dd, 10000) for dd in np.linspace(0,20,64)]
for i,t in enumerate(ts):
print(i, t.min(), t.max(), len(t))
f = self.make_f(t)
nfreq = len(self.omega)
if not isiterable(self.p):
ps = [self.p]
else:
ps = self.p
for p in ps:
print(i, p)
# create SuperFreq object for this time array
sf = SuperFreq(t, p=p)
# try recovering the strongest frequency
w,amp,phi = sf.frecoder(f[:sf.n], break_condition=1E-5)
np.testing.assert_allclose(self.omega, w[:nfreq], rtol=1E-7)
np.testing.assert_allclose(self.A, amp[:nfreq], rtol=1E-5)
np.testing.assert_allclose(self.phi, phi[:nfreq], rtol=1E-4)
class TestSimple1(SimpleBase):
omega = 2*np.pi*np.array([0.581])
amp = np.array([5*(np.cos(np.radians(15.)) + 1j*np.sin(np.radians(15.)))])
p = 4
class TestSimple2(SimpleBase):
omega = 2*np.pi*np.array([0.581, 0.73])
amp = np.array([5*(np.cos(np.radians(15.)) + 1j*np.sin(np.radians(15.))),
1.8*(np.cos(np.radians(85.)) + 1j*np.sin(np.radians(85.)))])
p = 4
class TestSimple3(SimpleBase):
omega = 2*np.pi*np.array([0.581, 0.73, 0.113])
amp = np.array([5*(np.cos(np.radians(15.)) + 1j*np.sin(np.radians(15.))),
1.8*(np.cos(np.radians(85.)) + 1j*np.sin(np.radians(85.))),
0.7*(np.cos(np.radians(45.)) + 1j*np.sin(np.radians(45.)))])
p = 4
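
if __name__ == '__main__':
    # Minimal usage sketch (not part of the test suite; run with
    # ``python -m superfreq.tests.test_simple`` so the relative imports resolve).
    # Build a single complex oscillation and recover its frequency, amplitude and
    # phase with SuperFreq.frecoder, mirroring what the tests above do.
    t = np.linspace(0., 150., 12000)
    true_w = 2 * np.pi * 0.581
    f = 5. * np.exp(1j * (true_w * t + np.radians(15.)))
    sf = SuperFreq(t, p=4)
    w, amp, phi = sf.frecoder(f[:sf.n], break_condition=1E-5)
    print("recovered frequency, amplitude, phase:", w[0], amp[0], phi[0])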
| mit |
achim1/pmttools | setup.py | 1 | 1961 | from setuptools import setup
from pmttools import __version__
def parse_requirements(req_file):
with open(req_file) as f:
reqs = []
for r in f.readlines():
if not r.startswith("http"):
reqs.append(r)
return reqs
try:
requirements = parse_requirements("requirements.txt")
except Exception as e:
print ("Failed parsing requiremnts, installing dummy requirements...")
requirements = ['numpy>=1.9.0',
'matplotlib>=1.5.0',
'pyevsel>=0.0.6',
'futures>=3.0.5',
'future>=0.16.0',
'pyprind>=2.9.6']
setup(name='pmttools',
version=__version__,
description='Analysis of photo multiplier response',
      long_description='Photo multiplier tubes (PMTs) are widely used in high energy physics applications. The tools provided here help with their characterization in the lab.',
author='Achim Stoessl',
author_email="[email protected]",
      url='https://github.com/achim1/pmttools',
#download_url="pip install pyosci",
install_requires=requirements,
setup_requires=["pytest-runner"],
license="GPL",
platforms=["Ubuntu 14.04","Ubuntu 16.04"],
classifiers=[
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.5",
"Topic :: Scientific/Engineering :: Physics"
],
tests_require=["pytest"],
keywords=["PMT", "photo multiplier tubes",\
"HEP",\
"physics", "engineering", "callibration", "characterization"],
packages=['pmttools'],
#scripts=[],
#package_data={'pyosci': ['pyoscidefault.mplstyle','pyoscipresent.mplstyle']}
)
| gpl-3.0 |
iamjakob/lumiCalc | LumiDB/test/matplotlibLumi.py | 1 | 4192 | import sys
from numpy import arange,sin,pi,random
batchonly=False
def destroy(e) :
sys.exit()
import matplotlib
try:
matplotlib.use('TkAgg',warn=False)
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg as CanvasBackend
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
import Tkinter as Tk
root=Tk.Tk()
root.wm_title("Embedding in TK")
except ImportError:
print 'unable to import GUI backend, switch to batch only mode'
matplotlib.use('Agg',warn=False)
from matplotlib.backends.backend_agg import FigureCanvasAgg as CanvasBackend
batchonly=True
from matplotlib.figure import Figure
import matplotlib.ticker as ticker
def drawHTTPstring(fig):
    # cherrypy and StringIO are only needed when the figure is served over HTTP,
    # so import them lazily here rather than at module level.
    import cherrypy
    from StringIO import StringIO
    canvas=CanvasBackend(fig)
    cherrypy.response.headers['Content-Type']='image/png'
    buf=StringIO()
    canvas.print_png(buf)
    return buf.getvalue()
def drawBatch(fig,filename):
canvas=CanvasBackend(fig)
canvas.print_figure(filename)
def drawInteractive(fig):
if batchonly:
print 'interactive mode is not available for your setup, exit'
sys.exit()
canvas=CanvasBackend(fig,master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP,fill=Tk.BOTH,expand=1)
toolbar=NavigationToolbar2TkAgg(canvas,root)
toolbar.update()
canvas._tkcanvas.pack(side=Tk.TOP,fill=Tk.BOTH,expand=1)
button = Tk.Button(master=root,text='Quit',command=sys.exit)
button.pack(side=Tk.BOTTOM)
Tk.mainloop()
def plotDate(fig):
import datetime as dt
ax2=fig.add_subplot(111)
date2_1=dt.datetime(2008,9,23)
date2_2=dt.datetime(2008,10,3)
delta2=dt.timedelta(days=1)
dates2=matplotlib.dates.drange(date2_1,date2_2,delta2)
y2=random.rand(len(dates2))
ax2.set_ylabel(r'Luminosity $\mu$b$^{-1}$')
ax2.plot_date(dates2,y2,linestyle='-')
dateFmt=matplotlib.dates.DateFormatter('%Y-%m-%d')
ax2.xaxis.set_major_formatter(dateFmt)
daysLoc=matplotlib.dates.DayLocator()
hoursLoc=matplotlib.dates.HourLocator(interval=6)
ax2.xaxis.set_major_locator(daysLoc)
ax2.xaxis.set_minor_locator(hoursLoc)
fig.autofmt_xdate(bottom=0.18)
fig.subplots_adjust(left=0.18)
def plotRun(fig):
ax=fig.add_subplot(111)
ax.set_xlabel(r'Run')
ax.set_ylabel(r'Luminosity $\mu$b$^{-1}$')
runlist=[136088,136089,136889,136960,137892]
lumivalues=[0.3,0.6,0.7,0.8,1.0]
#ax.set_xticklabels(runlist)
xticklabels=ax.get_xticklabels()
for tx in xticklabels:
tx.set_rotation(30)
minorLocator=matplotlib.ticker.MultipleLocator(100)
ax.xaxis.set_minor_locator(minorLocator)
#ax.xaxis.set_major_locator(matplotlib.ticker.LinearLocator(7)
ax.plot(runlist,lumivalues)
ax.plot(runlist,[0.8*x for x in lumivalues])
ax.grid(True)
fig.subplots_adjust(bottom=0.18,left=0.18)
def plotHist(fig):
x=[1,2,3,4,5,6]
y=[1,2,3,4,5,6]
binsize=1
ax=fig.add_subplot(111)
ax.set_xlabel(r'Run')
ax.set_ylabel(r'Luminosity $\mu$b$^{-1}$')
print binsize
#ax.bar(x,y,width=binsize,drawstyle='steps',edgecolor='r',fill=False,label='Recorded')
ax.plot(x,y,drawstyle='steps')
ax.grid(True)
ax.legend()
fig.subplots_adjust(bottom=0.18,left=0.18)
if __name__=='__main__':
fig=Figure(figsize=(5,4),dpi=100)
#a=fig.add_subplot(111)
#timevars=[1,2,3,4] #should be a absolute packed number runnumber+lsnumber
#lumivars=[5,6,7,8]
#use major and minor tickers: major is run,fill or time interval, minor ticker is lumisection. grid is set on major ticker
#a.set_title('luminosity run')
#a.set_xlabel('lumi section')
#a.set_ylabel('Luminosity')
#a.set_xbound(lower=0,upper=5)
#a.set_ybound(lower=0.0,upper=10.5)
#a.set_xticks(range(0,5))
#a.set_xticks(range(1,5,1))
#a.plot(timevars,lumivars,'rs-',linewidth=1.0,label='delivered')
#a.plot(timevars,[v*0.8 for v in lumivars],'gs-',linewidth=1.0,label='recorded')
#a.grid(True)
#a.legend(('delivered','recorded'),loc='upper left')
#drawBatch(fig,'testbatch.png')
#plotDate(fig)
#plotRun(fig)
plotHist(fig)
drawInteractive(fig)
#print drawHTTPstring()
| apache-2.0 |
yunfeilu/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
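
# ---------------------------------------------------------------------------
# Added illustration (not part of the original example): with
# ``fit_intercept=False`` ridge regression minimizes
#     ||y - X w||^2 + alpha * ||w||^2
# and has the closed-form solution w = (X^T X + alpha I)^{-1} X^T y.
# A quick sanity check of that formula against scikit-learn on the tiny
# training set defined above:
_alpha = 0.1
_w_closed = np.linalg.solve(X_train.T.dot(X_train) + _alpha * np.eye(X_train.shape[1]),
                            X_train.T.dot(np.asarray(y_train)))
_w_ridge = linear_model.Ridge(alpha=_alpha, fit_intercept=False).fit(X_train, y_train).coef_
assert np.allclose(_w_closed, _w_ridge)
# ---------------------------------------------------------------------------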
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/tests/test_pickle.py | 6 | 8450 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import cPickle as pickle
from matplotlib.externals.six.moves import xrange
from io import BytesIO
from nose.tools import assert_equal, assert_not_equal
import numpy as np
from matplotlib.testing.decorators import cleanup, image_comparison
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
def depth_getter(obj,
current_depth=0,
depth_stack=None,
nest_info='top level object'):
"""
Returns a dictionary mapping:
id(obj): (shallowest_depth, obj, nest_info)
for the given object (and its subordinates).
This, in conjunction with recursive_pickle, can be used to debug
pickling issues, although finding others is sometimes a case of
trial and error.
"""
if depth_stack is None:
depth_stack = {}
if id(obj) in depth_stack:
stack = depth_stack[id(obj)]
if stack[0] > current_depth:
del depth_stack[id(obj)]
else:
return depth_stack
depth_stack[id(obj)] = (current_depth, obj, nest_info)
if isinstance(obj, (list, tuple)):
for i, item in enumerate(obj):
depth_getter(item, current_depth=current_depth + 1,
depth_stack=depth_stack,
nest_info=('list/tuple item #%s in '
'(%s)' % (i, nest_info)))
else:
if isinstance(obj, dict):
state = obj
elif hasattr(obj, '__getstate__'):
state = obj.__getstate__()
if not isinstance(state, dict):
state = {}
elif hasattr(obj, '__dict__'):
state = obj.__dict__
else:
state = {}
for key, value in six.iteritems(state):
depth_getter(value, current_depth=current_depth + 1,
depth_stack=depth_stack,
nest_info=('attribute "%s" in '
'(%s)' % (key, nest_info)))
return depth_stack
def recursive_pickle(top_obj):
"""
Recursively pickle all of the given objects subordinates, starting with
the deepest first. **Very** handy for debugging pickling issues, but
also very slow (as it literally pickles each object in turn).
Handles circular object references gracefully.
"""
objs = depth_getter(top_obj)
# sort by depth then by nest_info
objs = sorted(six.itervalues(objs), key=lambda val: (-val[0], val[2]))
for _, obj, location in objs:
try:
pickle.dump(obj, BytesIO(), pickle.HIGHEST_PROTOCOL)
except Exception as err:
print(obj)
print('Failed to pickle %s. \n Type: %s. Traceback '
'follows:' % (location, type(obj)))
raise
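
# Example of the debugging workflow (illustrative only, not exercised by the tests):
#
#     fig = plt.figure()
#     plt.plot([0, 1], [0, 1])
#     recursive_pickle(fig)   # pickles every subordinate object, deepest first,
#                             # and raises at the first one that cannot be pickled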
@cleanup
def test_simple():
fig = plt.figure()
# un-comment to debug
# recursive_pickle(fig)
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
ax = plt.subplot(121)
pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
ax = plt.axes(projection='polar')
plt.plot(list(xrange(10)), label='foobar')
plt.legend()
# Uncomment to debug any unpicklable objects. This is slow so is not
# uncommented by default.
# recursive_pickle(fig)
pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
# ax = plt.subplot(121, projection='hammer')
# recursive_pickle(ax, 'figure')
# pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
plt.figure()
plt.bar(left=list(xrange(10)), height=list(xrange(10)))
pickle.dump(plt.gca(), BytesIO(), pickle.HIGHEST_PROTOCOL)
fig = plt.figure()
ax = plt.axes()
plt.plot(list(xrange(10)))
ax.set_yscale('log')
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
@cleanup
@image_comparison(baseline_images=['multi_pickle'],
extensions=['png'], remove_text=True)
def test_complete():
fig = plt.figure('Figure with a label?', figsize=(10, 6))
plt.suptitle('Can you fit any more in a figure?')
# make some arbitrary data
x, y = np.arange(8), np.arange(10)
data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
v = np.sin(v * -0.6)
plt.subplot(3, 3, 1)
plt.plot(list(xrange(10)))
plt.subplot(3, 3, 2)
plt.contourf(data, hatches=['//', 'ooo'])
plt.colorbar()
plt.subplot(3, 3, 3)
plt.pcolormesh(data)
plt.subplot(3, 3, 4)
plt.imshow(data)
plt.subplot(3, 3, 5)
plt.pcolor(data)
ax = plt.subplot(3, 3, 6)
ax.set_xlim(0, 7)
ax.set_ylim(0, 9)
plt.streamplot(x, y, u, v)
ax = plt.subplot(3, 3, 7)
ax.set_xlim(0, 7)
ax.set_ylim(0, 9)
plt.quiver(x, y, u, v)
plt.subplot(3, 3, 8)
plt.scatter(x, x**2, label='$x^2$')
plt.legend(loc='upper left')
plt.subplot(3, 3, 9)
plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4)
###### plotting is done, now test its pickle-ability #########
# Uncomment to debug any unpicklable objects. This is slow (~200 seconds).
# recursive_pickle(fig)
result_fh = BytesIO()
pickle.dump(fig, result_fh, pickle.HIGHEST_PROTOCOL)
plt.close('all')
# make doubly sure that there are no figures left
assert_equal(plt._pylab_helpers.Gcf.figs, {})
# wind back the fh and load in the figure
result_fh.seek(0)
fig = pickle.load(result_fh)
# make sure there is now a figure manager
assert_not_equal(plt._pylab_helpers.Gcf.figs, {})
assert_equal(fig.get_label(), 'Figure with a label?')
@cleanup
def test_no_pyplot():
# tests pickle-ability of a figure not created with pyplot
from matplotlib.backends.backend_pdf import FigureCanvasPdf as fc
from matplotlib.figure import Figure
fig = Figure()
_ = fc(fig)
ax = fig.add_subplot(1, 1, 1)
ax.plot([1, 2, 3], [1, 2, 3])
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
@cleanup
def test_renderer():
from matplotlib.backends.backend_agg import RendererAgg
renderer = RendererAgg(10, 20, 30)
pickle.dump(renderer, BytesIO())
@cleanup
def test_image():
# Prior to v1.4.0 the Image would cache data which was not picklable
# once it had been drawn.
from matplotlib.backends.backend_agg import new_figure_manager
manager = new_figure_manager(1000)
fig = manager.canvas.figure
ax = fig.add_subplot(1, 1, 1)
ax.imshow(np.arange(12).reshape(3, 4))
manager.canvas.draw()
pickle.dump(fig, BytesIO())
@cleanup
def test_grid():
from matplotlib.backends.backend_agg import new_figure_manager
manager = new_figure_manager(1000)
fig = manager.canvas.figure
ax = fig.add_subplot(1, 1, 1)
ax.grid()
# Drawing the grid triggers instance methods to be attached
# to the Line2D object (_lineFunc).
manager.canvas.draw()
pickle.dump(ax, BytesIO())
@cleanup
def test_polar():
ax = plt.subplot(111, polar=True)
fig = plt.gcf()
result = BytesIO()
pf = pickle.dumps(fig)
pickle.loads(pf)
plt.draw()
class TransformBlob(object):
def __init__(self):
self.identity = mtransforms.IdentityTransform()
self.identity2 = mtransforms.IdentityTransform()
# Force use of the more complex composition.
self.composite = mtransforms.CompositeGenericTransform(
self.identity,
self.identity2)
# Check parent -> child links of TransformWrapper.
self.wrapper = mtransforms.TransformWrapper(self.composite)
# Check child -> parent links of TransformWrapper.
self.composite2 = mtransforms.CompositeGenericTransform(
self.wrapper,
self.identity)
def test_transform():
obj = TransformBlob()
pf = pickle.dumps(obj)
del obj
obj = pickle.loads(pf)
# Check parent -> child links of TransformWrapper.
assert_equal(obj.wrapper._child, obj.composite)
# Check child -> parent links of TransformWrapper.
assert_equal(list(obj.wrapper._parents.values()), [obj.composite2])
# Check input and output dimensions are set as expected.
assert_equal(obj.wrapper.input_dims, obj.composite.input_dims)
assert_equal(obj.wrapper.output_dims, obj.composite.output_dims)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s'])
| mit |
DrarPan/tensorflow | reinforcement_learning/QLearning/GridWorld.py | 1 | 8451 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import math
import random
import itertools
import scipy.misc
import os
import cv2 as cv
import numpy as np
import tensorflow as tf
slim=tf.contrib.slim
import matplotlib.pyplot as plt
#%matplotlib inline #Only for Ipython
class gameOb():
def __init__(self,coordinates,size,intensity,channel,reward,name):
self.x=coordinates[0]
self.y=coordinates[1]
self.size=size
self.intensity=intensity
self.channel=channel
self.reward=reward
self.name=name
class gameEnv():
def __init__(self,size):
self.sizeX=size;
self.sizeY=size;
self.actions=4
self.objects=[]
self.window="env"
a=self.reset()
#cv.imshow(self.window,a)
#cv.waitKey(5)
#plt.imshow(a,interpolation="nearest")
def newPosition(self):
iterables=[range(self.sizeX),range(self.sizeY)]
points=[]
        for t in itertools.product(*iterables):  # Cartesian product of all grid coordinates
points.append(t)
currentPositions=[]
for objectA in self.objects:
if (objectA.x,objectA.y) not in currentPositions:
currentPositions.append((objectA.x,objectA.y))
for pos in currentPositions:
points.remove(pos)
#print(len(points))
location =np.random.choice(range(len(points)),replace=False)
#print("Pos: ",points[location])
return points[location]
def checkGoal(self):
others=[]
for obj in self.objects:
if obj.name=='hero':
hero=obj
else:
others.append(obj)
for other in others:
if hero.x==other.x and hero.y==other.y:
self.objects.remove(other)
if other.reward==1:
self.objects.append(gameOb(self.newPosition(),1,1,1,1,'goal'))
else:
self.objects.append(gameOb(self.newPosition(),1,1,0,-1,'fire'))
return other.reward,False
return 0.0,False
def renderEnv(self):
a=np.ones([self.sizeY+2,self.sizeX+2,3])
a[1:-1,1:-1,:]=0
hero=None
for item in self.objects:
a[item.y+1:item.y+item.size+1,item.x+1:item.x+item.size+1,item.channel]=item.intensity
b=scipy.misc.imresize(a[:,:,0],[84,84,1],interp='nearest')
c=scipy.misc.imresize(a[:,:,1],[84,84,1],interp='nearest')
d=scipy.misc.imresize(a[:,:,2],[84,84,1],interp='nearest')
a=np.stack([b,c,d],axis=2)
return a
def step(self,action):
self.moveChar(action)
reward,done=self.checkGoal()
state=self.renderEnv()
return state,reward,done
def reset(self):
self.objects=[]
hero=gameOb(self.newPosition(),1,1,2,None,'hero')
self.objects.append(hero)
goal=gameOb(self.newPosition(),1,1,1,1,"goal")
self.objects.append(goal)
hole=gameOb(self.newPosition(),1,1,0,-1,"fire")
self.objects.append(hole)
goal2=gameOb(self.newPosition(),1,1,1,1,"goal")
self.objects.append(goal2)
hole2=gameOb(self.newPosition(),1,1,0,-1,"fire")
self.objects.append(hole2)
goal3=gameOb(self.newPosition(),1,1,1,1,"goal")
self.objects.append(goal3)
goal4=gameOb(self.newPosition(),1,1,1,1,"goal")
self.objects.append(goal4)
state=self.renderEnv()
self.state=state
return state
def moveChar(self,direction):
hero=self.objects[0]
heroX=hero.x
heroY=hero.y
if direction==0 and hero.y>=1:
hero.y-=1
if direction==1 and hero.y<self.sizeY-2:
hero.y+=1
if direction==2 and hero.x>=1:
hero.x-=1
if direction==3 and hero.x<self.sizeX-2:
hero.x+=1
self.objects[0]=hero
env=gameEnv(size=5)
class Qnetwork():
def __init__(self,h_size):
self.scalarInput=tf.placeholder(shape=[None,21168],dtype=tf.float32)
self.imageIn=tf.reshape(self.scalarInput,shape=[-1,84,84,3])
self.conv1=tf.contrib.layers.convolution2d(inputs=self.imageIn,num_outputs=32,kernel_size=[8,8],stride=[4,4],padding='VALID',biases_initializer=None)
self.conv2=tf.contrib.layers.convolution2d(inputs=self.conv1,num_outputs=64,kernel_size=[4,4],stride=[2,2],padding='VALID',biases_initializer=None)
self.conv3=tf.contrib.layers.convolution2d(inputs=self.conv2,num_outputs=64,kernel_size=[3,3],stride=[1,1],padding='VALID',biases_initializer=None)
self.conv4=tf.contrib.layers.convolution2d(inputs=self.conv3,num_outputs=h_size,kernel_size=[7,7],stride=[1,1],padding='VALID',biases_initializer=None)
self.streamAC,self.streamVC = tf.split(3,2,self.conv4) #A: the value generated from action, V: the value of environment
self.streamA= tf.contrib.layers.flatten(self.streamAC)
self.streamV= tf.contrib.layers.flatten(self.streamVC)
self.AW=tf.Variable(tf.random_normal([h_size//2,env.actions],dtype=tf.float32))
self.VW=tf.Variable(tf.random_normal([h_size//2,1],dtype=tf.float32))
self.Advantage=tf.matmul(self.streamA,self.AW)
self.Value=tf.matmul(self.streamV,self.VW)
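        # Dueling aggregation: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)).
        # Subtracting the mean advantage keeps V and A identifiable, because adding
        # a constant to every advantage and removing it from the value would
        # otherwise leave Q unchanged.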
self.Qout=self.Value+tf.subtract(self.Advantage,tf.reduce_mean(self.Advantage,reduction_indices=1,keep_dims=True))
self.predict=tf.argmax(self.Qout,1)
self.targetQ=tf.placeholder(shape=[None],dtype=tf.float32)
self.actions=tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot=tf.one_hot(self.actions,env.actions,dtype=tf.float32)
self.Q=tf.reduce_sum(tf.multiply(self.Qout,self.actions_onehot),reduction_indices=1)
self.td_error=tf.square(self.targetQ-self.Q)
self.loss=tf.reduce_mean(self.td_error)
self.trainer=tf.train.AdamOptimizer(learning_rate=0.0001)
self.updateModel=self.trainer.minimize(self.loss)
class experience_buffer():
def __init__(self,buffer_size=50000):
self.buffer=[]
self.buffer_size=buffer_size
def add(self,experience):
if (len(self.buffer)+len(experience))>=self.buffer_size:
self.buffer[0:len(experience)+len(self.buffer)-self.buffer_size]=[]
self.buffer.extend(experience)
def sample(self,size):
return np.reshape(np.array(random.sample(self.buffer,size)),[size,5])
def processState(states):
return np.reshape(states,[21168])
def updateTargetGraph(tfVars,tau):
total_var=len(tfVars)
op_holder=[]
for idx,var in enumerate(tfVars[0:total_var//2]):
op_holder.append(
tfVars[idx+total_var//2].assign((var.value()*tau)+(tfVars[idx+total_var//2].value()*(1-tau))))
return op_holder
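
# updateTargetGraph builds the "soft" target-network update used by DQN variants:
#     theta_target <- tau * theta_main + (1 - tau) * theta_target
# It assumes tf.trainable_variables() returns the main network's variables in the
# first half of the list and the target network's in the second half, which holds
# here because mainQN is constructed before targetQN.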
def updateTarget(op_holder,sess):
for op in op_holder:
sess.run(op)
batch_size=32
update_freq=4
y=.99
startE=1
endE=0.1
annealing_steps=10000.
num_episodes=10000
pre_train_steps=10000
max_epLength=50
load_model=False
path="./dqn"
h_size=512
tau=0.001
mainQN=Qnetwork(h_size)
targetQN=Qnetwork(h_size)
trainables=tf.trainable_variables()
targetOps=updateTargetGraph(trainables,tau)
myBuffer=experience_buffer()
e=startE
stepDrop=(startE-endE)/annealing_steps
rList=[]
total_step=0
saver=tf.train.Saver()
if not os.path.exists(path):
os.mkdir(path)
with tf.Session() as sess:
f=open("./Rewards.txt",'w');
if load_model==True:
print('Loading Model...')
ckpt=tf.train.get_checkpoint_state(path)
saver.restore(sess,ckpt.model_checkpoint_path)
sess.run(tf.global_variables_initializer())
updateTarget(targetOps,sess)
for i in range(num_episodes+1):
episodeBuffer=experience_buffer()
s=env.reset()
s=processState(s)
d=False
rAll=0
j=0
while j<max_epLength:
j+=1
if np.random.rand(1)<e or total_step<pre_train_steps:
a=np.random.randint(0,4)
else:
a=sess.run(mainQN.predict,feed_dict={mainQN.scalarInput: [s]})[0]
s1,r,d=env.step(a)
s1=processState(s1)
total_step+=1
episodeBuffer.add(np.reshape([s,a,r,s1,d],[1,5]))
if total_step>pre_train_steps:
if e>endE:
e-=stepDrop
if total_step%(update_freq)==0:
trainBatch=myBuffer.sample(batch_size)
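                    # Double DQN target: the main network selects the greedy action
                    # (A below) while the target network evaluates it (doubleQ),
                    # which reduces Q-value overestimation:
                    #     target = r + y * Q_target(s', argmax_a Q_main(s', a))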
A = sess.run(mainQN.predict,feed_dict={mainQN.scalarInput: np.vstack(trainBatch[:,3])})
Q = sess.run(targetQN.Qout,feed_dict={targetQN.scalarInput: np.vstack(trainBatch[:,3])})
doubleQ=Q[range(batch_size),A]
targetQ=trainBatch[:,2]+y*doubleQ
# print(A)
# print(Q)
# print("____")
# print(doubleQ)
sess.run(mainQN.updateModel,feed_dict={mainQN.scalarInput: np.vstack(trainBatch[:,0]),
mainQN.targetQ: targetQ,
mainQN.actions: trainBatch[:,1]})
updateTarget(targetOps,sess)
rAll+=r
s=s1
if d==True:
break
myBuffer.add(episodeBuffer.buffer)
rList.append(rAll)
if i>0 and i%25==0:
print('episode',i,', average reward of last 25 episode', np.mean(rList[-25:]))
f.write('%.3f\n'%np.mean(rList[-25:]))
if i>0 and i%1000==0:
saver.save(sess,path+'/model-'+str(i)+'.cptk')
print("Save Model")
f.close()
saver.save(sess,path+"/model-"+str(i)+'.cptk')
| gpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/axes_grid1/simple_axisline4.py | 1 | 1442 | """
================
Simple Axisline4
================
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import numpy as np
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
ax = host_subplot(111)
xx = np.arange(0, 2*np.pi, 0.01)
ax.plot(xx, np.sin(xx))
ax2 = ax.twin() # ax2 is responsible for "top" axis and "right" axis
ax2.set_xticks([0., .5*np.pi, np.pi, 1.5*np.pi, 2*np.pi])
ax2.set_xticklabels(["$0$", r"$\frac{1}{2}\pi$",
r"$\pi$", r"$\frac{3}{2}\pi$", r"$2\pi$"])
ax2.axis["right"].major_ticklabels.set_visible(False)
ax2.axis["top"].major_ticklabels.set_visible(True)
plt.draw()
pltshow(plt)
| mit |
garibaldu/boundary-seekers | Boundary Hunter Ideas/TensorFlow/AdaptiveMixturesOfLocalExperts.py | 1 | 7316 | import tensorflow as tf
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import random
import math
np.random.seed(1234)
random.seed(1234)
plt.switch_backend("TkAgg")
def plotScatter(points, color):
xs = [x[0] for x in points]
ys = [y[1] for y in points]
plt.scatter(xs, ys, c=color)
def plot_weights(weights, color):
byas = -1 * weights[0]/weights[2]
Xcoef = -1 * weights[1]/weights[2]
plt.plot([-1.0, 1.0], [-1*Xcoef + byas, Xcoef + byas], '{}-'.format(color))
print("B: " + str(byas))
print("XCoef: " + str(Xcoef))
def generateChevronData():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if x >= y and x <= -y:
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def generate_split_data():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if x < 25 and x > -25 :
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def generate_clumps():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, int(totalPoints/2.0)):
x = random.randint(xBounds[0], 0)
y = random.randint(yBounds[0], 0)
if -x - 30 < y:
points.append([x/50.0,y/50.0])
targets.append(1.0)
else:
points.append([x/50.0,y/50.0])
targets.append(0.0)
for i in range(0, int(totalPoints/2.0)):
x = random.randint(0, xBounds[1])
y = random.randint(0, yBounds[1])
if -x + 30 > y:
points.append([x/50.0,y/50.0])
targets.append(1.0)
else:
points.append([x/50.0,y/50.0])
targets.append(0.0)
return np.array(points), np.array(targets)
def init_network(inputs, layers):
network = []
current_in = inputs
for l in layers:
layer = tf.Variable((-0.5 + np.random.rand(l, current_in + 1)), dtype='float64')
current_in = l
network.append(layer)
return network
def apply_network(network, inputs):
current_out = inputs
for layer in network:
current_out = tf.concat([tf.expand_dims(np.repeat([1.0], current_out.shape[0]), 1), current_out], axis=1)
current_out = sigmoid(tf.matmul(current_out, tf.transpose(layer)))
return current_out
def create_bh(inputs, out):
return tf.Variable(np.random.rand(out, inputs + 1), dtype='float64')
def apply_bh(network, inputs):
inputs = tf.concat([tf.expand_dims(np.repeat([1.0], inputs.shape[0]), 1), inputs], axis=1)
return sigmoid(tf.matmul(inputs, tf.transpose(network)))
def sigmoid(tensor):
return 1.0/(1.0 + tf.exp(-tensor))
# Set up the data
random.seed(1234)
points, out = generate_clumps()#generate_split_data()#generateChevronData()
num_bh = 2
one = tf.constant([1.0], dtype='float64')
inpt = tf.placeholder('float64', [2], name='inpt')
target = tf.placeholder('float64', name='target')
inpt_prime = tf.transpose(tf.expand_dims(inpt, 1))
# Create boundary hunters
boundary_hunters = [create_bh(2, 1) for i in range(num_bh)]
# Create gating network
gating_network = init_network(2, [num_bh])
boundary_hunter_outs = [apply_bh(net, inpt_prime)[0][0] for net in boundary_hunters]
gate_out = apply_network(gating_network, inpt_prime)[0]
norm_gate_out = tf.nn.softmax(gate_out)
dif = lambda x: tf.pow(tf.subtract(target, x), 2.0)
o = tf.convert_to_tensor(boundary_hunter_outs, dtype=tf.float64)
square_diff = tf.map_fn(dif, o)
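# The next two lines implement the adaptive-mixture-of-experts objective of
# Jacobs et al. (1991):
#     E = -log( sum_i p_i * exp(-0.5 * (target - o_i)^2) )
# where p_i is the softmax gate output for expert i and o_i is that expert's
# prediction. Unlike the plain gate-weighted squared error (kept commented out
# below), this form pushes the gate towards assigning each case to a single
# expert instead of blending all of them.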
errors = tf.exp((-1.0/2.0) * square_diff)
error = -tf.log(tf.reduce_sum(tf.multiply(norm_gate_out, errors)))
#errors = tf.multiply(norm_gate_out, square_diff)
#error = tf.reduce_sum(errors)
train_op_gate = tf.train.GradientDescentOptimizer(0.1).minimize(error, var_list=gating_network)
train_op_hunters = tf.train.GradientDescentOptimizer(0.0001).minimize(error, var_list=boundary_hunters)
model = tf.global_variables_initializer()
with tf.Session() as session:
session.run(model)
#print(session.run(norm_gate_out, feed_dict={inpt: points[0], target: out[0]}))
#print(session.run(boundary_hunter_outs, feed_dict={inpt: points[0], target: out[0]}))
#print(session.run(o, feed_dict={inpt: points[0], target: out[0]}))
#print(session.run(target, feed_dict={inpt: points[0], target: out[0]}))
#print(session.run(square_diff, feed_dict={inpt: points[0], target: out[0]}))
#print(session.run(errors, feed_dict={inpt: points[0], target: out[0]}))
#print(session.run(error, feed_dict={inpt: points[0], target: out[0]}))
## print()
## session.run(train_op, feed_dict={inpt: points[0], target: out[0]})
## print(session.run(norm_gate_out, feed_dict={inpt: points[0], target: out[0]}))
## print(session.run(errors, feed_dict={inpt: points[0], target: out[0]}))
## print(session.run(error, feed_dict={inpt: points[0], target: out[0]}))
#print()
#for i in range(1000):
# session.run(train_op, feed_dict={inpt: points[0], target: out[0]})
#print(session.run(norm_gate_out, feed_dict={inpt: points[0], target: out[0]}))
#print(session.run(error, feed_dict={inpt: points[0], target: out[0]}))
err = 0
for d in range(len(points)):
print(session.run(norm_gate_out, feed_dict={inpt: points[d], target: out[d]}))
err += session.run(error, feed_dict={inpt: points[d], target: out[d]})
print(err)
for e in range(100):
for d in range(len(points)):
session.run(train_op_gate, feed_dict={inpt: points[d], target: out[d]})
session.run(train_op_hunters, feed_dict={inpt: points[d], target: out[d]})
if e % 1000 == 0:
err = 0
for d in range(len(points)):
err += session.run(error, feed_dict={inpt: points[d], target: out[d]})
print(err)
err = 0
for d in range(len(points)):
print(session.run(norm_gate_out, feed_dict={inpt: points[d], target: out[d]}))
err += session.run(error, feed_dict={inpt: points[d], target: out[d]})
print(err)
final_hunters = session.run(boundary_hunters)
final_gate = session.run(gating_network)
# Plot information
# Plot points on graph
c1 = []
c2 = []
for i in range(0, len(points)):
if out[i] == 0:
c1.append(points[i])
else:
c2.append(points[i])
print("Type 0: ", len(c1))
print("Type 1: ", len(c2))
plotScatter(c1,'y')
plotScatter(c2, 'b')
for bh in final_hunters:
net = bh[0]
print(net)
plot_weights(net, 'k')
#plot_weights(final_gate, 'g')
plt.gca().set_aspect('equal')
plt.show()
| mit |
kylerbrown/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
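# The coordinate-descent solver updates one feature (column) at a time, so the
# column-oriented CSC layout is the sparse format scikit-learn's Lasso handles
# most efficiently.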
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
dokato/connectivipy | connectivipy/plot.py | 1 | 1536 | # -*- coding: utf-8 -*-
#! /usr/bin/env python
from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as plt
from six.moves import range
# plain plotting from values
def plot_conn(values, name='', fs=1, ylim=None, xlim=None, show=True):
'''
    Plot connectivity estimation results. This lets you plot results
    without using the *Data* class.
Args:
*values* : numpy.array
connectivity estimation values in shape (fq, k, k) where fq -
frequency, k - number of channels
*name* = '' : str
title of the plot
*fs* = 1 : int
sampling frequency
*ylim* = None : list
range of y-axis values shown, e.g. [0,1]
            *None* means that the default range for the given estimator is used
*xlim* = None : list [from (int), to (int)]
            range of x-axis values shown; if *None* it spans from 0 to the Nyquist frequency
*show* = True : boolean
show the plot or not
'''
fq, k, k = values.shape
fig, axes = plt.subplots(k, k)
freqs = np.linspace(0, fs//2, fq)
if not xlim:
xlim = [0, np.max(freqs)]
if not ylim:
ylim = [np.min(values), np.max(values)]
for i in range(k):
for j in range(k):
axes[i, j].fill_between(freqs, values[:, i, j], 0)
axes[i, j].set_xlim(xlim)
axes[i, j].set_ylim(ylim)
plt.suptitle(name, y=0.98)
plt.tight_layout()
plt.subplots_adjust(top=0.92)
if show:
plt.show()
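
# Example usage (illustrative only): plot a random "connectivity" array for
# 3 channels and 64 frequency bins sampled at 128 Hz.
#
#     import numpy as np
#     from connectivipy.plot import plot_conn
#     vals = np.random.rand(64, 3, 3)
#     plot_conn(vals, name='random example', fs=128)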
| bsd-2-clause |
CartoDB/cartoframes | setup.py | 1 | 2389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def walk_subpkg(name):
data_files = []
package_dir = 'cartoframes'
for parent, _, files in os.walk(os.path.join(package_dir, name)):
# Remove package_dir from the path.
sub_dir = os.sep.join(parent.split(os.sep)[1:])
for _file in files:
data_files.append(os.path.join(sub_dir, _file))
return data_files
def get_version():
_version = {}
with open('cartoframes/_version.py') as fp:
exec(fp.read(), _version)
return _version['__version__']
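
# _version.py is exec'd instead of imported so that setup.py can read the version
# string without importing (and therefore requiring the dependencies of) the
# cartoframes package itself.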
REQUIRES = [
'appdirs>=1.4.3,<2.0',
'carto>=1.11.2,<2.0',
'jinja2>=2.10.1,<3.0',
'pandas>=0.25.0',
'geopandas>=0.6.0,<1.0',
'unidecode>=1.1.0,<2.0',
'semantic_version>=2.8.0,<3'
]
EXTRAS_REQUIRES_TESTS = [
'pytest',
'pytest-mock',
'pylint',
'flake8'
]
PACKAGE_DATA = {
'': [
'LICENSE',
'CONTRIBUTORS',
],
'cartoframes': [
'assets/*',
'assets/*.j2'
] + walk_subpkg('assets'),
}
DISTNAME = 'cartoframes'
DESCRIPTION = 'CARTO Python package for data scientists'
LICENSE = 'BSD'
URL = 'https://github.com/CartoDB/cartoframes'
AUTHOR = 'CARTO'
EMAIL = '[email protected]'
setup(
name=DISTNAME,
version=get_version(),
description=DESCRIPTION,
long_description=open('README.rst').read(),
long_description_content_type='text/x-rst',
license=LICENSE,
url=URL,
author=AUTHOR,
author_email=EMAIL,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
keywords=['carto', 'data', 'science', 'maps', 'spatial', 'pandas'],
packages=find_packages(),
package_data=PACKAGE_DATA,
package_dir={'cartoframes': 'cartoframes'},
include_package_data=True,
install_requires=REQUIRES,
extras_require={
'tests': EXTRAS_REQUIRES_TESTS
},
python_requires='>=3.5'
)
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.16/_downloads/plot_topo_compare_conditions.py | 8 | 2192 | """
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and auditory responses is created.
Both conditions are then accessed by their respective names to create a sensor
layout plot of the related evoked responses.
"""
# Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: MEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
# bad channels in raw.info['bads'] will be automatically excluded
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
include=include, exclude='bads')
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
evokeds = [epochs[name].average() for name in ('left', 'right')]
###############################################################################
# Show topography for two different conditions
colors = 'blue', 'red'
title = 'MNE sample data\nleft vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title, background_color='w')
plt.show()
| bsd-3-clause |
MartinSavc/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function depends on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
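
# Concretely, replacing C by C / n_samples turns the data-fit term into an average:
#     (C / n) * sum_{i=1..n} L(f(x_i), y_i) = C * mean_i L(f(x_i), y_i),
# so the loss term no longer grows with the number of samples and keeps a
# comparable weight relative to the penalty across training-set sizes.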
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
TomAugspurger/pandas | pandas/core/groupby/grouper.py | 1 | 28910 | """
Provide user facing operators for doing the split part of the
split-apply-combine paradigm.
"""
from typing import Dict, Hashable, List, Optional, Tuple
import warnings
import numpy as np
from pandas._typing import FrameOrSeries
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, ExtensionArray
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.groupby import ops
from pandas.core.groupby.categorical import recode_for_groupby, recode_from_groupby
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.series import Series
from pandas.io.formats.printing import pprint_thing
class Grouper:
"""
A Grouper allows the user to specify a groupby instruction for an object.
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
If `axis` and/or `level` are passed as keywords to both `Grouper` and
`groupby`, the values passed to `Grouper` take precedence.
Parameters
----------
key : str, defaults to None
Groupby key, which selects the grouping column of the target.
level : name/number, defaults to None
The level for the target index.
freq : str / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
axis : str, int, defaults to 0
Number/name of the axis.
sort : bool, default to False
Whether to sort the resulting labels.
closed : {'left' or 'right'}
Closed end of interval. Only when `freq` parameter is passed.
label : {'left' or 'right'}
Interval boundary to use for labeling.
Only when `freq` parameter is passed.
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex and `freq` parameter is passed.
base : int, default 0
Only when `freq` parameter is passed.
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
.. deprecated:: 1.1.0
The new arguments that you should use are 'offset' or 'origin'.
loffset : str, DateOffset, timedelta object
Only when `freq` parameter is passed.
.. deprecated:: 1.1.0
loffset is only working for ``.resample(...)`` and not for
Grouper (:issue:`28302`).
However, loffset is also deprecated for ``.resample(...)``
See: :class:`DataFrame.resample`
origin : {'epoch', 'start', 'start_day'}, Timestamp or str, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin must
match the timezone of the index.
If a timestamp is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
.. versionadded:: 1.1.0
offset : Timedelta or str, default is None
An offset timedelta added to the origin.
.. versionadded:: 1.1.0
Returns
-------
A specification for a groupby instruction
Examples
--------
Syntactic sugar for ``df.groupby('A')``
>>> df = pd.DataFrame(
... {
... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
... "Speed": [100, 5, 200, 300, 15],
... }
... )
>>> df
Animal Speed
0 Falcon 100
1 Parrot 5
2 Falcon 200
3 Falcon 300
4 Parrot 15
>>> df.groupby(pd.Grouper(key="Animal")).mean()
Speed
Animal
Falcon 200
Parrot 10
Specify a resample operation on the column 'Publish date'
>>> df = pd.DataFrame(
... {
... "Publish date": [
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-09"),
... pd.Timestamp("2000-01-16")
... ],
... "ID": [0, 1, 2, 3],
... "Price": [10, 20, 30, 40]
... }
... )
>>> df
Publish date ID Price
0 2000-01-02 0 10
1 2000-01-02 1 20
2 2000-01-09 2 30
3 2000-01-16 3 40
>>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()
ID Price
Publish date
2000-01-02 0.5 15.0
2000-01-09 2.0 30.0
2000-01-16 3.0 40.0
If you want to adjust the start of the bins based on a fixed timestamp:
>>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
>>> rng = pd.date_range(start, end, freq='7min')
>>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
>>> ts
2000-10-01 23:30:00 0
2000-10-01 23:37:00 3
2000-10-01 23:44:00 6
2000-10-01 23:51:00 9
2000-10-01 23:58:00 12
2000-10-02 00:05:00 15
2000-10-02 00:12:00 18
2000-10-02 00:19:00 21
2000-10-02 00:26:00 24
Freq: 7T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min')).sum()
2000-10-01 23:14:00 0
2000-10-01 23:31:00 9
2000-10-01 23:48:00 21
2000-10-02 00:05:00 54
2000-10-02 00:22:00 24
Freq: 17T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
2000-10-01 23:18:00 0
2000-10-01 23:35:00 18
2000-10-01 23:52:00 27
2000-10-02 00:09:00 39
2000-10-02 00:26:00 24
Freq: 17T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
2000-10-01 23:24:00 3
2000-10-01 23:41:00 15
2000-10-01 23:58:00 45
2000-10-02 00:15:00 45
Freq: 17T, dtype: int64
If you want to adjust the start of the bins with an `offset` Timedelta, the two
following lines are equivalent:
>>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17T, dtype: int64
To replace the use of the deprecated `base` argument, you can now use `offset`,
in this example it is equivalent to have `base=2`:
>>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
2000-10-01 23:16:00 0
2000-10-01 23:33:00 9
2000-10-01 23:50:00 36
2000-10-02 00:07:00 39
2000-10-02 00:24:00 24
Freq: 17T, dtype: int64
"""
_attributes: Tuple[str, ...] = ("key", "level", "freq", "axis", "sort")
def __new__(cls, *args, **kwargs):
if kwargs.get("freq") is not None:
from pandas.core.resample import TimeGrouper
# Deprecation warning of `base` and `loffset` since v1.1.0:
# we are raising the warning here to be able to set the `stacklevel`
# properly since we need to raise the `base` and `loffset` deprecation
# warning from three different cases:
# core/generic.py::NDFrame.resample
# core/groupby/groupby.py::GroupBy.resample
# core/groupby/grouper.py::Grouper
# raising these warnings from TimeGrouper directly would fail the test:
# tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base
# hacky way to set the stacklevel: if cls is TimeGrouper it means
# that the call comes from a pandas internal call of resample,
# otherwise it comes from pd.Grouper
stacklevel = 4 if cls is TimeGrouper else 2
if kwargs.get("base", None) is not None:
warnings.warn(
"'base' in .resample() and in Grouper() is deprecated.\n"
"The new arguments that you should use are 'offset' or 'origin'.\n"
'\n>>> df.resample(freq="3s", base=2)\n'
"\nbecomes:\n"
'\n>>> df.resample(freq="3s", offset="2s")\n',
FutureWarning,
stacklevel=stacklevel,
)
if kwargs.get("loffset", None) is not None:
warnings.warn(
"'loffset' in .resample() and in Grouper() is deprecated.\n"
'\n>>> df.resample(freq="3s", loffset="8H")\n'
"\nbecomes:\n"
"\n>>> from pandas.tseries.frequencies import to_offset"
'\n>>> df = df.resample(freq="3s").mean()'
'\n>>> df.index = df.index.to_timestamp() + to_offset("8H")\n',
FutureWarning,
stacklevel=stacklevel,
)
cls = TimeGrouper
return super().__new__(cls)
def __init__(
self, key=None, level=None, freq=None, axis=0, sort=False, dropna=True
):
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.grouper = None
self.obj = None
self.indexer = None
self.binner = None
self._grouper = None
self.dropna = dropna
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj, validate: bool = True):
"""
Parameters
----------
obj : the subject object
validate : boolean, default True
if True, validate the grouper
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, _, self.obj = get_grouper(
self.obj,
[self.key],
axis=self.axis,
level=self.level,
sort=self.sort,
validate=validate,
dropna=self.dropna,
)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj: FrameOrSeries, sort: bool = False):
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : Series or DataFrame
sort : bool, default False
whether the resulting grouper should be sorted
"""
assert obj is not None
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# Keep self.grouper value before overriding
if self._grouper is None:
self._grouper = self.grouper
# the key must be a valid info item
if self.key is not None:
key = self.key
# The 'on' is already defined
if getattr(self.grouper, "name", None) == key and isinstance(
obj, ABCSeries
):
ax = self._grouper.take(obj.index)
else:
if key not in obj._info_axis:
raise KeyError(f"The grouper name {key} is not found")
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax._get_level_values(level), name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(f"The level {level} is not valid")
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
# use stable sort to support first, last, nth
indexer = self.indexer = ax.argsort(kind="mergesort")
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis)
self.obj = obj
self.grouper = ax
return self.grouper
@property
def groups(self):
return self.grouper.groups
def __repr__(self) -> str:
attrs_list = (
f"{attr_name}={repr(getattr(self, attr_name))}"
for attr_name in self._attributes
if getattr(self, attr_name) is not None
)
attrs = ", ".join(attrs_list)
cls_name = type(self).__name__
return f"{cls_name}({attrs})"
class Grouping:
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj Union[DataFrame, Series]:
name : Label
level :
observed : bool, default False
If we are a Categorical, use the observed values
in_axis : if the Grouping is a column in self.obj and hence among
Groupby.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* codes : ndarray, group codes
* group_index : unique groups
* groups : dict of {group -> label_list}
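Notes
-----
Illustrative sketch (not part of the original docstring): for
``df.groupby("A")`` where ``df["A"]`` holds ``["x", "y", "x"]``, the single
resulting Grouping has ``codes == [0, 1, 0]`` and
``group_index == Index(["x", "y"])``, analogous to the output of
``pd.factorize``.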
"""
def __init__(
self,
index: Index,
grouper=None,
obj: Optional[FrameOrSeries] = None,
name=None,
level=None,
sort: bool = True,
observed: bool = False,
in_axis: bool = False,
dropna: bool = True,
):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.all_grouper = None
self.index = index
self.sort = sort
self.obj = obj
self.observed = observed
self.in_axis = in_axis
self.dropna = dropna
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper._values
# we have a single grouper which may be a myriad of things,
# some of which depend on the level being passed in
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError(f"Level {level} not in index")
level = index.names.index(level)
if self.name is None:
self.name = index.names[level]
(
self.grouper,
self._codes,
self._group_index,
) = index._get_grouper_for_level(self.grouper, level)
# a passed Grouper like, directly get the grouper in the same way
# as single grouper groupby, use the group_info to get codes
elif isinstance(self.grouper, Grouper):
# get the new grouper; we already have disambiguated
# what key/level refer to exactly, don't need to
# check again as we have by this point converted these
# to an actual value (rather than a pd.Grouper)
_, grouper, _ = self.grouper._get_grouper(self.obj, validate=False)
if self.name is None:
self.name = grouper.result_index.name
self.obj = self.grouper.obj
self.grouper = grouper._get_grouper()
else:
if self.grouper is None and self.name is not None and self.obj is not None:
self.grouper = self.obj[self.name]
elif isinstance(self.grouper, (list, tuple)):
self.grouper = com.asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
self.grouper, self.all_grouper = recode_for_groupby(
self.grouper, self.sort, observed
)
categories = self.grouper.categories
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._codes = self.grouper.codes
if observed:
codes = algorithms.unique1d(self.grouper.codes)
codes = codes[codes != -1]
if sort or self.grouper.ordered:
codes = np.sort(codes)
else:
codes = np.arange(len(categories))
self._group_index = CategoricalIndex(
Categorical.from_codes(
codes=codes, categories=categories, ordered=self.grouper.ordered
),
name=self.name,
)
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(
self.grouper, (Series, Index, ExtensionArray, np.ndarray)
):
if getattr(self.grouper, "ndim", 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError(f"Grouper for '{t}' not 1-dimensional")
self.grouper = self.index.map(self.grouper)
if not (
hasattr(self.grouper, "__len__")
and len(self.grouper) == len(self.index)
):
grper = pprint_thing(self.grouper)
errmsg = (
"Grouper result violates len(labels) == "
f"len(data)\nresult: {grper}"
)
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have
# Timestamps like
if getattr(self.grouper, "dtype", None) is not None:
if is_datetime64_dtype(self.grouper):
self.grouper = self.grouper.astype("datetime64[ns]")
elif is_timedelta64_dtype(self.grouper):
self.grouper = self.grouper.astype("timedelta64[ns]")
def __repr__(self) -> str:
return f"Grouping({self.name})"
def __iter__(self):
return iter(self.indices)
_codes: Optional[np.ndarray] = None
_group_index: Optional[Index] = None
@property
def ngroups(self) -> int:
return len(self.group_index)
@cache_readonly
def indices(self):
# we have a list of groupers
if isinstance(self.grouper, ops.BaseGrouper):
return self.grouper.indices
values = Categorical(self.grouper)
return values._reverse_indexer()
@property
def codes(self) -> np.ndarray:
if self._codes is None:
self._make_codes()
return self._codes
@cache_readonly
def result_index(self) -> Index:
if self.all_grouper is not None:
return recode_from_groupby(self.all_grouper, self.sort, self.group_index)
return self.group_index
@property
def group_index(self) -> Index:
if self._group_index is None:
self._make_codes()
assert self._group_index is not None
return self._group_index
def _make_codes(self) -> None:
if self._codes is None or self._group_index is None:
# we have a list of groupers
if isinstance(self.grouper, ops.BaseGrouper):
codes = self.grouper.codes_info
uniques = self.grouper.result_index
else:
codes, uniques = algorithms.factorize(
self.grouper, sort=self.sort, dropna=self.dropna
)
uniques = Index(uniques, name=self.name)
self._codes = codes
self._group_index = uniques
@cache_readonly
def groups(self) -> Dict[Hashable, np.ndarray]:
return self.index.groupby(Categorical.from_codes(self.codes, self.group_index))
def get_grouper(
obj: FrameOrSeries,
key=None,
axis: int = 0,
level=None,
sort: bool = True,
observed: bool = False,
mutated: bool = False,
validate: bool = True,
dropna: bool = True,
) -> "Tuple[ops.BaseGrouper, List[Hashable], FrameOrSeries]":
"""
Create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
If observed & we have a categorical grouper, only show the observed
values.
If validate, then check for key/level overlaps.
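Examples
--------
A minimal sketch of the common column-key case (illustrative only; the
frame and key names are made up):
>>> import pandas as pd
>>> df = pd.DataFrame({"A": ["x", "x", "y"], "B": [1, 2, 3]})
>>> grouper, exclusions, obj = get_grouper(df, key="A")
Here ``grouper`` is a BaseGrouper holding a single Grouping built from
column "A", and ``exclusions == ["A"]``, so the key column is excluded
from aggregation.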
"""
group_axis = obj._get_axis(axis)
# validate that the passed single level is compatible with the passed
# axis of the object
if level is not None:
# TODO: These if-block and else-block are almost same.
# MultiIndex instance check is removable, but it seems that there are
# some processes only for non-MultiIndex in else-block,
# eg. `obj.index.name != level`. We have to consider carefully whether
# these are applicable for MultiIndex. Even if these are applicable,
# we need to check if it makes no side effect to subsequent processes
# on the outside of this condition.
# (GH 17621)
if isinstance(group_axis, MultiIndex):
if is_list_like(level) and len(level) == 1:
level = level[0]
if key is None and is_scalar(level):
# Get the level values from group_axis
key = group_axis.get_level_values(level)
level = None
else:
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0]
elif nlevels == 0:
raise ValueError("No group keys passed!")
else:
raise ValueError("multiple levels only valid with MultiIndex")
if isinstance(level, str):
if obj._get_axis(axis).name != level:
raise ValueError(
f"level name {level} is not the name "
f"of the {obj._get_axis_name(axis)}"
)
elif level > 0 or level < -1:
raise ValueError("level > 0 or level < -1 only valid with MultiIndex")
# NOTE: `group_axis` and `group_axis.get_level_values(level)`
# are same in this section.
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj, validate=False)
if key.key is None:
return grouper, [], obj
else:
return grouper, [key.key], obj
# already have a BaseGrouper, just return it
elif isinstance(key, ops.BaseGrouper):
return key, [], obj
if not isinstance(key, list):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(
isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys
)
# is this an index replacement?
if (
not any_callable
and not any_arraylike
and not any_groupers
and match_axis_length
and level is None
):
if isinstance(obj, DataFrame):
all_in_columns_index = all(
g in obj.columns or g in obj.index.names for g in keys
)
else:
assert isinstance(obj, Series)
all_in_columns_index = all(g in obj.index.names for g in keys)
if not all_in_columns_index:
keys = [com.asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings: List[Grouping] = []
exclusions: List[Hashable] = []
# if the actual grouper should be obj[key]
def is_in_axis(key) -> bool:
if not _is_label_like(key):
# items -> .columns for DataFrame, .index for Series
items = obj.axes[-1]
try:
items.get_loc(key)
except (KeyError, TypeError, InvalidIndexError):
# TypeError shows up here if we pass e.g. Int64Index
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr) -> bool:
if not hasattr(gpr, "name"):
return False
try:
return gpr is obj[gpr.name]
except (KeyError, IndexError, ValueError):
# TODO: ValueError: Given date string not likely a datetime.
# should be KeyError?
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if validate:
obj._check_label_or_level_ambiguity(gpr, axis=axis)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
elif obj._is_level_reference(gpr, axis=axis):
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
exclusions.append(gpr.key)
in_axis, name = False, None
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
raise ValueError(
f"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]}) "
"must be same length"
)
# create the Grouping
# allow the actual Grouping to be passed in as the gpr
ping = (
Grouping(
group_axis,
gpr,
obj=obj,
name=name,
level=level,
sort=sort,
observed=observed,
in_axis=in_axis,
dropna=dropna,
)
if not isinstance(gpr, Grouping)
else gpr
)
groupings.append(ping)
if len(groupings) == 0 and len(obj):
raise ValueError("No group keys passed!")
elif len(groupings) == 0:
groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp)))
# create the internals grouper
grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return grouper, exclusions, obj
def _is_label_like(val) -> bool:
return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val))
def _convert_grouper(axis: Index, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise ValueError("Grouper and axis must be same length")
return grouper
else:
return grouper
| bsd-3-clause |
JeanKossaifi/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information-theoretic evaluation scores: because they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
survey-methods/samplics | src/samplics/weighting/adjustment.py | 1 | 25028 | """Sample weighting module
*SampleWeight* is the main class in this module which implements weight adjustments to account for
nonresponse, calibrate to auxiliary information, normalize weights, and trim extreme weights. Valliant, R. and Dever, J. A. (2018) [#vd2018]_ provides a step-by-step guide on calculating
sample weights.
.. [#vd2018] Valliant, R. and Dever, J. A. (2018), *Survey Weights: A Step-by-Step Guide to
Calculation*, Stata Press.
"""
from __future__ import annotations
from typing import Optional, Union
import numpy as np
import pandas as pd
from samplics.utils import checks, formats
from samplics.utils.types import (
Array,
DictStrFloat,
DictStrInt,
DictStrNum,
Number,
StringNumber,
)
class SampleWeight:
"""*SampleWeight* implements several adjustments to sample weights. The class does not computes design sample weights. It is expected at this point some initial weights are
available e.g. design sample weights or some other sample weights. Using this module,
the user will be able to adjust sample weights to account for nonresponse, normalize
sample weights so that they sum to some control value(s), poststratify, and calibrate
based on auxiliary information.
Attributes
| adjust_method (str): adjustment method. Possible values are nonresponse,
| normalization, poststratification, calibration.
| number_units (dict): number of units per domain.
| deff_wgt (dict): design effect due to unequal weights per domain.
| adjust_factor (dict): normalizing adjustment factor per domain.
| control (dict): control values per domain.
Methods
| deff_weight(): computes the design effect due to weighting.
| adjust(): adjust the sample weights to account for nonresponse.
| normalize(): normalize the sample weights to ensure they sum to a control value.
| poststratify(): poststratify the sample weights.
| calib_covariates(): convert a dataframe to a tuple of an array and a dictionary.
| The array corresponds to the calibration domains. The dictionary maps the array
| elements with their corresponding control values.
| calibrate(): calibrate the sample weights.
TODO: trim(), rake()
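Example (illustrative sketch; the weights and response codes below are
made up, and the import path assumes the public ``samplics.weighting``
namespace)::
    import numpy as np
    from samplics.weighting import SampleWeight
    wgt = np.array([10.0, 10.0, 10.0, 10.0])
    status = np.array(["rr", "rr", "nr", "in"])
    adjusted = SampleWeight().adjust(
        samp_weight=wgt, adjust_class=None, resp_status=status
    )
    # the nonrespondent's weight is redistributed to the two
    # respondents: adjusted == [15., 15., 0., 10.]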
"""
def __init__(self) -> None:
self.adjust_method: str = ""
self.number_units: Union[DictStrInt, int] = {}
self.deff_wgt: Union[DictStrNum, Number] = {}
self.adjust_factor: Union[DictStrNum, Number] = {}
self.control: Union[DictStrNum, Number] = {}
def __repr__(self) -> str:
pass
def __str__(self) -> str:
pass
def _number_units(self, domain: Optional[np.ndarray], samp_weight: np.ndarray) -> None:
"""Returns the number of units"""
if domain is None:
self.number_units = len(samp_weight)
elif domain is not None:
keys, values = np.unique(domain, return_counts=True)
self.number_units = dict(zip(keys, values))
@staticmethod
def _deff_wgt(samp_weight: np.ndarray) -> Number:
"""compute the design effect due to unequal weights -
Page 71 of Valliant and Dever (2018)"""
mean_w = np.mean(samp_weight)
relvar_w = np.power(samp_weight - mean_w, 2) / mean_w ** 2
return float(1 + np.mean(relvar_w))
def deff_weight(
self, samp_weight: Array, domain: Optional[np.ndarray] = None
) -> Union[DictStrNum, Number]:
"""Computes the design effect due to unequal weights.
Args:
samp_weight (Array): array of the pre-adjustment sample weight. This vector
should contain numeric values.
domain (Optional[np.ndarray], optional): array indicating the normalization class
for each sample unit. Defaults to None.
Returns:
DictStrNum: dictionary pairing the domains to the design effects due to
unequal weights.
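Example (illustrative; with weights 1 and 3 the relative variance of the
weights is 0.25, so the design effect is 1 + 0.25 = 1.25):
>>> SampleWeight().deff_weight([1.0, 3.0])
1.25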
"""
samp_weight = formats.numpy_array(samp_weight)
if domain is None:
self.deff_wgt = self._deff_wgt(samp_weight)
return self.deff_wgt
else:
self.deff_wgt = {}
for d in np.unique(domain):
self.deff_wgt[d] = self._deff_wgt(samp_weight[domain == d])
return self.deff_wgt
@staticmethod
def _norm_adjustment(
samp_weight: np.ndarray,
control: Number,
) -> tuple[np.ndarray, Number]:
sum_weights = np.sum(samp_weight)
adjust_factor = float(control / sum_weights)
return np.asarray(samp_weight * adjust_factor), adjust_factor
@staticmethod
def _response(
resp_status: np.ndarray, resp_dict: Optional[dict[str, StringNumber]]
) -> np.ndarray:
resp_status = formats.numpy_array(resp_status)
checks.assert_response_status(resp_status, resp_dict)
if not np.isin(resp_status, ("in", "rr", "nr", "uk")).any() and resp_dict is not None:
resp_code = np.repeat(" ", resp_status.size).astype(str)
resp_code[resp_status == resp_dict["in"]] = "in"
resp_code[resp_status == resp_dict["rr"]] = "rr"
resp_code[resp_status == resp_dict["nr"]] = "nr"
resp_code[resp_status == resp_dict["uk"]] = "uk"
else:
resp_code = resp_status
return resp_code
@staticmethod
def _adjust_factor(
samp_weight: np.ndarray, resp_code: np.ndarray, unknown_to_inelig: bool
) -> tuple[np.ndarray, Number]:
in_sample = resp_code == "in" # ineligible
rr_sample = resp_code == "rr" # respondent
nr_sample = resp_code == "nr" # nonrespondent
uk_sample = resp_code == "uk" # unknown
in_weights_sum = float(np.sum(samp_weight[in_sample]))
rr_weights_sum = float(np.sum(samp_weight[rr_sample]))
nr_weights_sum = float(np.sum(samp_weight[nr_sample]))
uk_weights_sum = float(np.sum(samp_weight[uk_sample]))
if unknown_to_inelig:
adjust_uk = (in_weights_sum + rr_weights_sum + nr_weights_sum + uk_weights_sum) / (
in_weights_sum + rr_weights_sum + nr_weights_sum
)
adjust_rr = (rr_weights_sum + nr_weights_sum) / rr_weights_sum
else:
adjust_uk = 1
adjust_rr = (rr_weights_sum + nr_weights_sum + uk_weights_sum) / rr_weights_sum
adjust_factor = np.zeros(samp_weight.size)  # unknown and nonrespondent units get 0 by default (their weight is redistributed)
adjust_factor[rr_sample] = adjust_rr * adjust_uk
adjust_factor[in_sample] = adjust_uk
return adjust_factor, adjust_rr
def adjust(
self,
samp_weight: Array,
adjust_class: Array,
resp_status: Array,
resp_dict: Optional[Union[dict[str, StringNumber]]] = None,
unknown_to_inelig: bool = True,
) -> np.ndarray:
"""adjusts sample weight to account for non-response.
Args:
samp_weight (np.ndarray): array of the pre-adjustment sample weight. This vector
should contain numeric values.
adjust_class (np.ndarray): array indicating the adjustment class for each sample unit.
The sample weight adjustments will be performed within the classes defined by this
parameter.
resp_status (np.ndarray): array indicating the eligibility and response status of the
sample unit. Values of resp_status should indicate: ineligible (in), respondent (rr), nonrespondent (nr), or unknown (uk). If the values of the parameter are not in ("in", "rr", "nr", "uk"), then resp_dict is required.
resp_dict (Union[dict[str, StringNumber], None], optional): dictionary providing the
mapping between the values of resp_status and the ["in", "rr", "nr", "uk"].
For example, if the response statuses are: 0 for ineligible, 1 for respondent,
2 for nonrespondent, and 9 for unknown. Then the dictionary will be {"in": 0, "rr": 1, "nr": 2, "uk": 9}. If the response status variable has only values in ("in", "rr", "nr", "uk") then the dictionary is not needed. Optional parameter. Defaults to None.
unknown_to_inelig (bool, optional): if True, the weight of units with unknown eligibility is redistributed across the ineligible, respondent, and nonrespondent groups; otherwise it is redistributed to the respondents only. Defaults to True.
Raises:
AssertionError: raises an assertion error if adjust_class is not a list, numpy array,
or pandas dataframe/series.
Returns:
np.ndarray: array of the adjusted sample weights.
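Example (illustrative sketch; the status codes follow the resp_dict
mapping described above)::
    status = np.array([1, 1, 2, 0])   # 1=respondent, 2=nonrespondent, 0=ineligible
    mapping = {"in": 0, "rr": 1, "nr": 2, "uk": 9}
    wgt = np.array([10.0, 10.0, 10.0, 10.0])
    adjusted = SampleWeight().adjust(
        samp_weight=wgt, adjust_class=None,
        resp_status=status, resp_dict=mapping
    )
    # adjusted == [15., 15., 0., 10.]: the nonrespondent weight is
    # redistributed to the respondents, the ineligible unit keeps its weight.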
"""
resp_code = self._response(formats.numpy_array(resp_status), resp_dict)
samp_weight = formats.numpy_array(samp_weight)
adjusted_weight = np.ones(samp_weight.size) * np.nan
if adjust_class is None:
(
adjust_factor,
self.adjust_factor,
) = self._adjust_factor(samp_weight, resp_code, unknown_to_inelig)
adjusted_weight = adjust_factor * samp_weight
else:
if isinstance(adjust_class, list):
adjust_class = pd.DataFrame(np.column_stack(adjust_class))
elif isinstance(adjust_class, np.ndarray):
adjust_class = pd.DataFrame(adjust_class)
elif not isinstance(adjust_class, (pd.Series, pd.DataFrame)):
raise AssertionError(
"adjust_class must be an numpy ndarray, a list of numpy ndarray or a pandas dataframe."
)
adjust_array = formats.dataframe_to_array(adjust_class)
self.adjust_factor = {}
for c in np.unique(adjust_array):
samp_weight_c = samp_weight[adjust_array == c]
resp_code_c = resp_code[adjust_array == c]
adjust_factor_c, self.adjust_factor[c] = self._adjust_factor(
samp_weight_c, resp_code_c, unknown_to_inelig
)
adjusted_weight[adjust_array == c] = adjust_factor_c * samp_weight_c
self.deff_wgt = self.deff_weight(adjusted_weight)
self.adjust_method = "nonresponse"
return np.asarray(adjusted_weight)
@staticmethod
def _core_matrix(
samp_weight: np.ndarray,
x: np.ndarray,
x_weighted_total: np.ndarray,
x_control: np.ndarray,
scale: np.ndarray,
) -> np.ndarray:
v_inv_d = np.diag(samp_weight / scale)
core_matrix = np.dot(np.matmul(np.transpose(x), v_inv_d), x)
if x.shape == (x.size,):
core_factor = (x_control - x_weighted_total) / core_matrix
else:
core_factor = np.matmul(
np.transpose(x_control - x_weighted_total),
np.linalg.inv(core_matrix),
)
return np.asarray(core_factor)
def normalize(
self,
samp_weight: Array,
control: Optional[Union[DictStrNum, Number]] = None,
domain: Optional[Array] = None,
) -> np.ndarray:
"""normalizes the sample weights to sum to a known constants or levels.
Args:
samp_weight (array) : array of the pre-adjustment sample weight. This vector should
contain numeric values.
control (int, float, dictionary) : a number or array of the level to calibrate the
sum of the weights. Default is number of units by domain key or overall if domain
is None. Defaults to None.
domain (Optional[Array], optional) : array indicating the normalization class for each
sample unit. Defaults to None.
Returns:
np.ndarray: array of the normalized sample weights.
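Example (illustrative):
>>> SampleWeight().normalize([2.0, 2.0, 6.0], control=100)
array([20., 20., 60.])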
"""
samp_weight = formats.numpy_array(samp_weight)
norm_weight = samp_weight.copy()
if domain is not None:
domain = formats.numpy_array(domain)
keys = np.unique(domain)
levels: np.ndarray = np.zeros(keys.size) * np.nan
self.adjust_factor = {}
self.control = {}
for k, key in enumerate(keys):
weight_k = samp_weight[domain == key]
if control is None:
levels[k] = np.sum(domain == key)
elif control is not None and isinstance(control, dict):
levels[k] = control[key]
elif isinstance(control, (float, int)):
levels[k] = control
(
norm_weight[domain == key],
self.adjust_factor[key],
) = self._norm_adjustment(weight_k, levels[k])
self.control[key] = levels[k]
else:
if control is None:
self.control = int(np.sum(samp_weight.size))
norm_weight, self.adjust_factor = self._norm_adjustment(samp_weight, self.control)
elif isinstance(control, (int, float)):
norm_weight, self.adjust_factor = self._norm_adjustment(samp_weight, control)
self.control = control
self.adjust_method = "normalization"
return norm_weight
def poststratify(
self,
samp_weight: Array,
control: Optional[Union[DictStrNum, Number]] = None,
factor: Optional[Union[DictStrNum, Number]] = None,
domain: Optional[Array] = None,
) -> np.ndarray:
"""[summary]
Args:
samp_weight (Array): array of the pre-adjustment sample weights.
control (Union[DictStrNum, Number, None], optional): a number or
array of the level to calibrate the sum of the weights. Defaults to None.
factor (Union[DictStrNum, Number, None], optional): adjustment factor.
Defaults to None.
domain (Optional[Array], optional): array indicating the normalization class for each
sample unit. Defaults to None.
Raises:
AssertionError: raises an assertion error if both control and factor are not provided.
ValueError: raises an error if the control dictionary keys do not match the domain values.
ValueError: raises an error if the factor dictionary keys do not match the domain values.
Returns:
np.ndarray: array of poststratified sample weights.
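Example (illustrative sketch; domains and control totals are made up)::
    wgt = np.array([5.0, 5.0, 5.0, 5.0])
    region = np.array(["north", "north", "south", "south"])
    ps_wgt = SampleWeight().poststratify(
        samp_weight=wgt, control={"north": 30, "south": 70}, domain=region
    )
    # within each region the weights are rescaled to the control total:
    # ps_wgt == [15., 15., 35., 35.]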
"""
if control is None and factor is None:
raise AssertionError("control or factor must be specified.")
if isinstance(control, dict):
if (np.unique(domain) != np.unique(list(control.keys()))).any():
raise ValueError("control dictionary keys do not match domain values.")
if control is None and domain is not None:
if (
isinstance(factor, dict)
and (np.unique(domain) != np.unique(list(factor.keys()))).any()
):
raise ValueError("factor dictionary keys do not match domain values.")
sum_weight = float(np.sum(samp_weight))
if isinstance(factor, dict):
control = {}
for d in np.unique(domain):
control[d] = sum_weight * factor[d]
elif isinstance(factor, (int, float)):
control = sum_weight * factor
ps_weight = self.normalize(samp_weight, control, domain)
self.adjust_method = "poststratification"
return ps_weight
def _raked_wgt(
self,
samp_weight: np.ndarray,
X: np.ndarray,
control: Union[DictStrNum, None],
domain: Optional[Array] = None,
) -> None:
pass
def rake(
self,
samp_weight: Array,
x: Union[np.ndarray, pd.DataFrame],
control: Union[DictStrNum, None] = None,
scale: Union[np.ndarray, Number] = 1,
domain: Optional[Array] = None,
) -> np.ndarray:
pass
@staticmethod
def _calib_covariates(
data: pd.DataFrame,
x_cat: Optional[list[str]] = None,
x_cont: Optional[list[str]] = None,
) -> tuple[np.ndarray, DictStrNum]:
if not isinstance(data, pd.DataFrame) or data is None:
raise ValueError("data must be a pandas dataframe.")
if x_cat is None and x_cont is None:
raise AssertionError("x_cat and/or x_cont must be specified.")
else:
if x_cat is not None:
x_concat = formats.dataframe_to_array(data[x_cat])
x_dummies = pd.get_dummies(x_concat)
x_dict = formats.array_to_dict(x_concat)
# x_dummies.insert(0, "intercept", 1)
if x_cont is None and x_dummies is not None:
x_array = x_dummies.astype("int")
elif x_cont is not None and x_dict is not None:
x_array = pd.concat([x_dummies, data[x_cont]], axis=1).astype("int")
x_cont_dict: DictStrNum = {}
nb_obs = data[x_cont].shape[0]
for var in x_cont:
x_cont_dict[var] = nb_obs
x_dict.update(x_cont_dict)
else:
raise AssertionError
return np.asarray(x_array.to_numpy()), x_dict
def calib_covariates(
self,
data: pd.DataFrame,
x_cat: Optional[list[str]] = None,
x_cont: Optional[list[str]] = None,
domain: Optional[list[str]] = None,
) -> tuple[np.ndarray, Union[DictStrNum, dict[StringNumber, DictStrNum]]]:
"""A utility function that creates an array of the calibration groups/domains and
a dictionary pairing the domains with the control values.
Args:
data (pd.DataFrame): input pandas dataframe with the calibration's control data.
x_cat (Optional[list[str]], optional): list of the names of the categorical control
variables. Defaults to None.
x_cont (Optional[list[str]], optional): list of the names of the continuous control
variables. Defaults to None.
domain (Optional[list[str]], optional): list of the names of the variables defining
the normalization classes for each sample unit. Defaults to None.
Raises:
AssertionError: raises an assertion error if input data is not a pandas dataframe.
Returns:
tuple[np.ndarray, Union[DictStrNum, dict[StringNumber, DictStrNum]]]: a tuple of
an array of the calibration domains and a dictionary pairing the domains with the
control values.
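Example (illustrative sketch; the variable names are made up)::
    frame = pd.DataFrame({"region": ["north", "south", "south"]})
    x_array, x_dict = SampleWeight().calib_covariates(frame, x_cat=["region"])
    # x_array is the dummy-coded matrix of the calibration domains and
    # x_dict maps each category ("north", "south") to np.nan, to be
    # replaced by the user with the known control totals.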
"""
if not isinstance(data, (pd.DataFrame, pd.Series)):
raise AssertionError("data must be a pandas dataframe.")
if isinstance(data[x_cat], pd.Series):
nb_cols = (data[x_cat].drop_duplicates()).shape[0] + 1
elif x_cont is None:
nb_cols = (data[x_cat].drop_duplicates()).shape[0]
else:
nb_cols = (data[x_cat].drop_duplicates()).shape[0] + len(x_cont)
x_dict: Union[DictStrNum, dict[StringNumber, DictStrNum]]
if domain is None:
x_array, x_dict = self._calib_covariates(data, x_cat, x_cont)
for key in x_dict:
x_dict[key] = np.nan
else:
x_dict2: dict[StringNumber, DictStrNum] = {}
x_dict_d: DictStrNum
x_array = np.zeros((data.shape[0], nb_cols))
for d in np.unique(data[domain].values):
(
x_array[data[domain] == d, :],
x_dict_d,
) = self._calib_covariates(data[data[domain] == d], x_cat, x_cont)
for key in x_dict_d:
x_dict_d[key] = np.nan
x_dict2[d] = x_dict_d
x_dict = x_dict2
if domain is None:
return x_array, x_dict
else:
return x_array, x_dict
def _calib_wgt(self, x: np.ndarray, core_factor: np.ndarray) -> np.ndarray:
def _core_vector(x_i: np.ndarray, core_factor: np.ndarray) -> np.ndarray:
return np.asarray(np.dot(core_factor, x_i))
if x.shape == (x.size,):
adjust_factor = _core_vector(x, core_factor)
else:
adjust_factor = np.apply_along_axis(
_core_vector, axis=1, arr=x, core_factor=core_factor
)
return adjust_factor
def calibrate(
self,
samp_weight: Array,
aux_vars: Array,
control: Union[dict[StringNumber, Union[DictStrNum, Number]]],
domain: Optional[Array] = None,
scale: Union[Array, Number] = 1,
bounded: bool = False,
additive: bool = False,
) -> np.ndarray:
"""Calibrates the sample weights.
Args:
samp_weight (Array): array of sample weights.
aux_vars (Array): array of auxiliary variables.
control (Union[dict[StringNumber, Union[DictStrNum, Number]], None], optional):
provides the controls by domain if applicable. Defaults to None.
domain (Optional[Array], optional): Array indicating the normalization class for each
sample unit. Defaults to None.
scale (Union[Array, Number], optional): scale factor(s) by which the sample weights are divided when computing the calibration adjustment (one value per unit or a single constant). Defaults to 1.
bounded (bool, optional): [description]. Defaults to False.
additive (bool, optional): if True, a separate calibration adjustment is computed per domain and applied to all units, so the adjusted weights are returned with one column per domain. Defaults to False.
Returns:
np.ndarray: an array of the calibrated sample weights.
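Example (illustrative sketch of a poststratification-type calibration;
the numbers and control names are made up)::
    wgt = np.array([1.0, 1.0, 1.0, 1.0])
    aux = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])   # two group indicators
    calib_wgt = SampleWeight().calibrate(
        wgt, aux, control={"group_1": 4, "group_2": 6}
    )
    # the calibrated weights sum to the controls within each group:
    # calib_wgt == [2., 2., 3., 3.]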
"""
samp_weight = formats.numpy_array(samp_weight)
aux_vars = formats.numpy_array(aux_vars)
samp_size = samp_weight.size
if domain is not None:
domain = formats.numpy_array(domain)
if isinstance(scale, (float, int)):
scale = np.repeat(scale, samp_size)
else:
scale = formats.numpy_array(scale)
if aux_vars.shape == (samp_size,):
x_w = aux_vars * samp_weight
one_dimension = True
else:
x_w = np.transpose(aux_vars) * samp_weight
one_dimension = False
if domain is None:
if one_dimension:
x_w_total = np.sum(x_w)
else:
x_w_total = np.sum(x_w, axis=1)
core_factor = self._core_matrix(
samp_weight=samp_weight,
x=aux_vars,
x_weighted_total=x_w_total,
x_control=np.array(list(control.values())),
scale=scale,
)
adjust_factor = 1 + self._calib_wgt(aux_vars, core_factor) / scale
else:
domains = np.unique(domain)
if additive:
adjust_factor = np.ones((samp_size, domains.size)) * np.nan
else:
adjust_factor = np.ones(samp_size) * np.nan
for k, d in enumerate(domains):
if one_dimension:
x_w_total = np.sum(x_w)
else:
x_w_total = np.sum(x_w, axis=1)
x_d = aux_vars[domain == d]
samp_weight_d = samp_weight[domain == d]
if one_dimension:
x_w_total_d = np.sum(x_w[domain == d])
else:
x_w_total_d = np.sum(np.transpose(x_w)[domain == d], axis=0)
control_d = control.get(d)
if isinstance(control_d, (int, float)):
control_d_values = [control_d]
elif isinstance(control_d, dict):
control_d_values = list(control_d.values())
else:
raise TypeError("Type of control not valid!")
scale_d = scale[domain == d]
if additive:
core_factor_d = self._core_matrix(
samp_weight=samp_weight,
x=aux_vars,
x_weighted_total=x_w_total_d,
x_control=np.array(control_d_values),
scale=scale,
)
adjust_factor[:, k] = (domain == d) + self._calib_wgt(
aux_vars, core_factor_d
) / scale
else:
core_factor_d = self._core_matrix(
samp_weight=samp_weight_d,
x=aux_vars[domain == d],
x_weighted_total=x_w_total_d,
x_control=np.array(control_d_values),
scale=scale_d,
)
adjust_factor[domain == d] = 1 + self._calib_wgt(x_d, core_factor_d) / scale_d
if additive:
calib_weight = np.transpose(np.transpose(adjust_factor) * samp_weight)
else:
calib_weight = samp_weight * adjust_factor
self.adjust_method = "calibration"
return calib_weight
def trim(
self,
) -> np.ndarray:
pass
| mit |
mthiffau/csvgrapher | grapher.py | 1 | 6977 | #!/usr/bin/python
import argparse, sys, time, os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import deque
class RealTimePlot:
def __init__(self, filename, x_hist, y_range, y_botflex, y_topflex):
'''Create a real time data plot animation.
filename: File to read data from
x_hist: How many units of history to show on the graph
y_range: Minimum y range values
y_botflex: Has the user specified that the graph can expand down?
y_topflex: Has the user specified that the graph can expand up?'''
self.x_hist = x_hist
self.y_range = y_range
self.y_botflex = y_botflex
self.y_topflex = y_topflex
self.xvals = deque() # Queue of x values
self.ysets = [] # Sets of queues of y values
self.ynames = [] # Names for the y sets plotted
# Set up the figure as much as we can
self.fig, self.axis = plt.subplots(num="Real Time Data")
self.axis.set_ylim(y_range[0], y_range[1])
# Read as much of the file as has been written so far
self.filename = filename
self.fileobj = None
try:
self.fileobj = open(filename, 'r')
# Read all in the file so far
newdata = self.fileobj.readline()
while len(newdata) > 0:
x_val, y_vals = self.parseInput(newdata)
if type(x_val) is str:
plt.xlabel(x_val)
self.ynames = y_vals
else:
self.addToPlot(x_val, y_vals)
newdata = self.fileobj.readline()
except Exception as ex:
print("Exception while trying to open file for reading: " + str(ex))
self.fileobj = None
# Plot the data read so far
self.plotlist = []
for yset in self.ysets:
self.plotlist.append(self.xvals)
self.plotlist.append(yset)
self.plotlist.append('-')
if len(self.xvals) > 1:
self.axis.set_xlim(self.xvals[0], self.xvals[-1])
self.lines = self.axis.plot(*self.plotlist)
# Create the legend if we have y labels
if len(self.ynames) > 0:
for i in range(len(self.lines)):
self.lines[i].set_label(self.ynames[i])
handles, labels = self.axis.get_legend_handles_labels()
self.axis.legend(handles, labels)
self.anim = animation.FuncAnimation(self.fig, self.update, interval=100)
def addToPlot(self, x_val, y_vals):
'''Add a new round of data to the plot. One X value, mutiple Y values.'''
# Add the x value to the set
self.xvals.append(x_val)
# Add the y values to their respective sets
for i in range(len(y_vals)):
if len(self.ysets) < (i + 1):
self.ysets.append(deque())
self.ysets[i].append(y_vals[i])
# Purge values if they are too old
while (x_val - self.xvals[0]) > self.x_hist:
self.xvals.popleft()
for yset in self.ysets:
yset.popleft()
def parseInput(self, newdata):
'''Parse a line of input from the file.'''
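# Expected line formats (illustrative examples, not from the original file):
#   names,time (s),voltage,current   -> x-axis label followed by y series names
#   0.5,3.3,0.12                     -> one x value followed by its y values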
newdata = newdata.rstrip('\n')
fields = newdata.split(',')
assert(len(fields) > 1)
if fields[0] == 'names':
return (fields[1], fields[2:])
x_val = float(fields[0])
y_vals = list(map(float, fields[1:]))
return (x_val, y_vals)
def update(self, framenum):
'''Called to update the plot by the animation timer.'''
if self.fileobj is not None:
try:
newdata = self.fileobj.readline()
if len(newdata) <= 0:
return self.lines  # if nothing new to read, just return
# Parse the input
x_val, y_vals = self.parseInput(newdata)
# If the inputs are labels, regenerate the legend and return
if type(x_val) is str:
plt.xlabel(x_val)
self.ynames = y_vals
if len(self.ynames) > 0:
for i in range(len(self.lines)):
self.lines[i].set_label(self.ynames[i])
handles, labels = self.axis.get_legend_handles_labels()
self.axis.legend(handles, labels)
return self.lines
# Otherwise add the new data to the plot
self.addToPlot(x_val, y_vals)
# Recalculate all the plot axis limits
miny = self.y_range[0]
maxy = self.y_range[1]
local_min = 0
local_max = 0
self.axis.set_xlim(self.xvals[0], self.xvals[-1])
for i in range(len(self.ysets)):
self.lines[i].set_xdata(self.xvals)
self.lines[i].set_ydata(self.ysets[i])
if self.y_botflex:
local_min = min(list(self.ysets[i]))
if local_min > 0:
miny = min(miny, local_min * 0.9)
else:
miny = min(miny, local_min * 1.1)
if self.y_topflex:
local_max = max(list(self.ysets[i]))
if local_max > 0:
maxy = max(maxy, local_max * 1.1)
else:
maxy = max(maxy, local_max * 0.9)
self.axis.set_ylim(miny, maxy)
# Draw the updated plot
self.fig.canvas.draw()
except Exception as ex:
print("Exception while reading from file: " + str(ex))
self.fileobj = None
return self.lines
def close(self):
if self.fileobj is not None:
self.fileobj.close()
self.fileobj = None
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser(description="Real Time CSV Data Grapher")
parser.add_argument('--xhist', type=float, default=5.0, dest='xhist')
parser.add_argument('--ylow', type=float, default=-1, dest='ylow')
parser.add_argument('--yhigh', type=float, default=1, dest='yhigh')
parser.add_argument('--ytopflex', action='store_true', default=False, dest='ytopflex')
parser.add_argument('--ybotflex', action='store_true', default=False, dest='ybotflex')
parser.add_argument('filename', type=str)
args = parser.parse_args()
# Create plot
plot = RealTimePlot(args.filename, args.xhist, (args.ylow, args.yhigh), args.ybotflex, args.ytopflex)
# Show plot
plt.show()
| mit |
jreback/pandas | asv_bench/benchmarks/eval.py | 8 | 1989 | import numpy as np
import pandas as pd
try:
import pandas.core.computation.expressions as expr
except ImportError:
import pandas.computation.expressions as expr
class Eval:
params = [["numexpr", "python"], [1, "all"]]
param_names = ["engine", "threads"]
def setup(self, engine, threads):
self.df = pd.DataFrame(np.random.randn(20000, 100))
self.df2 = pd.DataFrame(np.random.randn(20000, 100))
self.df3 = pd.DataFrame(np.random.randn(20000, 100))
self.df4 = pd.DataFrame(np.random.randn(20000, 100))
if threads == 1:
expr.set_numexpr_threads(1)
def time_add(self, engine, threads):
pd.eval("self.df + self.df2 + self.df3 + self.df4", engine=engine)
def time_and(self, engine, threads):
pd.eval(
"(self.df > 0) & (self.df2 > 0) & (self.df3 > 0) & (self.df4 > 0)",
engine=engine,
)
def time_chained_cmp(self, engine, threads):
pd.eval("self.df < self.df2 < self.df3 < self.df4", engine=engine)
def time_mult(self, engine, threads):
pd.eval("self.df * self.df2 * self.df3 * self.df4", engine=engine)
def teardown(self, engine, threads):
expr.set_numexpr_threads()
class Query:
def setup(self):
N = 10 ** 6
halfway = (N // 2) - 1
index = pd.date_range("20010101", periods=N, freq="T")
s = pd.Series(index)
self.ts = s.iloc[halfway]
self.df = pd.DataFrame({"a": np.random.randn(N), "dates": index}, index=index)
data = np.random.randn(N)
self.min_val = data.min()
self.max_val = data.max()
def time_query_datetime_index(self):
self.df.query("index < @self.ts")
def time_query_datetime_column(self):
self.df.query("dates < @self.ts")
def time_query_with_boolean_selection(self):
self.df.query("(a >= @self.min_val) & (a <= @self.max_val)")
from .pandas_vb_common import setup # noqa: F401 isort:skip
| bsd-3-clause |
aditiiyer/CERR | CERR_core/ModelImplementationLibrary/SegmentationModels/ModelDependencies/CT_HeartStructure_DeepLab/dataloaders/utils.py | 4 | 3279 | import matplotlib.pyplot as plt
import numpy as np
import torch
def decode_seg_map_sequence(label_masks, dataset='heart'):
rgb_masks = []
for label_mask in label_masks:
rgb_mask = decode_segmap(label_mask, dataset)
rgb_masks.append(rgb_mask)
rgb_masks = torch.from_numpy(np.array(rgb_masks).transpose([0, 3, 1, 2]))
return rgb_masks
def decode_segmap(label_mask, dataset, plot=False):
"""Decode segmentation class labels into a color image
Args:
label_mask (np.ndarray): an (M,N) array of integer values denoting
the class label at each spatial location.
plot (bool, optional): whether to show the resulting color image
in a figure.
Returns:
(np.ndarray, optional): the resulting decoded color image.
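Example (illustrative)::
    mask = np.zeros((2, 2), dtype=int)        # every pixel labelled 0
    rgb = decode_segmap(mask, dataset='heart')
    # rgb has shape (2, 2, 3) and is all zeros, since class 0
    # ('unlabelled') maps to the colour [0, 0, 0].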
"""
if dataset == 'heart':
n_classes = 10
label_colours = get_heart_labels()
elif dataset == 'validation':
n_classes = 10
label_colours = get_heart_struct_labels()
elif dataset == 'heart_struct' or dataset == 'heart_peri' or dataset == 'heart_ventricles' or dataset == 'heart_atria':
n_classes = 2
label_colours = get_heart_labels()
elif dataset == 'validation_struct' or dataset == 'validation_peri' or dataset == 'validation_ventricles' or dataset == 'validation_atria':
n_classes = 2
label_colours = get_heart_struct_labels()
else:
raise NotImplementedError
r = label_mask.copy()
g = label_mask.copy()
b = label_mask.copy()
for ll in range(0, n_classes):
r[label_mask == ll] = label_colours[ll, 0]
g[label_mask == ll] = label_colours[ll, 1]
b[label_mask == ll] = label_colours[ll, 2]
rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
if plot:
plt.imshow(rgb)
plt.show()
else:
return rgb
def encode_segmap(mask):
"""Encode segmentation label images as pascal classes
Args:
mask (np.ndarray): raw segmentation label image of dimension
(M, N, 3), in which the Pascal classes are encoded as colours.
Returns:
(np.ndarray): class map with dimensions (M,N), where the value at
a given location is the integer denoting the class index.
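Example (illustrative)::
    mask = np.zeros((2, 2, 3))
    mask[0, 0] = [128, 0, 0]                  # colour of class 1 ('HEART')
    labels = encode_segmap(mask)
    # labels[0, 0] == 1 and the remaining pixels are 0 ('unlabelled').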
"""
mask = mask.astype(int)
label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
for ii, label in enumerate(get_heart_labels()):
label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii
label_mask = label_mask.astype(int)
return label_mask
def get_heart_labels():
# return np.array with dimensions (11,3)
# class indices: [0,1,2,3,4,5,6,7,8,9]
# ['unlabelled', 'HEART', 'AORTA', 'LA', 'LV', 'RA', 'RV', 'IVC', 'SVC', 'PA']
return np.asarray([[0, 0, 0],
[128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0]])
def get_heart_struct_labels():
# return np.array with dimensions (2,3)
# [0,1]
# ['unlabelled', 'HEART']
return np.asarray([[0, 0, 0],
[128, 0, 0]])
| lgpl-2.1 |
LiaoPan/blaze | blaze/compute/tests/test_postgresql_compute.py | 6 | 4809 | from datetime import timedelta
import itertools
import re
import pytest
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('psycopg2')
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from odo import odo, resource, drop, discover
from blaze import symbol, compute, concat
names = ('tbl%d' % i for i in itertools.count())
def normalize(s):
s = ' '.join(s.strip().split()).lower()
s = re.sub(r'(alias)_?\d*', r'\1', s)
return re.sub(r'__([A-Za-z_][A-Za-z_0-9]*)', r'\1', s)
@pytest.fixture
def url():
return 'postgresql://postgres@localhost/test::%s' % next(names)
@pytest.yield_fixture
def sql(url):
try:
t = resource(url, dshape='var * {A: string, B: int64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([('a', 1), ('b', 2)], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_with_dts(url):
try:
t = resource(url, dshape='var * {A: datetime}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([(d,) for d in pd.date_range('2014-01-01', '2014-02-01')], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_two_tables():
dshape = 'var * {a: int32}'
try:
t = resource(url(), dshape=dshape)
u = resource(url(), dshape=dshape)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield u, t
finally:
drop(t)
drop(u)
@pytest.yield_fixture
def sql_with_float(url):
try:
t = resource(url, dshape='var * {c: float64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t
finally:
drop(t)
def test_postgres_create(sql):
assert odo(sql, list) == [('a', 1), ('b', 2)]
def test_postgres_isnan(sql_with_float):
data = (1.0,), (float('nan'),)
table = odo(data, sql_with_float)
sym = symbol('s', discover(data))
assert odo(compute(sym.isnan(), table), list) == [(False,), (True,)]
def test_insert_from_subselect(sql_with_float):
data = pd.DataFrame([{'c': 2.0}, {'c': 1.0}])
tbl = odo(data, sql_with_float)
s = symbol('s', discover(data))
odo(compute(s[s.c.isin((1.0, 2.0))].sort(), tbl), sql_with_float),
tm.assert_frame_equal(
odo(sql_with_float, pd.DataFrame).iloc[2:].reset_index(drop=True),
pd.DataFrame([{'c': 1.0}, {'c': 2.0}]),
)
def test_concat(sql_two_tables):
t_table, u_table = sql_two_tables
t_data = pd.DataFrame(np.arange(5), columns=['a'])
u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
odo(t_data, t_table)
odo(u_data, u_table)
t = symbol('t', discover(t_data))
u = symbol('u', discover(u_data))
tm.assert_frame_equal(
odo(
compute(concat(t, u).sort('a'), {t: t_table, u: u_table}),
pd.DataFrame,
),
pd.DataFrame(np.arange(10), columns=['a']),
)
def test_concat_invalid_axis(sql_two_tables):
t_table, u_table = sql_two_tables
t_data = pd.DataFrame(np.arange(5), columns=['a'])
u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
odo(t_data, t_table)
odo(u_data, u_table)
# We need to force the shape to not be a record here so we can
# create the `Concat` node with an axis=1.
t = symbol('t', '5 * 1 * int32')
u = symbol('u', '5 * 1 * int32')
with pytest.raises(ValueError) as e:
compute(concat(t, u, axis=1), {t: t_table, u: u_table})
# Preserve the suggestion to use merge.
assert "'merge'" in str(e.value)
def test_timedelta_arith(sql_with_dts):
delta = timedelta(days=1)
dates = pd.Series(pd.date_range('2014-01-01', '2014-02-01'))
sym = symbol('s', discover(dates))
assert (
odo(compute(sym + delta, sql_with_dts), pd.Series) == dates + delta
).all()
assert (
odo(compute(sym - delta, sql_with_dts), pd.Series) == dates - delta
).all()
def test_coerce_bool_and_sum(sql):
n = sql.name
t = symbol(n, discover(sql))
expr = (t.B > 1.0).coerce(to='int32').sum()
result = compute(expr, sql).scalar()
expected = odo(compute(t.B, sql), pd.Series).gt(1).sum()
assert result == expected
def test_distinct_on(sql):
t = symbol('t', discover(sql))
computation = compute(t[['A', 'B']].sort('A').distinct('A'), sql)
assert normalize(str(computation)) == normalize("""
SELECT DISTINCT ON (anon_1."A") anon_1."A", anon_1."B"
FROM (SELECT {tbl}."A" AS "A", {tbl}."B" AS "B"
FROM {tbl}) AS anon_1 ORDER BY anon_1."A" ASC
""".format(tbl=sql.name))
assert odo(computation, tuple) == (('a', 1), ('b', 2))
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/cross_validation.py | 96 | 58309 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
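Examples
--------
A minimal sketch (illustrative, not part of scikit-learn) of a subclass
that always holds out the first `p` samples as the test set::
    class FirstPOut(_PartitionIterator):
        def __init__(self, n, p):
            super(FirstPOut, self).__init__(n)
            self.p = p
        def _iter_test_indices(self):
            yield np.arange(self.p)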
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
    in testing on all distinct subsets of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
    Due to the high number of iterations, which grows combinatorially with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
    The first n % n_folds folds have size n // n_folds + 1; the other folds have
    size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
        Labels of the samples to split into K stratified folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
    ``p`` different values of the labels, while the latter uses the samples
    that are all assigned to one single label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
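# Illustrative sketch: how _validate_shuffle_split resolves the requested sizes.
# With n=10 samples, test_size=0.25 and train_size=None, the test set gets
# ceil(0.25 * 10) = 3 samples and the training set the 7-sample complement:
#
#     >>> _validate_shuffle_split(10, 0.25, None)
#     (7, 3)
#
# Integer arguments are treated as absolute sample counts rather than fractions.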
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (as n_train and n_test are not
            # divisors of the number of elements per class), we may end
            # up here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
                # Complete the folds by randomly assigning the missing indices
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
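# Illustrative sketch: _index_param_value lets per-sample fit parameters such as
# sample_weight be sliced consistently with X, while scalar parameters pass
# through unchanged. With 4 samples and train indices [0, 2]:
#
#     >>> X = np.zeros((4, 2))
#     >>> _index_param_value(X, np.array([1., 2., 3., 4.]), [0, 2])
#     array([ 1.,  3.])
#     >>> _index_param_value(X, 5, [0, 2])   # scalar: returned as-is
#     5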
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
        The cross-validation generator must assign each sample to exactly one
        test set across all the folds; otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
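# Illustrative sketch: obtaining out-of-fold predictions for every sample with
# cross_val_predict. The estimator and dataset below are placeholders chosen
# only for the example:
#
#     >>> from sklearn.datasets import load_iris
#     >>> from sklearn.linear_model import LogisticRegression
#     >>> iris = load_iris()
#     >>> preds = cross_val_predict(LogisticRegression(), iris.data,
#     ...                           iris.target, cv=5)
#     >>> preds.shape
#     (150,)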
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
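# Illustrative sketch: _check_is_partition accepts any reordering of arange(n)
# and rejects index arrays with gaps or repeats:
#
#     >>> _check_is_partition(np.array([2, 0, 1]), 3)
#     True
#     >>> _check_is_partition(np.array([0, 0, 1]), 3)
#     False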
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
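# Illustrative sketch: scoring a classifier with 5-fold cross-validation via
# cross_val_score. The estimator and dataset are placeholders for the example:
#
#     >>> from sklearn.datasets import load_iris
#     >>> from sklearn.svm import SVC
#     >>> iris = load_iris()
#     >>> scores = cross_val_score(SVC(), iris.data, iris.target, cv=5)
#     >>> scores.shape
#     (5,)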
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
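# Illustrative sketch: for estimators with _pairwise = True, _safe_split slices a
# precomputed square kernel with np.ix_ so that a test block keeps only the
# columns of the training samples, i.e. K[np.ix_(test, train)] has shape
# (n_test, n_train); a plain feature matrix is simply row-indexed instead.
#
#     >>> K = np.arange(16).reshape(4, 4)
#     >>> K[np.ix_([3], [0, 1, 2])]          # test row 3 against train columns
#     array([[12, 13, 14]])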
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
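# Illustrative sketch: when labels are given, _shuffle only permutes targets that
# share a label, so nothing crosses a group boundary. With labels [1, 1, 2, 2],
# y[0] and y[1] may swap and y[2] and y[3] may swap, but the first pair never
# mixes with the second; with labels=None the whole of y is permuted freely.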
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
        The input specifying which cv generator to use. It can be an
        integer, in which case it is the number of folds in a KFold,
        None, in which case 3-fold cross-validation is used, or another
        object that will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
    classifier : boolean, optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
    checked_cv : a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
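# Illustrative sketch: how check_cv dispatches. An integer combined with a
# classifier and a discrete target yields a StratifiedKFold, otherwise a plain
# KFold; an existing cv object would be returned unchanged.
#
#     >>> X, y = np.zeros((4, 2)), np.array([0, 0, 1, 1])
#     >>> check_cv(2, X, y, classifier=True)      # doctest: +ELLIPSIS
#     sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2, ...)
#     >>> check_cv(2, X, y, classifier=False)     # doctest: +ELLIPSIS
#     sklearn.cross_validation.KFold(n=4, n_folds=2, ...)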
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
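# Illustrative sketch: assessing whether a classifier's score beats chance with
# permutation_test_score. The estimator and dataset are placeholders; note that
# with n_permutations=100 the smallest achievable p-value is 1 / 101.
#
#     >>> from sklearn.datasets import load_iris
#     >>> from sklearn.svm import SVC
#     >>> iris = load_iris()
#     >>> score, perm_scores, pvalue = permutation_test_score(
#     ...     SVC(), iris.data, iris.target, cv=5, n_permutations=100)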
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
        List containing the train-test split of the input arrays.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
chris-hld/sfs-python | doc/conf.py | 1 | 10001 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SFS documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 4 14:01:37 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from subprocess import check_output
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3' # for sphinx.ext.napoleon
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon', # support for NumPy-style docstrings
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'sphinxcontrib.bibtex',
'matplotlib.sphinxext.plot_directive',
'nbsphinx',
]
# Override kernel name to allow running with Python 2 on Travis-CI
nbsphinx_kernel_name = 'python'
autoclass_content = 'init'
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'undoc-members']
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = False
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.org/', None),
}
plot_include_source = True
plot_html_show_source_link = False
plot_html_show_formats = False
plot_pre_code = ""
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_template']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
authors = 'SFS Toolbox Developers'
project = 'SFS Toolbox'
copyright = '2017, ' + authors
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '0.0.0'
# The full version, including alpha/beta/rc tags.
try:
release = check_output(['git', 'describe', '--tags', '--always'])
release = release.decode().strip()
except Exception:
release = '<unknown>'
binder_base_url = 'https://mybinder.org/v2/gh/sfstoolbox/sfs-python/'
extlinks = {'binder': (binder_base_url + release + '?filepath=%s', 'binder:')}
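# With the extlink above, reST sources can reference a notebook on Binder via a
# short role, e.g. (hypothetical notebook path):
#
#   :binder:`doc/examples/my_notebook.ipynb`
#
# which expands to binder_base_url + release + '?filepath=doc/examples/...'.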
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**/.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
def setup(app):
"""Include custom theme files to sphinx HTML header"""
app.add_stylesheet('css/title.css')
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'collapse_navigation': False,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = project + ", version " + release
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SFS'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
'printindex': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [('index', 'SFS.tex', project, authors, 'howto')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
latex_show_urls = 'footnote'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = [('index', 'sfs', project, [authors], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
#texinfo_documents = [
# ('index', 'SFS', project, project, 'SFS', 'Sound Field Synthesis Toolbox.',
# 'Miscellaneous'),
#]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for epub output ----------------------------------------------
epub_author = authors
| mit |
Arafatk/sympy | doc/ext/docscrape_sphinx.py | 51 | 9709 | from __future__ import division, absolute_import, print_function
import sys
import re
import inspect
import textwrap
import pydoc
import sphinx
import collections
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
        # NOTE: the early return below disables signature rendering; the
        # remainder of this method is unreachable.
        return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self, name='Returns'):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
# Lines that are commented out are used to make the
# autosummary:: table. Since SymPy does not use the
# autosummary:: functionality, it is easiest to just comment it
# out.
# autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
# if param_obj and (pydoc.getdoc(param_obj) or not desc):
# # Referenced object has a docstring
# autosum += [" %s%s" % (prefix, param)]
# else:
others.append((param, param_type, desc))
# if autosum:
# out += ['.. autosummary::']
# if self.class_members_toctree:
# out += [' :toctree:']
# out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', '', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns('Returns')
out += self._str_returns('Yields')
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
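# Illustrative use of the dispatcher above from a Sphinx extension (the object passed
# in is an assumption, not part of this module):
#     doc = get_doc_object(some_function_or_class, config={'use_plots': False})
#     rst = str(doc)   # reStructuredText produced by SphinxDocString.__str__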
| bsd-3-clause |
luo66/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
prateeknepaliya09/rodeo | rodeo/kernel.py | 8 | 7985 | # start compatibility with IPython Jupyter 4.0+
try:
from jupyter_client import BlockingKernelClient
except ImportError:
from IPython.kernel import BlockingKernelClient
# python3/python2 nonsense
try:
from Queue import Empty
except:
from queue import Empty
import atexit
import subprocess
import uuid
import time
import os
import sys
import json
__dirname = os.path.dirname(os.path.abspath(__file__))
vars_patch = """
import json
try:
import pandas as pd
except:
pd = None
def __get_variables():
    if not pd:
        print('[]')
        return
variable_names = globals().keys()
data_frames = []
for v in variable_names:
if v.startswith("_"):
continue
if isinstance(globals()[v], pd.DataFrame):
data_frames.append({
"name": v,
"dtype": "DataFrame"
})
print(json.dumps(data_frames))
"""
class Kernel(object):
def __init__(self, active_dir, pyspark):
# kernel config is stored in a dot file with the active directory
config = os.path.join(active_dir, ".kernel-%s.json" % str(uuid.uuid4()))
# right now we're spawning a child process for IPython. we can
# probably work directly with the IPython kernel API, but the docs
# don't really explain how to do it.
log_file = None
if pyspark:
os.environ["IPYTHON_OPTS"] = "kernel -f %s" % config
pyspark = os.path.join(os.environ.get("SPARK_HOME"), "bin/pyspark")
spark_log = os.environ.get("SPARK_LOG", None)
if spark_log:
log_file = open(spark_log, "w")
spark_opts = os.environ.get("SPARK_OPTS", "")
args = [pyspark] + spark_opts.split() # $SPARK_HOME/bin/pyspark <SPARK_OPTS>
p = subprocess.Popen(args, stdout=log_file, stderr=log_file)
else:
args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# when __this__ process exits, we're going to remove the ipython config
# file and kill the ipython subprocess
atexit.register(p.terminate)
def remove_config():
if os.path.isfile(config):
os.remove(config)
atexit.register(remove_config)
        # I found that connecting to the kernel immediately would fail, so we'll
        # wait until the config file exists before moving on
while os.path.isfile(config)==False:
time.sleep(0.1)
def close_file():
if log_file:
log_file.close()
atexit.register(close_file)
# fire up the kernel with the appropriate config
self.client = BlockingKernelClient(connection_file=config)
self.client.load_connection_file()
self.client.start_channels()
# load our monkeypatches...
self.client.execute("%matplotlib inline")
self.client.execute(vars_patch)
def _run_code(self, code, timeout=0.1):
# this function executes some code and waits for it to completely finish
        # before returning. I don't think that this is necessarily the best
# way to do this, but the IPython documentation isn't very helpful for
# this particular topic.
#
# 1) execute code and grab the ID for that execution thread
# 2) look for messages coming from the "iopub" channel (this is just a
# stream of output)
# 3) when we get a message that is one of the following, save relevant
# data to `data`:
# - execute_result - content from repr
# - stream - content from stdout
        # - error - ANSI-encoded stacktrace
# the final piece is that we check for when the message indicates that
# the kernel is idle and the message's parent is the original execution
# ID (msg_id) that's associated with our executing code. if this is the
# case, we'll return the data and the msg_id and exit
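        # For orientation, a rough sketch of an iopub reply inspected below (field
        # names follow the Jupyter messaging protocol; values are illustrative only):
        #     {'header': {'msg_type': 'stream', ...},
        #      'parent_header': {'msg_id': '<the msg_id returned by client.execute(code)>', ...},
        #      'content': {'text': 'hello world\n', ...}}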
msg_id = self.client.execute(code)
output = { "msg_id": msg_id, "output": None, "image": None, "error": None }
while True:
try:
reply = self.client.get_iopub_msg(timeout=timeout)
except Empty:
continue
if "execution_state" in reply['content']:
if reply['content']['execution_state']=="idle" and reply['parent_header']['msg_id']==msg_id:
if reply['parent_header']['msg_type']=="execute_request":
return output
elif reply['header']['msg_type']=="execute_result":
output['output'] = reply['content']['data'].get('text/plain', '')
elif reply['header']['msg_type']=="display_data":
output['image'] = reply['content']['data'].get('image/png', '')
elif reply['header']['msg_type']=="stream":
output['output'] = reply['content'].get('text', '')
elif reply['header']['msg_type']=="error":
output['error'] = "\n".join(reply['content']['traceback'])
def execute(self, code):
return self._run_code(code)
def complete(self, code, timeout=0.1):
# Call ipython kernel complete, wait for response with the correct msg_id,
# and construct appropriate UI payload.
# See below for an example response from ipython kernel completion for 'el'
#
# {
# 'parent_header':
# {u'username': u'ubuntu', u'version': u'5.0', u'msg_type': u'complete_request',
# u'msg_id': u'5222d158-ada8-474e-88d8-8907eb7cc74c', u'session': u'cda4a03d-a8a1-4e6c-acd0-de62d169772e',
# u'date': datetime.datetime(2015, 5, 7, 15, 25, 8, 796886)},
# 'msg_type': u'complete_reply',
# 'msg_id': u'a3a957d6-5865-4c6f-a0b2-9aa8da718b0d',
# 'content':
# {u'matches': [u'elif', u'else'], u'status': u'ok', u'cursor_start': 0, u'cursor_end': 2, u'metadata': {}},
# 'header':
# {u'username': u'ubuntu', u'version': u'5.0', u'msg_type': u'complete_reply',
# u'msg_id': u'a3a957d6-5865-4c6f-a0b2-9aa8da718b0d', u'session': u'f1491112-7234-4782-8601-b4fb2697a2f6',
# u'date': datetime.datetime(2015, 5, 7, 15, 25, 8, 803470)},
# 'buffers': [],
# 'metadata': {}
# }
#
msg_id = self.client.complete(code)
output = { "msg_id": msg_id, "output": None, "image": None, "error": None }
while True:
try:
reply = self.client.get_shell_msg(timeout=timeout)
except Empty:
continue
if "matches" in reply['content'] and reply['msg_type']=="complete_reply" and reply['parent_header']['msg_id']==msg_id:
results = []
for completion in reply['content']['matches']:
result = {
"value": completion,
"dtype": "---"
}
if "." in code:
result['text'] = ".".join(result['value'].split(".")[1:])
result["dtype"] = "function"
else:
# result['text'] = result['value'].replace(code, '', 1)
result['text'] = result['value']
result["dtype"] = "session variable" # type(globals().get(code)).__name__
results.append(result)
jsonresults = json.dumps(results)
output['output'] = jsonresults
return output
#else:
#Don't know what to do with the rest.
#I've observed parent_header msg_types: kernel_info_request, execute_request
#Just discard for now
def get_dataframes(self):
return self.execute("__get_variables()")
| bsd-2-clause |
RJTK/dwglasso_cweeds | src/data/calculate_covars.py | 1 | 6158 | '''
A 'helper' script to calculate and subsequently cache the
covariance matrices ZZT and YZT. This is time consuming so it's
certainly wise to cache this caculation. This is basically a prereq
to running the dwglasso algorithm.
NOTE: This file is intended to be executed by make from the top
level of the project directory hierarchy. We rely on os.getcwd()
and it will not work if run directly as a script from this directory.
'''
import sys
import pandas as pd
import numpy as np
from itertools import combinations_with_replacement, repeat, starmap
from src.conf import ZZT_FILE_PREFIX, YZT_FILE_PREFIX, HDF_FINAL_FILE,\
LOCATIONS_KEY, MAX_P, TEMPERATURE_TS_ROOT
def periodogram_covar(x: np.array, y: np.array, tau: int, p: int):
'''Takes in numpy arrays x and y, an int value tau for the lag and
another int p for the maximum lag and returns the periodogram
estimate of the covariance.
'''
assert np.allclose(x.mean(), 0), 'Signal x must be 0 mean!'
assert np.allclose(y.mean(), 0), 'Signal y must be 0 mean!'
T = len(x) - p
if tau == 0:
return (1 / T) * np.dot(x[p:], y[p:])
else:
return (1 / T) * np.dot(x[p:], y[p - tau:-tau])
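# For reference, the estimator above is the truncated sample cross-covariance
#     R_xy(tau) ~= (1 / T) * sum_{t = p}^{N - 1} x[t] * y[t - tau],   with T = N - p,
# computed over the last T samples so that every lag 0..p uses the same window.
# A small worked example (illustrative, not part of the original script):
#     >>> x = np.array([1., -1., 2., -2., 1., -1.])   # zero mean
#     >>> periodogram_covar(x, x, tau=1, p=2)
#     -2.25   # == (1/4) * dot(x[2:], x[1:-1]) = -9 / 4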
# Exists essentially just to help implement the convenient notation in
# the function periodogram_covar_matrices.
class ColSelector(object):
'''
A helper object to select out the temperature data from our hdf store.
'''
def __init__(self, hdf: pd.io.pytables.HDFStore, keys, column: str):
'''Takes in an HDFStore D, an iterable of keys, and the column we
want to select from each of the key locations. Precisely,
each key location (hdf[keys[i]]) should contain a pd.DataFrame
object D having the given column: D[column].
If we have t = ColSelector(hdf, keys, column) then t.keys()
will simply return keys, and t[k] will return
hdf[k][column]
'''
self._keys = keys
self.column = column
self.hdf = hdf
self.shape = (len(self.hdf[self._keys[0]]), len(self._keys))
return
def __getitem__(self, k):
'''selector[k]'''
return self.hdf[k][self.column]
def keys(self):
return self._keys
# Use the ColSelector class to give convenient access to an HDFStore
# e.g. give ColSelector the keys we want to iterate through, and the
# column we want to access.
def periodogram_covar_matrices(D, p: int):
'''Makes use of the helper functions periodogram_covar to calculate
the covariance matrices Rx(0) ... Rx(p) where x_i is the i'th
column of D. The matrices Rx(0) ... Rx(p - 1) form the top row
of ZZT and Rx(1) ... Rx(p) form YZT.
We will return the raw estimates of covariances. For large
systems, these estimates are unlikely to be positive semidefinite.
Shrinkage and regularization should be added later in the
pipeline.
'''
n = D.shape[1]
Rx = np.zeros((n, n * (p + 1)))
# I'm unsure about the ordering conventions for D.keys()
# when called on a pd.DataFrame. But, in this case I'm only
# using it with my own ColSelector class, where I know that
# D.keys() is a genuine, ordered list. This ordering is of
# critical importance for later plotting as I'm using no other
# way to keep track of which covariance corresponds to which
# weather station.
for ixi, jxj in combinations_with_replacement(
enumerate(D.keys()), 2):
i, xi = ixi
j, xj = jxj
print('p = %d, Cov(x%d, x%d)' % (p, i, j), end='\r')
sys.stdout.flush()
xi = D[xi].values # D[xi] should be a pd.Series.
xj = D[xj].values
Rx[i, j::n] = np.fromiter(starmap(periodogram_covar,
zip(repeat(xi, p + 1),
repeat(xj, p + 1),
range(p + 1),
repeat(p, p + 1))),
float, count=p + 1)
# Fill in the rest by symmetry
Rx[j, i::n] = Rx[i, j::n]
print()
Rx = list(np.split(Rx, p + 1, axis=1))
return Rx
def form_ZZT(Rx, delta=0.01):
'''Forms the matrix ZZT from the list of Rx matrices
Rx = [Rx(0) Rx(1) ... Rx(p)] (n x n*(p + 1))
ZZT is a np x np block toeplitz form from the 0 to p - 1 lagged
covariance matrices of the n-vector x(t).
CARE: The matrix returned from this function is unlikely to be
positive semidefinite for large n. The shrinkage and other
manipulation necessary to ensure ZZT > 0 should be handled later
in the pipeline and the parameters involved should be considered
as true parameters of the model.
'''
p = len(Rx) - 1
n = Rx[0].shape[0]
ZZT = np.zeros((n * p, n * p))
for i in range(p):
for j in range(p):
ZZT[i * n:(i + 1) * n, j * n:(j + 1) * n] = Rx[abs(i - j)]
return ZZT
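# A sketch of the block-Toeplitz layout assembled above, assuming p = 3 (so only the
# blocks Rx(0), Rx(1), Rx(2) are used; each block is n x n):
#     ZZT = [[Rx(0), Rx(1), Rx(2)],
#            [Rx(1), Rx(0), Rx(1)],
#            [Rx(2), Rx(1), Rx(0)]]
# i.e. block (i, j) is Rx(|i - j|), matching ZZT[i*n:(i+1)*n, j*n:(j+1)*n] = Rx[abs(i - j)].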
def form_YZT(Rx):
'''Forms the matrix YZT from the list of Rx matrices
Rx = [Rx(0) Rx(1) ... Rx(p)] (n x n*(p + 1))
YZT is an n x np matrix [Rx(1) ... Rx(p)]
'''
YZT = np.hstack(Rx[1:]) # [Rx(1) ... Rx(p)]
return YZT
def main():
hdf_final = pd.HDFStore(HDF_FINAL_FILE, mode='a')
wbans = hdf_final[LOCATIONS_KEY]['WBAN']
# The ORDER of this array is of CRITICAL importance for later plotting
keys = [TEMPERATURE_TS_ROOT + '/wban_' + wban + '/D'
for wban in wbans]
col = 'dT'
D = ColSelector(hdf_final, keys, col)
for p in range(1, MAX_P + 1):
Rxp = periodogram_covar_matrices(D, p)
ZZTp = form_ZZT(Rxp)
YZTp = form_YZT(Rxp)
np.save(ZZT_FILE_PREFIX + str(p) + '_dT', ZZTp)
np.save(YZT_FILE_PREFIX + str(p) + '_dT', YZTp)
col = 'T'
D = ColSelector(hdf_final, keys, col)
for p in range(1, MAX_P + 1):
Rxp = periodogram_covar_matrices(D, p)
ZZTp = form_ZZT(Rxp)
YZTp = form_YZT(Rxp)
np.save(ZZT_FILE_PREFIX + str(p) + '_T', ZZTp)
np.save(YZT_FILE_PREFIX + str(p) + '_T', YZTp)
return
if __name__ == '__main__':
main()
| mit |
Kolyan-1/MSc-Thesis-Code | Data/synthetic1.py | 1 | 1507 | ######################################
#
# Nikolai Rozanov (C) 2017-Present
#
# [email protected]
#
#####################################
#
# the bottom part of this file is not by me (as is indicated below)
#
import numpy as np
from sklearn.utils import check_random_state
def circle(n,var,rs=1):
rs = check_random_state(rs)
xvec = np.linspace(0,2*np.pi,n)
X = np.zeros([n,2])
X[:,0] = np.cos(xvec) + rs.normal(0,var,n)
X[:,1] = np.sin(xvec) + rs.normal(0,var,n)
mu = np.zeros(2)
sigma = np.eye(2)
Y = rs.multivariate_normal(mu, sigma, size=n)
return X
######################################
#
# THE CODE BELOW IS NOT MY CODE
# SOURCE GITHUB: https://github.com/dougalsutherland/opt-mmd/blob/master/two_sample/generate.py
#####################################
def gaussian(n,corr,rs=1):
rs = check_random_state(rs)
mu = np.zeros(2)
correlation = corr
corr_sigma = np.array([[1, correlation], [correlation, 1]])
Y = rs.multivariate_normal(mu, corr_sigma, size=n)
return Y
def blobs(n, corr, rows=5, cols=5, sep=10, rs=1):
rs = check_random_state(rs)
# ratio is eigenvalue ratio
correlation = corr
# generate within-blob variation
mu = np.zeros(2)
sigma = np.eye(2)
corr_sigma = np.array([[1, correlation], [correlation, 1]])
Y = rs.multivariate_normal(mu, corr_sigma, size=n)
Y[:, 0] += rs.randint(rows, size=n) * sep
Y[:, 1] += rs.randint(cols, size=n) * sep
return Y
| bsd-3-clause |
projectcuracao/projectcuracao | graphprep/environcolor.py | 1 | 3436 | # environmental color graph
# filename:environmentalgraph.py
# Version 1.0 10/13/13
#
# contains event routines for data collection
#
#
import sys
import time
import RPi.GPIO as GPIO
import gc
import datetime
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from matplotlib import pyplot
from matplotlib import dates
import pylab
import MySQLdb as mdb
sys.path.append('/home/pi/ProjectCuracao/main/config')
# if conflocal.py is not found, import default conf.py
# Check for user imports
try:
import conflocal as conf
except ImportError:
import conf
def environcolor(source,days,delay):
print("environmentalgraph source:%s days:%s" % (source,days))
print("sleeping seconds:", delay)
time.sleep(delay)
print("envrironmentalgraph running now")
# blink GPIO LED when it's run
GPIO.setmode(GPIO.BOARD)
GPIO.setup(22, GPIO.OUT)
GPIO.output(22, False)
time.sleep(0.5)
GPIO.output(22, True)
# now we have get the data, stuff it in the graph
try:
print("trying database")
db = mdb.connect('localhost', 'root', conf.databasePassword, 'ProjectCuracao');
cursor = db.cursor()
query = "SELECT TimeStamp, Red_Color, Blue_Color, Green_Color, Clear_Color FROM environmentaldata where now() - interval %i hour < TimeStamp" % (days*24)
#query = "SELECT TimeStamp, InsideTemperature, InsideHumidity, OutsideTemperature, BarometricPressure, Luminosity, FanState FROM environmentaldata where now() - interval %i hour < TimeStamp" % (days*24)
cursor.execute(query)
result = cursor.fetchall()
t = []
s = []
u = []
v = []
x = []
for record in result:
t.append(record[0])
s.append(record[1])
u.append(record[2])
v.append(record[3])
x.append(record[4])
#dts = map(datetime.datetime.fromtimestamp, s)
#fds = dates.date2num(dts) # converted
# matplotlib date format object
hfmt = dates.DateFormatter('%m/%d-%H')
fig = pyplot.figure()
ax = fig.add_subplot(111)
ax.xaxis.set_major_locator(dates.HourLocator(interval=6))
ax.xaxis.set_major_formatter(hfmt)
pylab.xticks(rotation='vertical')
pyplot.subplots_adjust(bottom=.3)
pylab.plot(t, s, color='r',label="Red Value",linestyle="-",marker=".")
pylab.plot(t, u, color='b',label="Blue Value",linestyle="-",marker=".")
pylab.plot(t, v, color='g',label="Green Value",linestyle="-",marker=".")
pylab.plot(t, x, color='k',label="Clear Color",linestyle="-",marker=".")
pylab.xlabel("Hours")
pylab.ylabel("Value")
pylab.legend(loc='upper left')
maxredcolor = max(s)
maxgreencolor = max(u)
maxbluecolor = max(v)
maxclearcolor = max(x)
maxvalue = max(maxredcolor, maxgreencolor, maxbluecolor, maxclearcolor)
pylab.axis([min(t), max(t), 0, maxvalue])
pylab.figtext(.5, .05, ("Light Color Statistics Last %i Days" % days),fontsize=18,ha='center')
#pylab.grid(True)
pyplot.setp( ax.xaxis.get_majorticklabels(), rotation=70)
ax.xaxis.set_major_formatter(dates.DateFormatter('%m/%d-%H'))
pyplot.show()
pyplot.savefig("/home/pi/RasPiConnectServer/static/environmentalcolorgraph.png")
except mdb.Error, e:
print "Error %d: %s" % (e.args[0],e.args[1])
finally:
cursor.close()
db.close()
del cursor
del db
fig.clf()
pyplot.close()
pylab.close()
del t, s, u, v, x
gc.collect()
print("envrironmentalgraph finished now")
| gpl-3.0 |
alex1818/industrial_training | training/orig/demo_descartes/src/plan_and_run/src/generate_lemniscate_trajectory.py | 12 | 1825 | #!/usr/bin/env python
import numpy
import math
import matplotlib.pyplot as pyplot
from mpl_toolkits.mplot3d import Axes3D
def generateLemniscatePoints():
# 3D plotting setup
fig = pyplot.figure()
ax = fig.add_subplot(111,projection='3d')
a = 6.0
ro = 4.0
dtheta = 0.1
nsamples = 200
nlemniscates = 4
epsilon = 0.0001
# polar angle
theta = numpy.concatenate([numpy.linspace(-math.pi/4 + epsilon,math.pi/4 - epsilon,nsamples/2,endpoint=True),
numpy.linspace(3*math.pi/4 + epsilon, 5*math.pi/4- epsilon,nsamples/2,endpoint=True)])
# offset from polar angle
omega = numpy.linspace(0.0,math.pi,nlemniscates,endpoint=False)
r = len(theta)*[0]
x = (len(theta)*len(omega))*[0]
y = (len(theta)*len(omega))*[0]
z = (len(theta)*len(omega))*[0]
for j in range(0,len(omega)):
index_offset = j*len(theta)
for i in range(0,len(theta)):
r[i] = math.sqrt(math.pow(a,2)*math.cos(2*theta[i]))
index = index_offset + i
phi = math.asin(r[i]/ro) if r[i] < ro else (math.pi - math.asin((2*ro-r[i])/ro) )
x[index] = ro*math.cos(theta[i] + omega[j]) * math.sin(phi)
y[index] = ro*math.sin(theta[i] + omega[j]) * math.sin(phi)
z[index] = ro*math.cos(phi)
#print "omega array: %s"%(str(omega))
#print "x array: %s"%(str(x))
#print "z array: %s"%(str(z))
axis_size = 1.2*ro
ax.plot(x, y, z, label='parametric curve',marker='.',color='yellow', linestyle='dashed',markerfacecolor='blue')
ax.legend()
ax.set_xlabel('X')
ax.set_xlim(-axis_size, axis_size)
ax.set_ylabel('Y')
ax.set_ylim(-axis_size, axis_size)
ax.set_zlabel('Z')
ax.set_zlim(-axis_size, axis_size)
pyplot.show()
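# For reference, the curve generated above appears to be a lemniscate ("figure eight"),
# r(theta) = a * sqrt(cos(2*theta)) in polar form, wrapped onto a sphere of radius ro
# through ordinary spherical coordinates (azimuth theta + omega, polar angle phi).
# Illustrative spot check at theta = 0, omega = 0 (not necessarily a sampled point):
# r = a = 6 > ro, so phi = pi - asin(2/4) = 5*pi/6 and (x, y, z) = (2.0, 0.0, about -3.46).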
if __name__ == "__main__":
generateLemniscatePoints()
| apache-2.0 |
abigailStev/lag_spectra | simple_plot_lag-freq.py | 1 | 3854 | #!/usr/bin/env python
"""
Plots the lag-frequency spectrum.
Example call:
python simple_plot_lag-freq.py ./cygx1_lag-freq.fits -o "./cygx1" --ext "png"
Enter python simple_plot_lag-freq.py -h at the command line for help.
"""
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from matplotlib.ticker import MultipleLocator
from matplotlib.ticker import ScalarFormatter
from astropy.table import Table
import argparse
import subprocess
import numpy as np
__author__ = "Abigail Stevens <A.L.Stevens at uva.nl>"
__year__ = "2016"
################################################################################
def main(lag_file, out_base="./out", plot_ext="eps"):
lag_table = Table.read(lag_file)
# x_err = np.repeat(lag_table.meta['DF'], len(lag_table['FREQUENCY']))
font_prop = font_manager.FontProperties(size=20)
plot_file = out_base + "_lag-freq." + plot_ext
print("Lag-frequency spectrum: %s" % plot_file)
fig, ax = plt.subplots(1, 1, figsize=(10, 7.5), dpi=300, tight_layout=True)
ax.plot([lag_table['FREQUENCY'][0], lag_table['FREQUENCY'][-1]], [0, 0],
lw=1.5, ls='dashed', c='black')
# ax.plot([freq[0], freq[-1]],[np.pi,np.pi], lw=1.5, ls='dashed', c='black')
# ax.plot([freq[0], freq[-1]],[-np.pi,-np.pi], lw=1.5, ls='dashed', c='black')
# ax.errorbar(lag_table['FREQUENCY'], lag_table['PHASE_LAG'], xerr=x_err,
# yerr=lag_table['PHASE_LAG_ERR'], marker='o', ms=10, mew=2,
# mec='blue', fillstyle='none', ecolor='blue', elinewidth=2,
# capsize=0)
ax.errorbar(lag_table['FREQUENCY'], lag_table['TIME_LAG'],
yerr=lag_table['TIME_LAG_ERR'], marker='o', ms=10, mew=2,
mec='blue', fillstyle='none', ecolor='blue', elinewidth=2,
capsize=0)
ax.set_xlabel('Frequency (Hz)', fontproperties=font_prop)
ax.set_ylabel('Time lag (s)', fontproperties=font_prop)
# ax.set_ylabel('Phase lag (radians)', fontproperties=font_prop)
# ax.set_xlim(lo_freq, up_freq)
ax.set_xlim(3, 7)
ax.set_ylim(-1, 1)
# ax.set_ylim(1.3 * np.min(lag_table['TIME_LAG']),
# 1.3 * np.max(lag_table['TIME_LAG']))
ax.tick_params(axis='x', labelsize=20)
ax.tick_params(axis='y', labelsize=20)
ax.tick_params(which='major', width=1.5, length=7)
ax.tick_params(which='minor', width=1.5, length=4)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(1.5)
title = "Lag-frequency spectrum"
ax.set_title(title, fontproperties=font_prop)
plt.savefig(plot_file)
# plt.show()
plt.close()
# subprocess.call(['open', plot_file])
################################################################################
if __name__ == "__main__":
#########################################
## Parse input arguments and call 'main'
#########################################
    parser = argparse.ArgumentParser(usage="python simple_plot_lag-freq.py "
"lag_file [OPTIONAL ARGUMENTS]",
description=__doc__,
epilog="For optional arguments, default values are given in "\
"brackets at end of description.")
parser.add_argument('lag_file', help="The FITS file with the lag-frequency "
"spectrum saved as an astropy table.")
parser.add_argument('-o', '--out', default="./out", dest='outbase',
help="The base name for plot file. Extension will be "
"appended. [./out]")
parser.add_argument('--ext', default="eps", dest='plot_ext',
help="The plot extension. Do not include the dot. "
"[eps]")
args = parser.parse_args()
main(args.lag_file, args.outbase, args.plot_ext)
| mit |
exa-analytics/exatomic | exatomic/adf/output.py | 2 | 25123 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
ADF Composite Output
#########################
This module provides the primary (user facing) output parser.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from collections import defaultdict
import re
import six
import numpy as np
import pandas as pd
from io import StringIO
from exa.util.units import Length
from exa import TypedMeta
from exatomic.base import sym2z
from exatomic.algorithms.basis import lmap, enum_cartesian
from exatomic.algorithms.numerical import dfac21
from exatomic.core.atom import Atom, Frequency
from exatomic.core.gradient import Gradient
from exatomic.core.basis import BasisSet, BasisSetOrder
from exatomic.core.orbital import Orbital, Excitation, MOMatrix
from exatomic.core.tensor import NMRShielding, JCoupling
from .editor import Editor
class OutMeta(TypedMeta):
atom = Atom
basis_set = BasisSet
basis_set_order = BasisSetOrder
orbital = Orbital
contribution = pd.DataFrame
excitation = Excitation
momatrix = MOMatrix
sphr_momatrix = MOMatrix
gradient = Gradient
frequency = Frequency
nmr_shielding = NMRShielding
j_coupling = JCoupling
class Output(six.with_metaclass(OutMeta, Editor)):
"""The ADF output parser."""
def parse_atom(self):
# TODO : only supports single frame, gets last atomic positions
# this will actually get the very first coordinates
#_re_atom_00 = 'Atoms in this Fragment Cart. coord.s (Angstrom)'
_re_atom_00 = 'ATOMS'
found1 = self.find(_re_atom_00, keys_only=True)
# use the regex instead of find because we have a similar search string in an nmr and
# cpl calculation for the nuclear coordinates
_reatom = "(?i)NUCLEAR COORDINATES"
found2 = self.regex(_reatom, keys_only=True)
# to find the optimized frames
_reopt = "Coordinates (Cartesian)"
found_opt = self.find(_reopt, keys_only=True)
if found_opt:
starts = np.array(found_opt) + 6
stop = starts[0]
while '------' not in self[stop]: stop += 1
stops = starts + stop - starts[0]
dfs = []
for idx, (start, stop) in enumerate(zip(starts, stops)):
# parse everything as they may be useful in the future
df = self.pandas_dataframe(start, stop, ncol=11)
# drop everything
df.drop(list(range(5, 11)), axis='columns', inplace=True)
                # we read the coordinates in bohr so no need to convert
df.columns = ['set', 'symbol', 'x', 'y', 'z']
df['set'] = df['set'].astype(int)
df['Z'] = df['symbol'].map(sym2z)
df['frame'] = idx
df['set'] -= 1
dfs.append(df)
atom = pd.concat(dfs, ignore_index=True)
elif found1:
start = stop = found1[-1] + 4
while self[stop].strip(): stop += 1
atom = self.pandas_dataframe(start, stop, ncol=8)
atom.drop(list(range(5,8)), axis='columns', inplace=True)
atom.columns = ['set', 'symbol', 'x', 'y', 'z']
for c in ['x', 'y', 'z']: atom[c] *= Length['Angstrom', 'au']
atom['Z'] = atom['symbol'].map(sym2z)
atom['set'] -= 1
atom['frame'] = 0
elif found2:
#if len(found) > 1:
# raise NotImplementedError("We can only parse outputs from a single NMR calculation")
atom = []
for idx, val in enumerate(found2):
start = val + 3
stop = start
while self[stop].strip(): stop += 1
                # A bit of a hack to guard against a formatting change that depends on
                # the number of atoms in the molecule: the atom index is right-justified,
                # so with more than 100 atoms it fills the allotted space, which changes
                # the delimiter and therefore the number of parsed columns.
self[start:stop] = map(lambda x: x.replace('(', ''), self[start:stop])
df = self.pandas_dataframe(start, stop, ncol=5)
df.columns = ['symbol', 'set', 'x', 'y', 'z']
for c in ['x', 'y', 'z']: df[c] *= Length['Angstrom', 'au']
df['Z'] = df['symbol'].map(sym2z)
df['frame'] = idx
                # remove the trailing characters from the index
df['set'] = list(map(lambda x: x.replace('):', ''), df['set']))
df['set'] = df['set'].astype(int) - 1
atom.append(df)
atom = pd.concat(atom)
else:
raise NotImplementedError("We could not find the atom table in this output. Please submit "+ \
"an issue ticket so we can add it in.")
self.atom = atom
def parse_basis_set(self):
# Find the basis set
_re_bas_00 = '(Slater-type) F U N C T I O N S'
_re_bas_01 = 'Atom Type'
start = self.find(_re_bas_00, keys_only=True)[-1] + 3
starts = self.find(_re_bas_01, start=start, keys_only=True)
lines = []
for ext in starts:
for i in range(4):
lines.append(start + ext + i)
stop = start + ext + 4
while self[stop].strip():
lines.append(stop)
stop += 1
df = pd.read_fwf(StringIO('\n'.join([self[i] for i in lines])),
widths=[4, 2, 12, 4],
names=['n', 'L', 'alpha', 'symbol'])
# Where atom types change
idxs = [0] + df['n'][df['n'] == '---'].index.tolist() + [df.shape[0]]
sets, shells = [], []
for i, (start, stop) in enumerate(zip(idxs, idxs[1:])):
sets.append(np.repeat(i - 1, stop - start))
shells.append(np.arange(-1, stop - start - 1))
df['set'] = np.concatenate(sets)
df['shell'] = np.concatenate(shells)
# Atom table basis set map
basmap = df['symbol'].dropna()
basmap = basmap[basmap.str.endswith(')')].str.strip(')')
basmap = {val: df['set'][key] + 1 for
key, val in basmap.to_dict().items()}
# Discard the garbage
drop = df['n'].str.strip().str.isnumeric().fillna(False)
df.drop(drop[drop == False].index, inplace=True)
df.drop('symbol', axis=1, inplace=True)
# Clean up the series
df['alpha'] = df['alpha'].astype(np.float64)
df['n'] = df['n'].astype(np.int64)
df['L'] = df['L'].str.lower().map(lmap)
df['d'] = np.sqrt((2 * df['L'] + 1) / (4 * np.pi))
df['r'] = df['n'] - (df['L'] + 1)
df['frame'] = 0
self.basis_set = BasisSet(df)
self.meta['spherical'] = False
self.atom['set'] = self.atom['symbol'].map(basmap)
def parse_basis_set_order(self):
# All the columns we need
data = defaultdict(list)
sets = self.basis_set.groupby('set')
# Iterate over atoms
for center, symbol, seht in zip(self.atom.index,
self.atom['symbol'],
self.atom['set']):
# Per basis set
bas = sets.get_group(seht).groupby('L')
for L, grp in bas:
# Iterate over cartesians
for l, m, n in enum_cartesian[L]:
for shell, r in zip(grp['shell'], grp['r']):
data['center'].append(center)
data['symbol'].append(symbol)
data['shell'].append(shell)
data['seht'].append(seht)
data['L'].append(L)
data['l'].append(l)
data['m'].append(m)
data['n'].append(n)
data['r'].append(r)
data['set'] = data.pop('seht')
data['frame'] = 0
self.basis_set_order = pd.DataFrame.from_dict(data)
self.basis_set_order['prefac'] = (self.basis_set_order['L'].apply(dfac21) /
(self.basis_set_order['l'].apply(dfac21) *
self.basis_set_order['m'].apply(dfac21) *
self.basis_set_order['n'].apply(dfac21))
).apply(np.sqrt)
def parse_orbital(self):
_re_orb_00 = 'Orbital Energies, both Spins'
_re_orb_01 = 'Orbital Energies, per Irrep and Spin'
found = self.find(_re_orb_00, _re_orb_01, keys_only=True)
# Open shell vs. closed shell
cols = {
_re_orb_00: ['symmetry', 'vector', 'spin', 'occupation', 'energy', 'eV'],
_re_orb_01: ['vector', 'occupation', 'energy', 'eV', 'dE']}
key = _re_orb_00 if found[_re_orb_00] else _re_orb_01
ldx = found[key][-1] + 4
starts = []
stops = []
irreps = []
while self[ldx].strip() != '':
# error catching for when we have a symmetry label
try:
_ = int(self[ldx].strip()[0])
ldx += 1
except ValueError:
stops.append(ldx)
irreps.append(self[ldx])
# to ensure that we do not skip over the blank line
                # and execute an infinite while loop
if not (self[ldx].strip() == ''):
ldx += 1
starts.append(ldx)
else:
break
else:
# to get the bottom of the table
stops.append(ldx)
# the first entry is actually the very beginning of the table
stops = stops[1:]
# put everything together
dfs = []
for start, stop, irrep in zip(starts, stops, irreps):
df = self.pandas_dataframe(start, stop, cols[key])
df['irrep'] = irrep.strip()
dfs.append(df)
df = pd.concat(dfs, ignore_index=True)
df['vector'] -= 1
if 'spin' in cols[key]:
df['spin'] = df.spin.map({'A': 0, 'B': 1})
df.sort_values(by=['spin', 'energy'], inplace=True)
else:
df.sort_values(by='energy', inplace=True)
df['spin'] = 0
df.reset_index(drop=True, inplace=True)
df['frame'] = df['group'] = 0
self.orbital = df
def parse_contribution(self):
_re_con_00 = ('E(eV) Occ MO % '
'SFO (first member) E(eV) Occ Fragment')
# MO contribution by percentage
found = self.find(_re_con_00, keys_only=True)
starts = [i + 3 for i in found]
widths = [12, 6, 6, 6, 11, 6, 10, 12, 6, 6, 3]
names = ['eV', 'occupation', 'vector', 'sym', '%', 'SFO',
'angmom', 'eV(sfo)', 'occ(sfo)', 'atom', 'symbol']
dfs = []
# Prints for both spins
for i, start in enumerate(starts):
stop = start
while self[stop].strip(): stop += 1
dfs.append(pd.read_fwf(StringIO('\n'.join(self[start:stop])),
delim_whitespace=True, widths=widths,
names=names))
dfs[-1]['spin'] = i
dfs = pd.concat(dfs).reset_index(drop=True)
dfs = dfs.applymap(lambda x: np.nan if (isinstance(x, six.string_types)
and x.isspace()) else x)
dfs.fillna(method='ffill', inplace=True)
# Clean up
dfs['symbol'] = dfs['symbol'].str.strip()
dfs['angmom'] = dfs['angmom'].str.strip()
dfs['angmom'].update(dfs['angmom'].map({'S': 'S:'}))
dfs[['L', 'ml']] = dfs['angmom'].str.extract('(.*):(.*)', expand=True)
dfs['%'] = dfs['%'].str.replace('%', '')
dfs['%'].update(dfs['%'].map({" ******": np.inf}))
dfs['%'] = dfs['%'].astype(np.float64)
dfs['occupation'] = dfs['occupation'].astype(np.float64)
dfs['vector'] = dfs['vector'].astype(np.int64) - 1
dfs['eV'] = dfs['eV'].astype(np.float64)
dfs['atom'] -= 1
self.contribution = dfs
def parse_excitation(self):
# Excitation
_re_exc_00 = '(sum=1) transition dipole moment'
_re_exc_01 = ' no. E/a.u. E/eV f Symmetry'
found = self.find_next(_re_exc_00, keys_only=True)
if not found: return
# First table of interest here
start = found + 4
stop = self.find_next(_re_exc_01, keys_only=True) - 3
os = len(self[start].split()) == 9
todrop = ['occ:', 'virt:']
cols = ['excitation', 'occ', 'drop', 'virt', 'weight', 'TDMx', 'TDMy', 'TDMz']
if os: cols.insert(1, 'spin')
if os: todrop = ['occ', 'virt']
adf = self.pandas_dataframe(start, stop, cols)
adf.drop('drop', axis=1, inplace=True)
s1 = set(adf[cols[1]][adf[cols[1]] == 'NTO'].index)
s2 = set(adf['excitation'][adf['excitation'].isin(todrop)].index)
adf.drop(s1 | s2, axis=0, inplace=True)
adf['excitation'] = adf['excitation'].str[:-1].astype(np.int64) - 1
if os: adf['spin'] = adf['spin'].map({'Alph': 0, 'Beta': 1})
adf[['occ', 'occsym']] = adf['occ'].str.extract('([0-9]*)(.*)', expand=True)
adf[['virt', 'virtsym']] = adf['virt'].str.extract('([0-9]*)(.*)', expand=True)
adf['occ'] = adf['occ'].astype(np.int64) - 1
adf['virt'] = adf['virt'].astype(np.int64) - 1
# Second one here
start = stop + 5
stop = start
while self[stop].strip(): stop += 1
cols = _re_exc_01.split()
df = self.pandas_dataframe(start, stop + 1, cols)
df.drop(cols[0], axis=1, inplace=True)
df.columns = ['energy', 'eV', 'osc', 'symmetry']
# Expand the second table to fit the original
for col in df.columns: adf[col] = adf.excitation.map(df[col])
adf['frame'] = adf['group'] = 0
self.excitation = adf
def parse_momatrix(self):
_re_mo_00 = 'Eigenvectors .* in BAS representation'
_re_mo_01 = 'row '
_re_mo_02 = 'nosym'
found = self.regex(_re_mo_00, _re_mo_01, _re_mo_02,
flags=re.IGNORECASE, keys_only=True)
if not found[_re_mo_00] or not found[_re_mo_01]: return
if found[_re_mo_02]:
thresh = found[_re_mo_00][0]
rowmajor = 'rows' in self[thresh]
starts = np.array([i for i in found[_re_mo_01] if i > thresh]) + 1
nchi = starts[1] - starts[0] - 3
ncol = len(self[starts[0] + 1].split()) - 1
if len(starts) % 2: os = False
else:
anchor = starts[len(starts)//2 - 1] + nchi
sail = starts[len(starts)//2]
os = True if self.find('SPIN 2', start=anchor, stop=sail) else False
blocks = [starts] if not os else [starts[:len(starts)//2],
starts[len(starts)//2:]]
data = pd.DataFrame()
for i, block in enumerate(blocks):
stop = block[-1] + nchi
skips = [k + j for k in list(block[1:] - block[0] - 3) for j in range(3)]
name = 'coef' if not i else 'coef{}'.format(i)
col = self.pandas_dataframe(block[0], stop, ncol + 1,
skiprows=skips).drop(0, axis=1,
).unstack().dropna().reset_index(drop=True)
data[name] = col
norb = len(data.index) // nchi
data['orbital'] = np.concatenate([np.repeat(range(i, norb, ncol), nchi)
for i in range(ncol)])
data['chi'] = np.tile(range(nchi), norb)
data['frame'] = 0
if rowmajor:
data.rename(columns={'orbital': 'chi', 'chi': 'orbital'}, inplace=True)
data.sort_values(by=['orbital', 'chi'], inplace=True)
self.momatrix = data
else:
print('Symmetrized calcs not supported yet.')
def parse_sphr_momatrix(self, verbose=False):
"""
        Parse the localized momatrix (if present).
If the ``locorb`` keyword is used in ADF, an additional momatrix is
printed after localization is performed. Parsing this table allows
for visualization of these orbitals.
Note:
The attr :attr:`~exatomic.adf.output._re_loc_mo` is used for parsing this
section.
"""
_re_loc_mo = ("Localized MOs expanded in CFs+SFOs",
"SFO contributions (%) per Localized Orbital")
found = self.find(*_re_loc_mo)
if len(found[_re_loc_mo[0]]) == 0:
if verbose:
print("No localization performed.")
return # Nothing to parse
start = found[_re_loc_mo[0]][0][0] + 8
stop = found[_re_loc_mo[1]][0][0] - 4
# Parse the localized momatrix as a whole block of text
df = pd.read_fwf(StringIO("\n".join(self[start:stop])),
widths=(16, 9, 9, 9, 9, 9, 9, 9, 9), header=None)
del df[0]
# Identify the eigenvectors and (un)stack them correctly
n = df[df[1].isnull()].index[0] # number of basis functions
m = np.ceil(df.shape[0]/n).astype(int) # number of printed blocks of text
# idx - indexes of "lines" (rows) that don't contain coefficients
idx = [(n+5)*j + i - 5 for j in range(1, m) for i in range(0, 5)]
df = df[~df.index.isin(idx)]
coefs = []
for i in range(0, df.shape[0]//n+1):
d = df.iloc[n*(i-1):n*i, :]
coefs.append(d.unstack().dropna().values.astype(float))
coefs = np.concatenate(coefs)
m = coefs.shape[0]//n # Number of localized MOs
momatrix = pd.DataFrame.from_dict({'coef': coefs,
'orbital': [i for i in range(m) for _ in range(n)],
'chi': [j for _ in range(m) for j in range(n)]})
momatrix['frame'] = self.atom['frame'].unique()[-1]
self.sphr_momatrix = momatrix
def parse_gradient(self):
_regrad = "Energy gradients wrt nuclear displacements"
found = self.find(_regrad, keys_only=True)
if not found:
return
starts = np.array(found) + 6
stop = starts[0]
while '----' not in self[stop]: stop += 1
stops = starts + (stop - starts[0])
dfs = []
for i, (start, stop) in enumerate(zip(starts, stops)):
df = self.pandas_dataframe(start, stop, ncol=5)
df.columns = ['atom', 'symbol', 'fx', 'fy', 'fz']
df['frame'] = i
df['atom'] -= 1
dfs.append(df)
grad = pd.concat(dfs, ignore_index=True)
grad['Z'] = grad['symbol'].map(sym2z)
grad = grad[['atom', 'Z', 'fx', 'fy', 'fz', 'symbol', 'frame']]
for u in ['fx', 'fy', 'fz']: grad[u] *= 1./Length['Angstrom', 'au']
self.gradient = grad
def parse_frequency(self):
_renorm = "Vibrations and Normal Modes"
_refreq = "List of All Frequencies:"
found = self.find(_refreq, keys_only=True)
if not found:
return
elif len(found) > 1:
raise NotImplementedError("We cannot parse more than one frequency calculation in a single output")
found = self.find(_refreq, _renorm, keys_only=True)
start = found[_refreq][0] + 9
stop = start
while self[stop]: stop += 1
df = self.pandas_dataframe(start, stop, ncol=3)
freqs = df[0].values
n = int(np.ceil(freqs.shape[0]/3))
start = found[_renorm][0] + 9
stop = start
while self[stop]: stop += 1
natoms = stop - start
dfs = []
fdx = 0
for i in range(n):
if i == 0:
start = found[_renorm][0] + 9
else:
start = stop + 4
stop = start + natoms
freqs = list(map(lambda x: float(x), self[start-2].split()))
ncol = len(freqs)
df = self.pandas_dataframe(start, stop, ncol=1+3*ncol)
tmp = list(map(lambda x: x.split('.'), df[0]))
index, symbol = list(map(list, zip(*tmp)))
slices = [list(range(1+i, 1+3*ncol, 3)) for i in range(ncol)]
dx, dy, dz = [df[i].unstack().values for i in slices]
freqdx = np.repeat(list(range(fdx, ncol+fdx)), natoms)
zs = pd.Series(symbol).map(sym2z)
freqs = np.repeat(freqs, natoms)
stacked = pd.DataFrame.from_dict({'Z': np.tile(zs, ncol), 'label': np.tile(index, ncol), 'dx': dx,
'dy': dy, 'dz': dz, 'frequency': freqs, 'freqdx': freqdx})
stacked['ir_int'] = 0.0
stacked['symbol'] = np.tile(symbol, ncol)
dfs.append(stacked)
fdx += ncol
frequency = pd.concat(dfs, ignore_index=True)
frequency['frame'] = 0
# TODO: check units of the normal modes
self.frequency = frequency
def parse_nmr_shielding(self):
_reatom = "N U C L E U S :"
_reshield = "==== total shielding tensor"
_renatom = "NUCLEAR COORDINATES (ANGSTROMS)"
found = self.find(_reatom, keys_only=True)
if not found:
#raise NotImplementedError("Could not find {} in output".format(_reatom))
return
ncalc = self.find(_renatom, keys_only=True)
ncalc.append(len(self))
ndx = 0
dfs = []
for start in found:
try:
ndx = ndx if start > ncalc[ndx] and start < ncalc[ndx+1] else ndx+1
except IndexError:
raise IndexError("It seems that there was an issue with determining which NMR calculation we are in")
start_shield = self.find(_reshield, keys_only=True, start=start)[0] + start + 2
end_shield = start_shield + 3
symbol, index = self[start].split()[-1].split('(')
index = int(index.replace(')', ''))
isotropic = float(self[start_shield+4].split()[-1])
df = self.pandas_dataframe(start_shield, end_shield, ncol=3)
cols = ['xx', 'xy', 'xz', 'yx', 'yy', 'yz', 'zx', 'zy', 'zz']
df = pd.DataFrame(df.unstack().values.reshape(1,9), columns=cols)
df['isotropic'] = isotropic
df['atom'] = index - 1
df['symbol'] = symbol
df['label'] = 'nmr shielding'
df['frame'] = ndx
dfs.append(df)
shielding = pd.concat(dfs, ignore_index=True)
self.nmr_shielding = shielding
def parse_j_coupling(self):
_recoupl = "total calculated spin-spin coupling:"
_reatom = "Internal CPL numbering of atoms:"
found = self.find(_reatom, keys_only=True)
if not found:
return
found = self.find(_reatom, _recoupl, keys_only=True)
# we grab the tensors inside the principal axis representation
# for the cartesian axis representation we start the list at 0 and grab every other instance
start_coupl = found[_recoupl][1::2]
start_pert = np.array(found[_reatom]) - 3
dfs = []
# grab atoms
cols = ['xx', 'xy', 'xz', 'yx', 'yy', 'yz', 'zx', 'zy', 'zz']
for ln, start in zip(start_pert, start_coupl):
line = self[ln].split()
# we just replace all of the () in the strings
pert_nucl = list(map(lambda x: x.replace('(', '').replace(')', ''), line[5:]))
nucl = list(map(lambda x: x.replace('(', '').replace(')', ''), line[1:3]))
# grab both tensors
df = self.pandas_dataframe(start+2, start+5, ncol=6)
# this will grab the iso value and tensor elements for the j coupling in hz
df.drop(range(3), axis='columns', inplace=True)
df = pd.DataFrame(df.unstack().values.reshape(1,9), columns=cols)
iso = self[start+1].split()[-1]
# place all of the dataframe columns
df['isotropic'] = float(iso)
df['atom'] = int(nucl[0])
df['symbol'] = nucl[1]
df['pt_atom'] = int(pert_nucl[0])
df['pt_symbol'] = pert_nucl[1]
df['label'] = 'j coupling'
df['frame'] = 0
dfs.append(df)
# put everything together
j_coupling = pd.concat(dfs, ignore_index=True)
j_coupling['atom'] -= 1
j_coupling['pt_atom'] -= 1
self.j_coupling = j_coupling
def __init__(self, *args, **kwargs):
super(Output, self).__init__(*args, **kwargs)
| apache-2.0 |
cl4rke/scikit-learn | sklearn/manifold/t_sne.py | 106 | 20057 | # Author: Alexander Fabisch -- <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
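# For reference, these are the standard t-SNE input affinities (van der Maaten &
# Hinton, 2008): _binary_search_perplexity tunes a per-sample bandwidth sigma_i so that
#     p_{j|i} = exp(-d_ij / (2 * sigma_i^2)) / sum_{k != i} exp(-d_ik / (2 * sigma_i^2))
# reaches the requested perplexity 2**H(P_i); the values returned above are the
# symmetrized, renormalized joint probabilities p_ij ~ p_{j|i} + p_{i|j}, with d_ij
# the (squared) input distances.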
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
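# For reference, the quantities above follow the t-SNE objective with a Student-t
# kernel of ``alpha`` degrees of freedom:
#     q_ij ~ (1 + ||y_i - y_j||^2 / alpha)^(-(alpha + 1) / 2)   (normalized over all pairs)
# and the gradient assembled in the loop is
#     dC/dy_i = (2 * (alpha + 1) / alpha) * sum_j (p_ij - q_ij) * (y_i - y_j)
#               * (1 + ||y_i - y_j||^2 / alpha)^(-(alpha + 1) / 2),
# which reduces to the familiar 4 * sum_j (p_ij - q_ij) (y_i - y_j) / (1 + ||y_i - y_j||^2)
# for alpha = 1.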
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
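# Per-parameter adaptive gains: a parameter's gain grows additively when the
# current gradient and the previous update agree in sign, and shrinks
# multiplicatively otherwise; gains are floored at min_gain so that no
# parameter stalls completely.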
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
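# ranks[j] is the position of sample i's j-th nearest embedded-space neighbour
# within the original-space distance ordering; only neighbours ranked beyond
# n_neighbors contribute to the penalty accumulated in t.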
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
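# Illustrative usage sketch (not part of the original module; `X` is assumed to
# be an (n_samples, n_features) array):
#     X_2d = TSNE(n_components=2, random_state=0).fit_transform(X)
#     t = trustworthiness(X, X_2d, n_neighbors=5)
# Values close to 1.0 indicate that local neighbourhoods are well preserved.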
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
[-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
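# Multiplying P temporarily exaggerates the attractive forces so that tight,
# well-separated clusters can form early in the optimization; the factor is
# divided back out before the final stage below.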
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
| bsd-3-clause |
okadate/romspy | romspy/tplot/tplot_param.py | 1 | 4044 | # coding: utf-8
# (c) 2016-01-27 Teruhisa Okada
import netCDF4
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from matplotlib.offsetbox import AnchoredText
import numpy as np
import pandas as pd
import glob
import romspy
def tplot_param(inifiles, vname, ax=None):
if ax is None:
ax = plt.gca()  # resolve the target axes at call time, not at import time
for inifile in inifiles:
print inifile,
nc = netCDF4.Dataset(inifile, 'r')
param = nc[vname][:]
#param = np.exp(param)
time = nc['ocean_time'][:]
time = netCDF4.num2date(time, romspy.JST)
print time, param
if 'params' not in locals():
default = param[-1]
params = param[0]
times = time[0]
else:
params = np.append(params, param[0])
times = np.append(times, time[0])
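# Plot the optimised parameter trajectory, then overlay 3-day and 7-day moving
# averages and the default value taken from the first file.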
ax.plot(times, params, 'o-', label='opt param')
ax.set_ylabel(vname)
ax.xaxis.set_major_formatter(DateFormatter('%m/%d'))
moving_avg(times, params, ax, window=3)
moving_avg(times, params, ax, window=7)
ax.legend()
pmean = np.mean(params)
pmedian = np.median(params)
#ax.text(0.1,0.1,'mean={}'.format(pmean), transform=ax.transAxes)
text = AnchoredText('mean={:.2e}'.format(pmean), loc=2)
ax.add_artist(text)
ax.plot([times[0], times[-1]], [default, default], '-', alpha=0.5, label='default')
def moving_avg(times, params, ax, window=3):
df = pd.DataFrame(data={'param':params}, index=times)
#df = df.resample('mean', )
df = pd.rolling_mean(df, window=window, min_periods=1, center=True)
ax.plot(df.index, df.param.values, '--', label='{}d-avg'.format(window))
def _get_files(tmpfile, hours):
Nfiles = len(glob.glob(tmpfile))
tmpfile = tmpfile.replace('*','{}')
outfiles = [tmpfile.format(i) for i in range(0,hours*Nfiles,hours)]
return outfiles
def _plot6(inifiles):
fig, ax = plt.subplots(6,1,figsize=(12,12))
tplot_param(inifiles, 'P01', ax=ax[0])
tplot_param(inifiles, 'P04', ax=ax[1])
tplot_param(inifiles, 'P05', ax=ax[2])
tplot_param(inifiles, 'P06', ax=ax[3])
tplot_param(inifiles, 'P07', ax=ax[4])
tplot_param(inifiles, 'P08', ax=ax[5])
return ax
def _get_pfactor(test):
if '0001' in test:
return 0.001
elif '0005' in test:
return 0.005
elif '001' in test:
return 0.01
elif '005' in test:
return 0.05
elif '01' in test:
return 0.1
def main(test, hours=24):
inifiles = _get_files('/home/okada/ism-i/apps/OB500P/testDA/{}/output/ob500_ini_*.nc'.format(test), hours=hours)
figfile = '/home/okada/Dropbox/Figures/2016_param/tplot_param_{}.png'.format(test)
if test == 'param2':
fig, ax = plt.subplots(2,1)
tplot_param(inifiles, 'P01', ax=ax[0])
tplot_param(inifiles, 'P04', ax=ax[1])
ax[0].set_title('4dvar(ini+param), window=1day, pfactor=0.1')
elif 'param3' in test:
ax = _plot6(inifiles)
pfactor = _get_pfactor(test)
ax[0].set_title('4dvar(ini+param), window=1day, pfactor={}'.format(pfactor))
elif 'param4' in test:
ax = _plot6(inifiles)
pfactor = _get_pfactor(test)
ax[0].set_title('4dvar(param), window=1day, pfactor={}'.format(pfactor))
elif 'param5' in test:
ax = _plot6(inifiles)
pfactor = '*'
ax[0].set_title('4dvar(ini+param), window=1day, pfactor={}'.format(pfactor))
elif 'param6' in test:
ax = _plot6(inifiles)
pfactor = '*'
ax[0].set_title('4dvar(param), window=7day, pfactor={}'.format(pfactor))
romspy.savefig(figfile)
#plt.show()
if __name__ == '__main__':
import seaborn as sns
#main('param5-05')
#main('param5-01')
#main('param5-005')
#main('param5-001')
#main('param5-01-hev')
#main('param5-001-hev')
#main('param5-001-7days', hours=24*7)
#main('param6-p01-1', hours=24*7)
#main('param6-p001-1', hours=24*7)
#main('param6R-p01-7', hours=24*7)
#main('param6R-p001-7', hours=24*7)
main('param6-ini', hours=24)
| mit |
ychfan/tensorflow | tensorflow/contrib/factorization/python/ops/kmeans_test.py | 13 | 19945 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.factorization.python.ops import kmeans as kmeans_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
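# Draw num_points points by picking a random centre for each one and adding
# rounded Gaussian offsets; also return the assignments and the squared offset
# norms (each point's contribution to the true k-means score).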
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig().replace(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
steps = 10 * self.num_points // self.batch_size
kmeans.train(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test the use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.train(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertNear(self.true_score, score, self.true_score * 0.01)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.cluster_centers()
# Make a small test set
num_points = 10
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
input_fn = self.input_fn(batch_size=num_points, points=points, num_epochs=1)
# Test predict
assignments = list(kmeans.predict_cluster_index(input_fn))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = list(kmeans.transform(input_fn))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) -
2 * np.dot(points, np.transpose(clusters)) + np.transpose(
np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.train(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
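# A full pass over the data is folded into each k-means iteration, emulating
# full-batch updates through the mini-batch code path.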
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0,
keepdims=True))[0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.train(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.cluster_centers())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = list(
self.kmeans.transform(
input_fn=self.input_fn(batch_size=self.num_points, num_epochs=1)))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
assignments = list(
self.kmeans.predict_cluster_index(
input_fn=self.input_fn(num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points))
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
# Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.train(
input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.cluster_centers())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_index(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=self.config(3))
tf_kmeans.train(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.cluster_centers()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None)))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)  # sklearn estimators use fit(), not train()
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.train(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
binghongcha08/pyQMD | QMC/MC_exchange/permute3d/5.0/energy.py | 3 | 1784 | import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pylab
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
font = {'family' : 'Times New Roman', 'weight' : 'regular', 'size' : '18'}
mpl.rc('font', **font) # pass in the font dict as kwargs
mpl.rcParams['xtick.major.pad']='4'
mpl.rcParams['ytick.major.pad']='4'
#plt.figure(figsize=(14,9))
fig, ax = plt.subplots()
minorLocatorX = MultipleLocator(0.25)
minorLocatorY = MultipleLocator(0.2)
# for the minor ticks, use no labels; default NullFormatter
# data
x, y, yerr, exact = np.genfromtxt('energy.dat',unpack=True,skip_header=1,
comments = '#')
# First illustrate basic pyplot interface, using defaults where possible.
#plt.errorbar(x, y, yerr=yerr, ecolor='g',capsize=6,elinewidth=2, capthick=2,label='Approximate')
ax.set_ylabel('Ground-state Energy [$E_h$]')
ax.set_xlabel('R$_0$ [$a_0$]',labelpad=12)
plt.xlim(0.8,3.3)
plt.ylim(1.6,3.3)
plt.yticks((1.6,2.0,2.4,2.8,3.2))
plt.minorticks_on()
ax.tick_params(axis='both',which='minor',length=5,width=2,labelsize=18)
ax.tick_params(axis='both',which='major',length=8,width=2,labelsize=18)
plt.hlines(3.18, 0.8,3.5,linewidth=2,linestyles='dashed', colors='r')
#zpe = (0.318953,0.343397,0.351372)
#zpe += np.sqrt(2.)/4.0
ax.plot(x, exact,'k--o', lw=2,markersize=8,label='Exact')
#x = np.resize(x,4)
#y = np.resize(y,4)
#x[-1] = 3.0
#y[-1] = 3.523319
ax.plot(x,y,'g-s',linewidth=2, markersize=8,label='Approximate')
ax.xaxis.set_minor_locator(minorLocatorX)
ax.yaxis.set_minor_locator(minorLocatorY)
plt.annotate('$E_{local}$',(2,3.0))
plt.legend(loc=4, frameon=False)
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.14)
plt.savefig('GSE_3D.pdf')
plt.show()
| gpl-3.0 |
michigraber/scikit-learn | sklearn/preprocessing/tests/test_label.py | 35 | 18559 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
@ignore_warnings
def test_label_binarizer_column_y():
# first for binary classification vs multi-label with 1 possible class
# lists are multi-label, array is multi-class :-/
inp_list = [[1], [2], [1]]
inp_array = np.array(inp_list)
multilabel_indicator = np.array([[1, 0], [0, 1], [1, 0]])
binaryclass_array = np.array([[0], [1], [0]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, multilabel_indicator)
assert_array_equal(out_2, binaryclass_array)
# second for multiclass classification vs multi-label with multiple
# classes
inp_list = [[1], [2], [1], [3]]
inp_array = np.array(inp_list)
# the indicator matrix output is the same in this case
indicator = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, out_2)
assert_array_equal(out_2, indicator)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
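# Sparse output stores only non-zero entries, so it is valid only when the
# negative label is 0 and the positive label is non-zero; any other combination
# must raise a ValueError, which is what this branch asserts.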
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
LEX2016WoKaGru/pyClamster | scripts/doppel/doppel_test_sensitivy.py | 1 | 2659 | #!/usr/bin/env python3
import pyclamster
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('tfinn-poster')
#plt.ticklabel_format(style='sci', axis='x', scilimits=(-100000,100000))
rng = np.random.RandomState(42)
azi1, ele1 = 3.526, 0.636
azi2, ele2 = 3.567, 0.666
stdev = 0.1
points = 1000
azi1 = azi1+pyclamster.deg2rad(rng.normal(0,stdev,size=points))
ele1 = ele1+pyclamster.deg2rad(rng.normal(0,stdev,size=points))
azi2 = azi2+pyclamster.deg2rad(rng.normal(0,stdev,size=points))
ele2 = ele2+pyclamster.deg2rad(rng.normal(0,stdev,size=points))
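# Perturb the measured theodolite angles with zero-mean Gaussian noise of
# `stdev` degrees (converted via deg2rad), drawing `points` samples, to probe
# how sensitive the triangulated position is to pointing errors.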
#azi1 = pyclamster.deg2rad(azi1+rng.normal(0,stdev,size=points))
#ele1 = pyclamster.deg2rad(ele1+rng.normal(0,stdev,size=points))
#azi2 = pyclamster.deg2rad(azi2+rng.normal(0,stdev,size=points))
#ele2 = pyclamster.deg2rad(ele2+rng.normal(0,stdev,size=points))
theo1 = pyclamster.Coordinates3d(
azimuth = azi1,
elevation = ele1,
azimuth_clockwise = True,
azimuth_offset = 3/2*np.pi,
elevation_type = "zenith"
)
x, y = pyclamster.Projection().lonlat2xy(11.240817, 54.4947)
pos1 = pyclamster.Coordinates3d(
x = x, y = y, z = 9
)
theo2 = pyclamster.Coordinates3d(
azimuth = azi2,
elevation = ele2,
azimuth_clockwise = True,
azimuth_offset = 3/2*np.pi,
elevation_type = "zenith"
)
x, y = pyclamster.Projection().lonlat2xy(11.2376833, 54.495866)
pos2 = pyclamster.Coordinates3d(
x = x, y = y, z = 0
)
doppel,var_list_c3d = pyclamster.positioning.doppelanschnitt_Coordinates3d(
aziele1 = theo1,
aziele2 = theo2,
pos1 = pos1,
pos2 = pos2,
plot_info=True
)
#ax = pyclamster.doppelanschnitt_plot('c3d single point by hand',doppel,var_list_c3d,pos1,pos2,plot_view=1,plot_position=1)
ax = doppel.plot3d()
ax.scatter3D(pos1.x,pos1.y,pos1.z,color='red', label='cam3')
ax.scatter3D(pos2.x,pos2.y,pos2.z,color='green', label='cam4')
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.legend()
#ax.set_xlim([-100, 300])
#ax.set_ylim([-100, 300])
#ax.set_zlim([-100, 300])
print(np.min(doppel.x), np.median(doppel.x), np.max(doppel.x))
print(np.min(doppel.y), np.median(doppel.y), np.max(doppel.y))
print(np.min(doppel.z), np.median(doppel.z), np.max(doppel.z))
binwidth = 100  # 100 bins spanning the 0-10000 m range used below
fig, ax = plt.subplots()
ax.hist(doppel.z, range=(0, 10000), bins=100, label='Height distribution')
ax.axvline(x=np.median(doppel.z), color='darkred', label='Median')
ax.axvline(x=np.mean(doppel.z), color='green', label='Mean')
#ax.set_xlim([min(0, np.min(doppel.z)), max(500, np.max(doppel.z))])
ax.set_ylabel('Occurrence / {0:d} draws'.format(points))
ax.set_xlabel('Height [m], {0:d} m per bin'.format(binwidth))
plt.legend()
plt.show() | gpl-3.0 |
phantomlinux/IoT-tracking | VAUGHN/webapp/src/utils/database_cass.py | 2 | 2768 | from src.utils import logger, tools
from cassandra.cluster import Cluster
import pandas as pd
log = logger.create_logger(__name__)
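# CNT_QUERY aggregates message counts per (prodID, consID, topic) within a time
# window; CNT_QUERY_FROM_X reads already-counted rows (including a cnt column)
# from the table whose name is substituted in via str.format.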
CNT_QUERY = "SELECT prodID, consID, topic, ts, count(*) as cnt " \
"FROM CNT " \
"WHERE ts >= %s " \
"AND ts <= %s " \
"GROUP BY id, prodID, consID, topic ALLOW FILTERING"
CNT_QUERY_FROM_X = "SELECT * FROM {} WHERE ts >= %s " \
"AND ts <= %s ALLOW FILTERING"
CASS_CONTACT_POINTS = ["127.0.0.1"]
CASS_KEYSPACE = "brokertracker"
def connect():
try:
cluster = Cluster(CASS_CONTACT_POINTS)
session = cluster.connect(CASS_KEYSPACE)
log.info("Connected to Cassandra.")
return session
except AttributeError as e:
log.error("Error connecting to Cassandra: {}".format(e.args))
log.error("Error connecting to Cassandra.")
def getJoinCnt(session, params):
l = []
minTS = params[0].strftime('%Y-%m-%d %H:%M:%S')
maxTS = params[1].strftime('%Y-%m-%d %H:%M:%S')
rows = session.execute(query=CNT_QUERY, parameters=(minTS, maxTS), trace=True)
print(rows.get_query_trace())
for row in rows:
d = {'prodID': row.prodid,
'consID': row.consid,
'topic': row.topic,
'cnt': row.cnt,
'ts': row.ts.strftime("%Y-%m-%d %H:%M:%S")}
l.append(d)
log.info("cnt: {}, {}, {}, {}, {}".format(row.prodid, row.consid, row.topic, row.cnt, row.ts))
return l
def getJoinCntFromX(session, params):
if session is None:
session = connect()
l = []
minTS = params[0].strftime('%Y-%m-%d %H:%M:%S')
maxTS = params[1].strftime('%Y-%m-%d %H:%M:%S')
rows = session.execute(query=CNT_QUERY_FROM_X.format(params[2]), parameters=(minTS, maxTS), trace=True)
for row in rows:
d = {'prodID': row.prodid,
'consID': row.consid,
'topic': row.topic,
'cnt': row.cnt,
'ts': row.ts.strftime("%Y-%m-%d %H:%M:%S")}
l.append(d)
log.info("cntX: {}, {}, {}, {}".format(row.prodid, row.consid, row.topic, row.ts))
return l
def getTopics():
topics = []
session = connect()
QUERY = "SELECT DISTINCT topic FROM topic_list"
rows = session.execute(QUERY)
for row in rows:
topics.append(row.topic)
return topics
def getSubscribers():
subs = []
session = connect()
QUERY = "SELECT DISTINCT cons FROM cons_list"
rows = session.execute(QUERY)
for row in rows:
subs.append(row.cons)
return subs
def getPublisher():
pubs = []
session = connect()
QUERY = "SELECT DISTINCT prod FROM prod_list"
rows = session.execute(QUERY)
for row in rows:
pubs.append(row.prod)
return pubs
| apache-2.0 |
jkthompson/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
global Fl_running
if Fl_running.acquire(0):
while True:
Fltk.Fl.check()
time.sleep(0.005)
else:
print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord= {
cursors.HAND: Fltk.FL_CURSOR_HAND,
cursors.POINTER: Fltk.FL_CURSOR_ARROW,
cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
cursors.MOVE: Fltk.FL_CURSOR_MOVE
}
special_key={
Fltk.FL_Shift_R:'shift',
Fltk.FL_Shift_L:'shift',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
65515:'win',
65516:'win',
}
def error_msg_fltk(msg, parent=None):
Fltk.fl_message(msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def ishow():
"""
Show all the figures and enter the fltk mainloop in another thread
This allows you to keep using the interactive python session.
Warning: does not work under windows
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
if show._needmain:
thread.start_new_thread(Fltk_run_interactive,())
show._needmain = False
def show():
"""
Show all the figures and enter the fltk mainloop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
# mainloop; if an fltk program already exists there is no need to call this
# (see ishow() above for the threaded, interactive version)
if show._needmain:
Fltk.Fl.run()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Fltk.Fl_Double_Window(10,10,30,30)
canvas = FigureCanvasFltkAgg(figure)
window.end()
window.show()
window.make_current()
figManager = FigureManagerFltkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FltkCanvas(Fltk.Fl_Widget):
def __init__(self,x,y,w,h,l,source):
Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
self._source=source
self._oldsize=(None,None)
self._draw_overlay = False
self._button = None
self._key = None
def draw(self):
newsize=(self.w(),self.h())
if(self._oldsize !=newsize):
self._oldsize =newsize
self._source.resize(newsize)
self._source.draw()
t1,t2,w,h = self._source.figure.bbox.bounds
Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
self.redraw()
def blit(self,bbox=None):
if bbox is None:
t1,t2,w,h = self._source.figure.bbox.bounds
else:
t1o,t2o,wo,ho = self._source.figure.bbox.bounds
t1,t2,w,h = bbox.bounds
x,y=int(t1),int(t2)
Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
#self.redraw()
def handle(self, event):
x=Fltk.Fl.event_x()
y=Fltk.Fl.event_y()
yf=self._source.figure.bbox.height() - y
if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
return 1
elif event == Fltk.FL_KEYDOWN:
ikey= Fltk.Fl.event_key()
if(ikey<=255):
self._key=chr(ikey)
else:
try:
self._key=special_key[ikey]
                except KeyError:
self._key=None
FigureCanvasBase.key_press_event(self._source, self._key)
return 1
elif event == Fltk.FL_KEYUP:
FigureCanvasBase.key_release_event(self._source, self._key)
self._key=None
elif event == Fltk.FL_PUSH:
self.window().make_current()
if Fltk.Fl.event_button1():
self._button = 1
elif Fltk.Fl.event_button2():
self._button = 2
elif Fltk.Fl.event_button3():
self._button = 3
else:
self._button = None
if self._draw_overlay:
self._oldx=x
self._oldy=y
if Fltk.Fl.event_clicks():
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
else:
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
elif event == Fltk.FL_ENTER:
self.take_focus()
return 1
elif event == Fltk.FL_LEAVE:
return 1
elif event == Fltk.FL_MOVE:
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_DRAG:
self.window().make_current()
if self._draw_overlay:
self._dx=Fltk.Fl.event_x()-self._oldx
self._dy=Fltk.Fl.event_y()-self._oldy
Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_RELEASE:
self.window().make_current()
if self._draw_overlay:
Fltk.fl_overlay_clear()
FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
self._button = None
return 1
return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self,figure)
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
#self.draw()
def resize(self,size):
w, h = size
# compute desired figure size in inches
dpival = self.figure.dpi.get()
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch,hinch)
def draw(self):
FigureCanvasAgg.draw(self)
self.canvas.redraw()
def blit(self,bbox):
self.canvas.blit(bbox)
show = draw
def widget(self):
return self.canvas
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
figman.window.hide()
Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The fltk.Toolbar
window : The fltk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
#Fltk container window
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window = window
self.window.size(w,h+30)
self.window_title="Figure %d" % num
self.window.label(self.window_title)
self.window.size_range(350,200)
self.window.callback(destroy_figure,self)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
else:
self.toolbar = None
self.window.add_resizable(canvas.widget())
if self.toolbar:
self.window.add(self.toolbar.widget())
self.toolbar.update()
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
_focus = windowing.FocusManager()
self.canvas.draw()
self.window.redraw()
def set_window_title(self, title):
self.window_title=title
self.window.label(title)
class AxisMenu:
def __init__(self, toolbar):
self.toolbar=toolbar
self._naxes = toolbar.naxes
self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
self._mbutton.add("Select All",0,select_all,self,0)
self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
self._axis_txt=[]
self._axis_var=[]
for i in range(self._naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes, naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
self._mbutton.remove(i+2)
if(naxes):
self._axis_var=self._axis_var[:naxes-1]
self._axis_txt=self._axis_txt[:naxes-1]
else:
self._axis_var=[]
self._axis_txt=[]
self._naxes = naxes
set_active(0,self)
def widget(self):
return self._mbutton
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
return a
def set_active(ptr,amenu):
amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
for a in amenu._axis_var:
if not a.value(): a.set()
set_active(ptr,amenu)
def select_all(ptr,amenu):
for a in amenu._axis_var:
a.set()
set_active(ptr,amenu)
class FLTKButton:
def __init__(self, text, file, command,argument,type="classic"):
file = os.path.join(rcParams['datapath'], 'images', file)
self.im = Fltk.Fl_PNM_Image(file)
size=26
if type=="repeat":
self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="classic":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="light":
self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="pushed":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_UP_BOX)
self.b.down_box(Fltk.FL_DOWN_BOX)
self.b.type(Fltk.FL_TOGGLE_BUTTON)
self.tooltiptext=text+" "
self.b.tooltip(self.tooltiptext)
self.b.callback(command,argument)
self.b.image(self.im)
self.b.deimage(self.im)
self.type=type
def widget(self):
return self.b
class NavigationToolbar:
"""
    Public attributes
canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget)
"""
def __init__(self, canvas, figman):
#xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
#height, width = 50, xmax-xmin
self.canvas = canvas
self.figman = figman
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bLeft = FLTKButton(
text="Left", file="stock_left.ppm",
command=pan,argument=(self,1,'x'),type="repeat")
self.bRight = FLTKButton(
text="Right", file="stock_right.ppm",
command=pan,argument=(self,-1,'x'),type="repeat")
self.bZoomInX = FLTKButton(
text="ZoomInX",file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'x'),type="repeat")
self.bZoomOutX = FLTKButton(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'x'),type="repeat")
self.bUp = FLTKButton(
text="Up", file="stock_up.ppm",
command=pan,argument=(self,1,'y'),type="repeat")
self.bDown = FLTKButton(
text="Down", file="stock_down.ppm",
command=pan,argument=(self,-1,'y'),type="repeat")
self.bZoomInY = FLTKButton(
text="ZoomInY", file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'y'),type="repeat")
self.bZoomOutY = FLTKButton(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'y'),type="repeat")
self.bSave = FLTKButton(
text="Save", file="stock_save_as.ppm",
command=save_figure, argument=self)
self._group.end()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
def pan(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.panx(direction)
else:
a.pany(direction)
base.figman.show()
def zoom(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.zoomx(direction)
else:
a.zoomy(direction)
base.figman.show()
def save_figure(ptr,base):
filetypes = base.canvas.get_supported_filetypes()
default_filetype = base.canvas.get_default_filetype()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
selected_filter = 0
filters = []
for i, (ext, name) in enumerate(sorted_filetypes):
filter = '%s (*.%s)' % (name, ext)
filters.append(filter)
if ext == default_filetype:
selected_filter = i
filters = '\t'.join(filters)
file_chooser=base._fc
file_chooser.filter(filters)
file_chooser.filter_value(selected_filter)
file_chooser.show()
while file_chooser.visible() :
Fltk.Fl.wait()
fname=None
if(file_chooser.count() and file_chooser.value(0) != None):
fname=""
(status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
if fname is None: # Cancel
return
#start from last directory
lastDir = os.path.dirname(fname)
file_chooser.directory(lastDir)
format = sorted_filetypes[file_chooser.filter_value()][0]
try:
base.canvas.print_figure(fname, format=format)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
"""
    Public attributes
canvas - the FigureCanvas
figman - the Figure manager
"""
def __init__(self, canvas, figman):
self.canvas = canvas
self.figman = figman
NavigationToolbar2.__init__(self, canvas)
self.pan_selected=False
self.zoom_selected=False
def set_cursor(self, cursor):
Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
def dynamic_update(self):
self.canvas.draw()
def pan(self,*args):
self.pan_selected=not self.pan_selected
self.zoom_selected = False
self.canvas.canvas._draw_overlay= False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.pan(self,args)
def zoom(self,*args):
self.zoom_selected=not self.zoom_selected
self.canvas.canvas._draw_overlay=self.zoom_selected
self.pan_selected = False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.zoom(self,args)
def configure_subplots(self,*args):
window = Fltk.Fl_Double_Window(100,100,480,240)
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasFltkAgg(toolfig)
window.end()
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
window.show()
canvas.show()
def _init_toolbar(self):
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = self.canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bHome = FLTKButton(
text="Home", file="home.ppm",
command=self.home,argument=self)
self.bBack = FLTKButton(
text="Back", file="back.ppm",
command=self.back,argument=self)
self.bForward = FLTKButton(
text="Forward", file="forward.ppm",
command=self.forward,argument=self)
self.bPan = FLTKButton(
text="Pan/Zoom",file="move.ppm",
command=self.pan,argument=self,type="pushed")
self.bZoom = FLTKButton(
text="Zoom to rectangle",file="zoom_to_rect.ppm",
command=self.zoom,argument=self,type="pushed")
self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots,argument=self,type="pushed")
self.bSave = FLTKButton(
text="Save", file="filesave.ppm",
command=save_figure, argument=self)
self._group.end()
self.message = Fltk.Fl_Output(0,0,w,8)
self._group.add_resizable(self.message)
self.update()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def set_message(self, s):
self.message.value(s)
FigureManager = FigureManagerFltkAgg
| gpl-3.0 |
arabenjamin/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns too-fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
pylayers/pylayers | pylayers/util/CDF.py | 1 | 6249 | # -*- coding:Utf-8 -*-
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pdb
#import mplrc.ieee.transaction
#
# mplrc is a python module which provides an easy way to change
# matplotlib's plotting configuration for specific publications.
# git clone https://github.com/arsenovic/mplrc.git
#
#
# from matplotlib import rcParams
# rcParams['text.usetex'] = True
# rcParams['text.latex.unicode'] = True
class CDF(object):
def __init__(self, ld, filename='',filetype=[]):
"""
cdf = CDF(ld)
Parameters
----------
ld : list
        list of dictionaries
filename : string
Notes
-----
d0 = ld[0]
        d0['bound'] : abscissa bounds of the cdf
        d0['values'] : values
        d0['xlabel'] : x-axis label
        d0['ylabel'] : y-axis label
        d0['legend'] : legend
        d0['title'] : title
        d0['filename'] : filename
d0['linewidth'] : linewidth
"""
self.ld = ld
self.filename = filename
if self.filename == '':
self.save=False
else:
self.save=True
plt.rcParams['xtick.labelsize'] ='x-large'
plt.rcParams['ytick.labelsize'] ='x-large'
plt.rcParams['axes.labelsize'] ='large'
plt.rcParams['font.weight'] ='normal'
plt.rcParams['xtick.minor.size']=2
plt.rcParams['legend.fontsize'] = 'xx-large'
plt.rcParams['font.size'] =10
plt.rcParams['grid.linewidth'] =3.5
plt.rcParams['xtick.major.pad'] =20
if filetype == []:
self.filetype = ['jpg','eps','pdf']
else:
self.filetype = filetype
self.bound = []
self.cdf = []
for d in self.ld:
if d.has_key('bound'):
bound = d['bound']
else:
bound = np.linspace(d['values'].min(),
d['values'].max()+0.1*d['values'].max(),
len(d['values']*0.1))
values = d['values']
Nv = len(values)
cdf = np.array([])
for k in bound:
u = np.nonzero(values <= k)
lu = len(u[0]) / (Nv * 1.0)
cdf = np.hstack((cdf, lu))
self.axis=[0,bound[-1],0,1.]
self.cdf.append(cdf)
self.bound.append(bound)
def show(self,**kwargs):
""" show cdf
"""
if 'fig' not in kwargs:
f = plt.figure(**kwargs)
else:
f = kwargs['fig']
if 'ax' not in kwargs:
ax = f.add_subplot(111)
else:
ax = kwargs['ax']
leg = []
c = []
for k in range(len(self.ld)):
d = self.ld[k]
if d.has_key('bound'):
bound = d['bound']
else:
bound = np.linspace(d['values'].min(),d['values'].max(),len(d['values']*0.1))
if d.has_key('marker'):
marker = d['marker']
else:
marker = ''
if d.has_key('markersize'):
markersize = d['markersize']
else:
markersize = 5
if d.has_key('markercolor'):
markercolor = d['markercolor']
else:
markercolor = 'k'
if d.has_key('markerfrequency'):
markerfrequency = d['markerfrequency']
else:
markerfrequency = 10
if d.has_key('linewidth'):
linewidth = d['linewidth']
else:
linewidth = 1
if d.has_key('linestyle'):
linestyle = d['linestyle']
else:
linestyle = '-'
if d.has_key('color'):
color = d['color']
else:
color ='k'
if d.has_key('legend'):
legend = d['legend']
else:
legend=''
if d.has_key('title'):
title = d['title']
else:
title=''
if k == 0:
if d.has_key('x_label'):
xlabel=d['x_label']
else:
xlabel=''
if d.has_key('y_label'):
ylabel=d['y_label']
else:
ylabel=''
self.bound[k] = bound
# leg.append(legend)
cdf = self.cdf[k]
c.append(ax.plot(bound, cdf, marker=marker,
markevery=markerfrequency, ms=markersize, mfc=markercolor,
ls=linestyle, c=color, linewidth=linewidth,
label=legend))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
ax.legend(loc='best', scatterpoints=1, numpoints=1.)
plt.axis(self.axis)
plt.grid()
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)
if title != '':
plt.title(title)
if self.save :
for typ in self.filetype:
plt.savefig(self.filename + '.' + typ, format=typ,
bbox_inches='tight', pad_inches=0)
if __name__ == "__main__":
d0 = {}
d0['values'] = sp.randn(1000)
d0['bound'] = np.arange(-10, 10, 0.1)
d0['xlabel'] = 'xlabel'
d0['ylabel'] = 'ylabel'
d0['legend'] = 'legend '
d0['markersize'] = 3
d0['markercolor'] = 'red'
d0['markerfrequency'] = 2
d0['title'] = 'title'
d0['color'] = 'black'
d0['marker'] = 'o'
d0['linestyle'] = '-'
d0['linewidth'] = 3
d0['filename'] = 'essai.png'
d1 = {}
d1['values'] = 4 * sp.randn(1000)
d1['bound'] = np.arange(-10, 10, 0.1)
d1['xlabel'] = 'xlabel'
d1['ylabel'] = 'ylabel'
d1['legend'] = 'legend '
d1['markersize'] = 3
d1['markercolor'] = 'blue'
d1['linestyle'] = '-'
d1['color'] = 'black'
d1['markerfrequency'] = 2
d1['title'] = 'title'
d1['marker'] = 'o'
d1['linewidth'] = 3
lv = [d0, d1]
c = CDF(lv, 'fig')
c.show()
| mit |
Flumotion/flumotion | tools/theora-bench.py | 3 | 6965 | #!/usr/bin/env python
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
import gobject
gobject.threads_init()
import pygst
pygst.require('0.10')
import gst
import time
import sys
class SlidingWindow:
def __init__(self, size):
self._window = [0.0] * size
self._windowPtr = 0
self._first = True
# Maintain the current average, and the max of our current average
self.max = 0.0
self.average = 0.0
self.windowSize = size
def addValue(self, val):
self._window[self._windowPtr] = val
self._windowPtr = (self._windowPtr + 1) % self.windowSize
if self._first:
if self._windowPtr == 0:
self._first = False
return
self.average = sum(self._window) / self.windowSize
if self.average > self.max:
self.max = self.average
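# Minimal usage sketch (not part of the original script): feed per-frame
# processing times into the window; `average` is the smoothed value and `max`
# tracks the largest smoothed value seen so far.
#
#   window = SlidingWindow(3)
#   for t in (0.01, 0.02, 0.03, 0.02):
#       window.addValue(t)
#   print window.average, window.max    # Python 2 print, matching this script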
class TheoraBench:
def __init__(self, filename, outTemplate, width=None, height=None,
framerate=None):
self.framerate = None
self.width = None
self.height = None
self.outfileTemplate = outTemplate
# TODO: What's a reasonable windowSize to use?
windowSize = 20
self.window = SlidingWindow(windowSize)
self.samples = 0
self.data = ([], [], [])
self.pipeline = pipeline = gst.Pipeline()
self.bus = pipeline.get_bus()
filesrc = gst.element_factory_make("filesrc")
decodebin = gst.element_factory_make("decodebin")
self.ffmpegcolorspace = gst.element_factory_make("ffmpegcolorspace")
videorate = gst.element_factory_make("videorate")
videoscale = gst.element_factory_make("videoscale")
self.theoraenc = gst.element_factory_make("theoraenc")
fakesink = gst.element_factory_make("fakesink")
filesrc.set_property("location", filename)
pipeline.add(filesrc, decodebin, self.ffmpegcolorspace, videorate,
videoscale, self.theoraenc, fakesink)
filesrc.link(decodebin)
gst.element_link_many(self.ffmpegcolorspace, videorate, videoscale)
structure = gst.Structure("video/x-raw-yuv")
if height:
structure['height'] = height
if width:
structure['width'] = width
if framerate:
structure['framerate'] = framerate
caps = gst.Caps(structure)
videoscale.link(self.theoraenc, caps)
self.theoraenc.link(fakesink)
decodebin.connect("new-decoded-pad", self._pad_added_cb)
def _eos_cb(self, bus, msg):
print "Done"
fn = self.outfileTemplate % (self.width, self.height,
float(self.framerate))
print "Writing file: ", fn
self.writeGraph(fn, self.data,
"Frame number",
"CPU Percentage required",
            ("Frame",
"Sliding Average (%d frames)" % self.window.windowSize,
"Sliding Average Peak"))
self.mainloop.quit()
def writeGraph(self, filename, data, xlabel, ylabel, dataNames):
# data is ([time], [average], [average_peak]) as percentages (floats)
#out = open(filename, "w")
#out.close()
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab
length = len(data[0])
pylab.plot(xrange(length), data[1])
pylab.plot(xrange(length), data[2])
pylab.axis([0, length-1, 0, 110])
pylab.savefig(filename, dpi=72)
pass
def _error_cb(self, bus, msg):
error = msg.parse_error()
print "Error: ", error[1]
self.mainloop.quit()
def run(self):
self.mainloop = gobject.MainLoop()
self.bus.add_signal_watch()
self.bus.connect("message::eos", self._eos_cb)
        self.bus.connect("message::error", self._error_cb)
self.pipeline.set_state(gst.STATE_PLAYING)
self.mainloop.run()
def _pad_added_cb(self, decodebin, pad, last):
structure = pad.get_caps()[0]
name = structure.get_name()
if name.startswith('video/x-raw-'):
sinkpad = self.ffmpegcolorspace.get_pad("sink")
pad.link(sinkpad)
#self.framerate = structure['framerate']
sinkpad = self.theoraenc.get_pad("sink")
srcpad = self.theoraenc.get_pad("src")
sinkpad.add_buffer_probe(self._buffer_probe_sink_cb)
srcpad.add_buffer_probe(self._buffer_probe_src_cb)
def _buffer_probe_sink_cb(self, pad, buf):
if not self.framerate:
self.framerate = buf.get_caps()[0]['framerate']
self.width = buf.get_caps()[0]['width']
self.height = buf.get_caps()[0]['height']
self._last_ts = time.time()
return True
def _buffer_probe_src_cb(self, pad, buf):
processing_time = time.time() - self._last_ts
self.window.addValue(processing_time)
self.samples += 1
if self.samples <= self.window.windowSize:
return True # Ignore these, our sliding window isn't very smart
self.data[0].append(processing_time * float(self.framerate) * 100.0)
self.data[1].append(self.window.average * float(
self.framerate) * 100.0)
self.data[2].append(self.window.max * float(self.framerate) * 100.0)
print "This frame: %.2f: %.2f%%. Average: %.2f%%. Peak: %.2f%%" % (
processing_time,
processing_time * float(self.framerate) * 100.0,
self.window.average * float(self.framerate) * 100.0,
self.window.max * float(self.framerate) * 100.0)
return True
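# Worked example of the percentage arithmetic above (not from the original
# script): at 25 fps one frame's real-time budget is 40 ms, so a frame that
# takes 0.020 s to encode costs 0.020 * 25 * 100 = 50% of one CPU.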
if len(sys.argv) == 2:
framerates = [(30, 1),
(25, 1),
(25, 2), (None, None)]
sizes = [(800, 600),
(400, 300),
(None, None)] # Other useful sizes here
for framerate in framerates:
for size in sizes:
if framerate[1]:
fr = gst.Fraction(framerate[0], framerate[1])
else:
fr = None
infile = sys.argv[1]
outfileTemplate = sys.argv[1] + ".%dx%d@%.2f.png"
bench = TheoraBench(sys.argv[1], outfileTemplate, size[0],
size[1], fr)
bench.run()
else:
print "Usage: %s filename.ogg" % sys.argv[0]
| lgpl-2.1 |
fluxcapacitor/source.ml | jupyterhub.ml/notebooks/train_deploy/zz_under_construction/zz_old/TensorFlow/SkFlow_DEPRECATED/text_classification_character_cnn.py | 6 | 3495 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an example of using convolutional networks over characters
on the DBpedia dataset to predict a class from the description of an entity.
The model is similar to the one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is roughly an alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
import skflow
### Training data
# Download dbpedia_csv.tar.gz from
# https://drive.google.com/folderview?id=0Bz8a_Dbh9Qhbfll6bVpmNUtUcFdjYmF2SEpmZUZUcVNiMUw1TWN6RDV3a0JHT3kxLVhVR2M
# Unpack: tar -xvf dbpedia_csv.tar.gz
train = pandas.read_csv('dbpedia_csv/train.csv', header=None)
X_train, y_train = train[2], train[0]
test = pandas.read_csv('dbpedia_csv/test.csv', header=None)
X_test, y_test = test[2], test[0]
### Process vocabulary
MAX_DOCUMENT_LENGTH = 100
char_processor = skflow.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
X_train = np.array(list(char_processor.fit_transform(X_train)))
X_test = np.array(list(char_processor.transform(X_test)))
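# Rough illustration (an assumption about skflow's ByteProcessor, not stated in
# the original example): each document is encoded as a fixed-length sequence of
# byte values, padded/truncated to MAX_DOCUMENT_LENGTH, e.g.
#
#   list(char_processor.transform(['ab']))[0]
#
# should start with the byte codes for 'a' and 'b' (97, 98) followed by padding,
# so X_train / X_test have shape (n_documents, MAX_DOCUMENT_LENGTH).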
### Models
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(X, y):
"""Character level convolutional neural network model to predict classes."""
byte_list = tf.reshape(skflow.ops.one_hot_matrix(X, 256),
[-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = skflow.ops.conv2d(byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convlution+Relu.
pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = skflow.ops.conv2d(pool1, N_FILTERS, FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
return skflow.models.logistic_regression(pool2, y)
classifier = skflow.TensorFlowEstimator(model_fn=char_cnn_model, n_classes=15,
steps=100, optimizer='Adam', learning_rate=0.01, continue_training=True)
# Continuously train (100 steps per call to fit) & predict on the test set.
while True:
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print("Accuracy: %f" % score)
| apache-2.0 |
0asa/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties (L1, L2 and Elastic Net).
All of these penalties are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
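# Side note (not part of the original example): l1, l2 and el trace the
# positive-quadrant boundary of the corresponding unit ball, e.g. every point
# (x, l2(x)) satisfies x**2 + y**2 == 1 and (x, l1(x)) satisfies |x| + |y| == 1.
# A quick numerical check, if desired:
#
#   xs_check = np.linspace(0, 1, 5)
#   assert np.allclose(xs_check ** 2 + l2(xs_check) ** 2, 1.0)
#   assert np.allclose(np.abs(xs_check) + l1(xs_check), 1.0)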
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # 0.5 would cause a division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
nilmtk/nilmtk | nilmtk/legacy/disaggregate/combinatorial_optimisation.py | 1 | 10630 | from warnings import warn
import pandas as pd
import numpy as np
import pickle
import copy
from ...utils import find_nearest
from ...feature_detectors import cluster
from . import Disaggregator
from ...datastore import HDFDataStore
class CombinatorialOptimisation(Disaggregator):
"""1 dimensional combinatorial optimisation NILM algorithm.
Attributes
----------
model : list of dicts
Each dict has these keys:
states : list of ints (the power (Watts) used in different states)
training_metadata : ElecMeter or MeterGroup object used for training
this set of states. We need this information because we
need the appliance type (and perhaps some other metadata)
for each model.
state_combinations : 2D array
Each column is an appliance.
Each row is a possible combination of power demand values e.g.
[[0, 0, 0, 0],
[0, 0, 0, 100],
[0, 0, 50, 0],
[0, 0, 50, 100], ...]
MIN_CHUNK_LENGTH : int
"""
def __init__(self):
self.model = []
self.state_combinations = None
self.MIN_CHUNK_LENGTH = 100
self.MODEL_NAME = 'CO'
def train(self, metergroup, num_states_dict=None, **load_kwargs):
"""Train using 1D CO. Places the learnt model in the `model` attribute.
Parameters
----------
metergroup : a nilmtk.MeterGroup object
num_states_dict : dict
**load_kwargs : keyword arguments passed to `meter.power_series()`
Notes
-----
* only uses first chunk for each meter (TODO: handle all chunks).
"""
if num_states_dict is None:
num_states_dict = {}
if self.model:
raise RuntimeError(
"This implementation of Combinatorial Optimisation"
" does not support multiple calls to `train`.")
num_meters = len(metergroup.meters)
if num_meters > 12:
max_num_clusters = 2
else:
max_num_clusters = 3
for i, meter in enumerate(metergroup.submeters().meters):
print("Training model for submeter '{}'".format(meter))
power_series = meter.power_series(**load_kwargs)
chunk = next(power_series)
num_total_states = num_states_dict.get(meter)
if num_total_states is not None:
num_on_states = num_total_states - 1
else:
num_on_states = None
self.train_on_chunk(chunk, meter, max_num_clusters, num_on_states)
# Check to see if there are any more chunks.
# TODO handle multiple chunks per appliance.
try:
next(power_series)
except StopIteration:
pass
else:
warn("The current implementation of CombinatorialOptimisation"
" can only handle a single chunk. But there are multiple"
" chunks available. So have only trained on the"
" first chunk!")
print("Done training!")
def train_on_chunk(self, chunk, meter, max_num_clusters, num_on_states):
# Check if we've already trained on this meter
meters_in_model = [d['training_metadata'] for d in self.model]
if meter in meters_in_model:
raise RuntimeError(
"Meter {} is already in model!"
" Can't train twice on the same meter!"
.format(meter))
states = cluster(chunk, max_num_clusters, num_on_states)
self.model.append({
'states': states,
'training_metadata': meter})
def _set_state_combinations_if_necessary(self):
"""Get centroids"""
# If we import sklearn at the top of the file then auto doc fails.
if (self.state_combinations is None or
self.state_combinations.shape[1] != len(self.model)):
from sklearn.utils.extmath import cartesian
centroids = [model['states'] for model in self.model]
self.state_combinations = cartesian(centroids)
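    # Illustrative note (not from the original source): with two appliances
    # whose learnt states are [0, 100] and [0, 50], cartesian() yields
    #
    #   [[  0,   0],
    #    [  0,  50],
    #    [100,   0],
    #    [100,  50]]
    #
    # i.e. one row per possible combination of appliance power demands, which
    # is what disaggregate_chunk() later searches over.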
def disaggregate(self, mains, output_datastore,**load_kwargs):
'''Disaggregate mains according to the model learnt previously.
Parameters
----------
mains : nilmtk.ElecMeter or nilmtk.MeterGroup
output_datastore : instance of nilmtk.DataStore subclass
For storing power predictions from disaggregation algorithm.
sample_period : number, optional
The desired sample period in seconds. Set to 60 by default.
sections : TimeFrameGroup, optional
Set to mains.good_sections() by default.
**load_kwargs : key word arguments
Passed to `mains.power_series(**kwargs)`
'''
load_kwargs = self._pre_disaggregation_checks(load_kwargs)
load_kwargs.setdefault('sample_period', 60)
load_kwargs.setdefault('sections', mains.good_sections())
timeframes = []
building_path = '/building{}'.format(mains.building())
mains_data_location = building_path + '/elec/meter1'
data_is_available = False
for chunk in mains.power_series(**load_kwargs):
# Check that chunk is sensible size
if len(chunk) < self.MIN_CHUNK_LENGTH:
continue
# Record metadata
timeframes.append(chunk.timeframe)
measurement = chunk.name
appliance_powers = self.disaggregate_chunk(chunk)
for i, model in enumerate(self.model):
appliance_power = appliance_powers.iloc[:, i]
if len(appliance_power) == 0:
continue
data_is_available = True
cols = pd.MultiIndex.from_tuples([chunk.name])
meter_instance = model['training_metadata'].instance()
df = pd.DataFrame(
appliance_power.values, index=appliance_power.index,
columns=cols)
key = '{}/elec/meter{}'.format(building_path, meter_instance)
output_datastore.append(key, df)
# Copy mains data to disag output
mains_df = pd.DataFrame(chunk, columns=cols)
output_datastore.append(key=mains_data_location, value=mains_df)
if data_is_available:
self._save_metadata_for_disaggregation(
output_datastore=output_datastore,
sample_period=load_kwargs['sample_period'],
measurement=measurement,
timeframes=timeframes,
building=mains.building(),
meters=[d['training_metadata'] for d in self.model]
)
def disaggregate_chunk(self, mains):
"""In-memory disaggregation.
Parameters
----------
mains : pd.Series
Returns
-------
appliance_powers : pd.DataFrame where each column represents a
disaggregated appliance. Column names are the integer index
into `self.model` for the appliance in question.
"""
if not self.model:
raise RuntimeError(
"The model needs to be instantiated before"
" calling `disaggregate`. The model"
" can be instantiated by running `train`.")
if len(mains) < self.MIN_CHUNK_LENGTH:
raise RuntimeError("Chunk is too short.")
# Because CombinatorialOptimisation could have been trained using
# either train() or train_on_chunk(), we must
# set state_combinations here.
self._set_state_combinations_if_necessary()
"""
# Add vampire power to the model
if vampire_power is None:
vampire_power = get_vampire_power(mains)
if vampire_power > 0:
print("Including vampire_power = {} watts to model..."
.format(vampire_power))
n_rows = self.state_combinations.shape[0]
vampire_power_array = np.zeros((n_rows, 1)) + vampire_power
state_combinations = np.hstack(
(self.state_combinations, vampire_power_array))
else:
state_combinations = self.state_combinations
"""
state_combinations = self.state_combinations
summed_power_of_each_combination = np.sum(state_combinations, axis=1)
# summed_power_of_each_combination is now an array where each
# value is the total power demand for each combination of states.
# Start disaggregation
indices_of_state_combinations, residual_power = find_nearest(
summed_power_of_each_combination, mains.values)
appliance_powers_dict = {}
for i, model in enumerate(self.model):
print("Estimating power demand for '{}'"
.format(model['training_metadata']))
predicted_power = state_combinations[
indices_of_state_combinations, i].flatten()
column = pd.Series(predicted_power, index=mains.index, name=i)
appliance_powers_dict[self.model[i]['training_metadata']] = column
appliance_powers = pd.DataFrame(appliance_powers_dict, dtype='float32')
return appliance_powers
def import_model(self, filename):
with open(filename, 'rb') as in_file:
imported_model = pickle.load(in_file)
self.model = imported_model.model
# Recreate datastores from filenames
for pair in self.model:
store_filename = pair['training_metadata'].store
pair['training_metadata'].store = HDFDataStore(store_filename)
self.state_combinations = imported_model.state_combinations
self.MIN_CHUNK_LENGTH = imported_model.MIN_CHUNK_LENGTH
def export_model(self, filename):
# Can't pickle datastore, so convert to filenames
original_stores = []
for pair in self.model:
original_store = pair['training_metadata'].store
original_stores.append(original_store)
pair['training_metadata'].store = original_store.store.filename
try:
with open(filename, 'wb') as out_file:
pickle.dump(self, out_file)
finally:
# Restore the stores even if the pickling fails
for original_store, pair in zip(original_stores, self.model):
pair['training_metadata'].store = original_store
| apache-2.0 |
devanshdalal/scikit-learn | sklearn/utils/tests/test_fixes.py | 28 | 3156 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import pickle
import numpy as np
import math
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.fixes import norm
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
def test_masked_array_obj_dtype_pickleable():
marr = MaskedArray([1, None, 'a'], dtype=object)
for mask in (True, False, [0, 1, 0]):
marr.mask = mask
marr_pickled = pickle.loads(pickle.dumps(marr))
assert_array_equal(marr.data, marr_pickled.data)
assert_array_equal(marr.mask, marr_pickled.mask)
def test_norm():
X = np.array([[-2, 4, 5],
[1, 3, -4],
[0, 0, 8],
[0, 0, 0]]).astype(float)
# Test various axis and order
assert_equal(math.sqrt(135), norm(X))
assert_array_equal(
np.array([math.sqrt(5), math.sqrt(25), math.sqrt(105)]),
norm(X, axis=0)
)
assert_array_equal(np.array([3, 7, 17]), norm(X, axis=0, ord=1))
assert_array_equal(np.array([2, 4, 8]), norm(X, axis=0, ord=np.inf))
assert_array_equal(np.array([0, 0, 0]), norm(X, axis=0, ord=-np.inf))
assert_array_equal(np.array([11, 8, 8, 0]), norm(X, axis=1, ord=1))
# Test shapes
assert_equal((), norm(X).shape)
assert_equal((3,), norm(X, axis=0).shape)
assert_equal((4,), norm(X, axis=1).shape)
| bsd-3-clause |
anomam/pvlib-python | pvlib/bifacial.py | 1 | 7458 | """
The ``bifacial`` module contains functions for modeling back surface
plane-of-array irradiance under various conditions.
"""
import pandas as pd
import numpy as np
def pvfactors_timeseries(
solar_azimuth, solar_zenith, surface_azimuth, surface_tilt,
axis_azimuth,
timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo,
n_pvrows=3, index_observed_pvrow=1,
rho_front_pvrow=0.03, rho_back_pvrow=0.05,
horizon_band_angle=15.,
run_parallel_calculations=True, n_workers_for_parallel_calcs=2):
"""
Calculate front and back surface plane-of-array irradiance on
a fixed tilt or single-axis tracker PV array configuration, and using
the open-source "pvfactors" package. pvfactors implements the model
described in [1]_.
Please refer to pvfactors online documentation for more details:
https://sunpower.github.io/pvfactors/
Parameters
----------
solar_azimuth: numeric
Sun's azimuth angles using pvlib's azimuth convention (deg)
solar_zenith: numeric
Sun's zenith angles (deg)
surface_azimuth: numeric
Azimuth angle of the front surface of the PV modules, using pvlib's
convention (deg)
surface_tilt: numeric
Tilt angle of the PV modules, going from 0 to 180 (deg)
axis_azimuth: float
Azimuth angle of the rotation axis of the PV modules, using pvlib's
convention (deg). This is supposed to be fixed for all timestamps.
timestamps: datetime or DatetimeIndex
List of simulation timestamps
dni: numeric
Direct normal irradiance (W/m2)
dhi: numeric
Diffuse horizontal irradiance (W/m2)
gcr: float
Ground coverage ratio of the pv array
pvrow_height: float
Height of the pv rows, measured at their center (m)
pvrow_width: float
Width of the pv rows in the considered 2D plane (m)
albedo: float
Ground albedo
n_pvrows: int, default 3
Number of PV rows to consider in the PV array
index_observed_pvrow: int, default 1
Index of the PV row whose incident irradiance will be returned. Indices
of PV rows go from 0 to n_pvrows-1.
rho_front_pvrow: float, default 0.03
Front surface reflectivity of PV rows
rho_back_pvrow: float, default 0.05
Back surface reflectivity of PV rows
horizon_band_angle: float, default 15
Elevation angle of the sky dome's diffuse horizon band (deg)
run_parallel_calculations: bool, default True
pvfactors is capable of using multiprocessing. Use this flag to decide
to run calculations in parallel (recommended) or not.
n_workers_for_parallel_calcs: int, default 2
Number of workers to use in the case of parallel calculations. The
'-1' value will lead to using a value equal to the number
        of CPUs on the machine running the model.
Returns
-------
front_poa_irradiance: numeric
Calculated incident irradiance on the front surface of the PV modules
(W/m2)
back_poa_irradiance: numeric
Calculated incident irradiance on the back surface of the PV modules
(W/m2)
df_registries: pandas DataFrame
DataFrame containing detailed outputs of the simulation; for
instance the shapely geometries, the irradiance components incident on
all surfaces of the PV array (for all timestamps), etc.
        In the pvfactors documentation, this is referred to as the "surface
registry".
References
----------
.. [1] Anoma, Marc Abou, et al. "View Factor Model and Validation for
Bifacial PV and Diffuse Shade on Single-Axis Trackers." 44th IEEE
Photovoltaic Specialist Conference. 2017.
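
    Examples
    --------
    Hedged usage sketch (the argument values here are hypothetical and not
    taken from pvlib's documentation)::

        front, back = pvfactors_timeseries(
            solar_azimuth, solar_zenith, surface_azimuth, surface_tilt,
            axis_azimuth=90., timestamps=times, dni=dni, dhi=dhi, gcr=0.4,
            pvrow_height=1.5, pvrow_width=2., albedo=0.2)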
"""
# Convert pandas Series inputs (and some lists) to numpy arrays
if isinstance(solar_azimuth, pd.Series):
solar_azimuth = solar_azimuth.values
elif isinstance(solar_azimuth, list):
solar_azimuth = np.array(solar_azimuth)
if isinstance(solar_zenith, pd.Series):
solar_zenith = solar_zenith.values
if isinstance(surface_azimuth, pd.Series):
surface_azimuth = surface_azimuth.values
elif isinstance(surface_azimuth, list):
surface_azimuth = np.array(surface_azimuth)
if isinstance(surface_tilt, pd.Series):
surface_tilt = surface_tilt.values
if isinstance(dni, pd.Series):
dni = dni.values
if isinstance(dhi, pd.Series):
dhi = dhi.values
if isinstance(solar_azimuth, list):
solar_azimuth = np.array(solar_azimuth)
# Import pvfactors functions for timeseries calculations.
from pvfactors.run import (run_timeseries_engine,
run_parallel_engine)
# Build up pv array configuration parameters
pvarray_parameters = {
'n_pvrows': n_pvrows,
'axis_azimuth': axis_azimuth,
'pvrow_height': pvrow_height,
'pvrow_width': pvrow_width,
'gcr': gcr,
'rho_front_pvrow': rho_front_pvrow,
'rho_back_pvrow': rho_back_pvrow,
'horizon_band_angle': horizon_band_angle
}
# Run pvfactors calculations: either in parallel or serially
if run_parallel_calculations:
report = run_parallel_engine(
PVFactorsReportBuilder, pvarray_parameters,
timestamps, dni, dhi,
solar_zenith, solar_azimuth,
surface_tilt, surface_azimuth,
albedo, n_processes=n_workers_for_parallel_calcs)
else:
report = run_timeseries_engine(
PVFactorsReportBuilder.build, pvarray_parameters,
timestamps, dni, dhi,
solar_zenith, solar_azimuth,
surface_tilt, surface_azimuth,
albedo)
# Turn report into dataframe
df_report = pd.DataFrame(report, index=timestamps)
return df_report.total_inc_front, df_report.total_inc_back
class PVFactorsReportBuilder(object):
"""In pvfactors, a class is required to build reports when running
calculations with multiprocessing because of python constraints"""
@staticmethod
def build(report, pvarray):
"""Reports will have total incident irradiance on front and
back surface of center pvrow (index=1)"""
# Initialize the report as a dictionary
if report is None:
report = {'total_inc_back': [], 'total_inc_front': []}
# Add elements to the report
if pvarray is not None:
pvrow = pvarray.pvrows[1] # use center pvrow
report['total_inc_back'].append(
pvrow.back.get_param_weighted('qinc'))
report['total_inc_front'].append(
pvrow.front.get_param_weighted('qinc'))
else:
# No calculation is performed when the sun is down
report['total_inc_back'].append(np.nan)
report['total_inc_front'].append(np.nan)
return report
@staticmethod
def merge(reports):
"""Works for dictionary reports. Merges the reports list of
dictionaries in a single dictionary. The list of the first
dictionary are extended by those of all subsequent lists."""
report = reports[0]
keys_report = list(report.keys())
for other_report in reports[1:]: # loop won't run if len(reports) < 2
for key in keys_report:
report[key] += other_report[key]
return report
| bsd-3-clause |
bcwolven/spidercam | spidercam.py | 1 | 12277 | #!/usr/local/bin/python3
import os
import argparse
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
# Convolution method?
# This was slow at best, and crashed on the high-res Moon pic for an
# authentically sized "spider FWHM."
# import scipy.ndimage as spnd
# self.imgSpdr[:,:,channel] = spnd.convolve(imgChan[:,:,channel],spiderPSF,
# mode='nearest')
# This method is much faster, with no crashing for a realistic kernel size
import scipy.signal as spsg
thisCode = os.path.realpath(__file__)
projRoot = os.path.dirname(thisCode)
class arachnivision():
"""spidercam, spidercam, sees the sky like a spider can."""
def __init__(self):
self.imgOrig = None
self.imgDimX = 0
self.imgDimY = 0
self.numChan = 0
self.numFWHM = 5. # Size of 2-D kernel in FWHM (+/- numFWHM/2)
self.peopleAngRes = 0.
self.spiderAngRes = 0.
self.sourceScale = 0.
self.spiderVisRespFile = \
"Habronattus_pyrrithrix_Photoreceptor_absorbance.csv"
def _setupFigure(self,figID):
# Is this scaling because of a matplotlib convention or did I just
# happen to use a 100 DPI image for testing? TBD - todo
self.figwid = 0.01*self.imgDimX
self.figrat = self.imgDimY/self.imgDimX
plt.figure(figID,figsize=(self.figwid,self.figwid*self.figrat))
plt.subplots_adjust(left=0.000,right=1.000,top=1.000,bottom=0.000)
plt.axes().set_xticks([])
plt.axes().set_yticks([])
def _makeGaussian(self,size,fwhm=3,center=None):
"""Make a square gaussian kernel (borrowed from stack overflow)
- Size is the length of a side of the square.
- Fwhm is full-width-half-maximum, which can be thought of as an
effective radius.
NOTE1 - kernel now normalized - was previously scaled so range was 0->1
NOTE2 - There's probably a package function for this somewhere already
"""
# x = np.arange(0, size, 1, float)
x = np.arange(0,size,1,dtype=np.longdouble)
y = x[:,np.newaxis]
if center is None:
x0 = y0 = size // 2
else:
x0 = center[0]
y0 = center[1]
kernel = np.exp(-4.*np.log(2.) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)
kernel /= np.sum(kernel)
return kernel
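    # Quick sanity sketch (not in the original script): a small kernel is
    # centred, positive and sums to one, e.g. on the `jumper` instance built
    # in __main__:
    #
    #   k = jumper._makeGaussian(5, fwhm=2)
    #   k.shape                    # -> (5, 5)
    #   np.isclose(k.sum(), 1.0)   # -> True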
def setSourcePlateScale(self,degreesPerPixel):
"""Set value for degrees per pixel in the input/source image"""
self.sourceScale = degreesPerPixel
def setPeopleAngularResolution(self,fwhmInDegrees):
"""Set value for FWHM of PSF in degrees, assumed to be Gaussian"""
self.peopleAngRes = fwhmInDegrees
def setSpiderAngularResolution(self,fwhmInDegrees):
"""Set value for FWHM of PSF in degrees, assumed to be Gaussian"""
self.spiderAngRes = fwhmInDegrees
def loadSpiderData(self):
csvFile = os.path.join(projRoot,self.spiderVisRespFile)
# Reads data but indexing of resulting 2-D array does not work as expected?
# import csv
# with open(csvFile,'rU') as csviter:
# csvRows = csv.reader(csviter)
# respData = []
# for rr,row in enumerate(csvRows):
# if rr == 0:
# columnHeaders = row
# else:
# respData.append([float(xx) for xx in row])
# respData = np.array(respData)
# print(len(columnHeaders),len(respData),np.shape(respData))
# print(respData[0])
# print(respData[-1])
# print(respData[0][0],respData[0][1],respData[0][2],respData[0][3])
# print([respData[0:10]][0])
# respData = np.reshape(respData)
# import sys
# sys.exit()
respData = np.genfromtxt(csvFile,dtype=float,delimiter=',',names=True)
colmName = respData.dtype.names
print("Read file: %s" % self.spiderVisRespFile)
print("Extracted columns:")
for header in colmName:
print(header)
plt.figure('spiderVisResp')
plt.axes().set_title(self.spiderVisRespFile)
plt.axes().set_xlabel('Wavelength (nm)')
plt.axes().set_ylabel('Normalized Photoreceptor Absorbance')
plt.grid(True)
plt.plot(respData[colmName[0]][:],respData[colmName[1]][:],color='b',
label=colmName[1])
plt.plot(respData[colmName[0]][:],respData[colmName[2]][:],color='g',
label=colmName[2])
plt.plot(respData[colmName[0]][:],respData[colmName[3]][:],color='r',
label=colmName[3])
plt.legend(loc='lower center',fontsize=6)
plt.savefig(os.path.join(projRoot,"photoreceptor-absorbance.png"))
# plt.clf()
# plt.cla()
def loadSourceImage(self,srcImg):
"""Load source image and set dimensions. Assuming color channels are in
last dimension at the moment."""
self.srcImg = srcImg # File basename, without full path
self.imgOrig = mpimg.imread(os.path.join(projRoot,srcImg))
imgDims = np.shape(self.imgOrig)
self.imgDimX = imgDims[1] # Yeah, this isn't IDL, deal with it
self.imgDimY = imgDims[0] # Yeah, this still isn't IDL, deal with it
self.numChan = imgDims[2]
print("Loaded source image: %s" % self.srcImg)
def sourceToEyeball(self):
"""Take a source image and 1) convolve with 0.02º FWHM Gaussian PSF to
estimate what people would see with the naked eye, and 2) convolve with
0.07º FWHM Gaussian PSF and modify the color balance to try and
replicate what a jumping spider might see if it gazed up at the night
sky."""
imgChan = self.imgOrig.astype('float64')/255. # Rescale 0-255 -> 0.-1.
self.imgPepl = np.empty_like(imgChan) # Store convolved version here
self.imgSpdr = np.empty_like(imgChan) # Store convolved version here
# Make a 2-D Gaussian kernel for people and spider eye PSFs.
        # FWHM and corresponding kernel size are image dependent, set by the angular
        # resolution of the particular critter's visual system and the plate
        # scale (degrees per pixel here) of the image. The plate scale and
        # visual angular resolutions are assumed to be the
# same in both dimensions at present.
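        # Worked illustration with the script defaults (not from the original
        # comments): 0.007 deg / 2.222e-04 deg-per-pixel ~= 31.5 pixels FWHM
        # for the people kernel, i.e. ~157 pixels across at 5 FWHM; the spider
        # kernel at 0.07 deg is ~315 pixels FWHM, i.e. a ~1575-pixel kernel.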
peopleFWHM = self.peopleAngRes/self.sourceScale
peopleSize = np.int(self.numFWHM*peopleFWHM) # Extend kernel to N FWHM
peoplePSF = self._makeGaussian(peopleSize,fwhm=peopleFWHM)
# peoplePSF /= np.sum(peoplePSF) # Normalize kernel... or else
spiderFWHM = self.spiderAngRes/self.sourceScale
spiderSize = np.int(self.numFWHM*spiderFWHM) # Extend kernel to N FWHM
spiderPSF = self._makeGaussian(spiderSize,fwhm=spiderFWHM)
# spiderPSF /= np.sum(spiderPSF) # Normalize kernel... or else
# Do people-eye convolution, using original color channel weighting.
for channel in range(self.numChan):
self.imgPepl[:,:,channel] = spsg.fftconvolve(imgChan[:,:,channel],
peoplePSF,mode='same')
# Tweak color balance for spider version - just an utter SWAG right now.
# Eventually this ought to be its own method, relying on the spectral
# information of the source image and the spectral response of the
# critter visual system.
imgChan[:,:,0] *= 0.85 # Red
imgChan[:,:,1] *= 1.00 # Green
imgChan[:,:,2] *= 0.85 # Blue
# Do spider eye convolution, using modified color channel weighting.
for channel in range(self.numChan):
self.imgSpdr[:,:,channel] = spsg.fftconvolve(imgChan[:,:,channel],
spiderPSF,mode='same')
def saveSourceImage(self):
self._setupFigure('source')
plt.imshow(jumper.imgOrig)
print("Saving unaltered version.")
plt.savefig(os.path.join(projRoot,"source-"+self.srcImg))
def savePeopleImage(self):
self._setupFigure('people')
plt.imshow(jumper.imgPepl)
print("Saving people/naked eye version.")
plt.savefig(os.path.join(projRoot,"people-"+self.srcImg))
def saveSpiderImage(self):
self._setupFigure('spider')
plt.imshow(jumper.imgSpdr)
print("Saving spider-eyes-ed version.")
plt.savefig(os.path.join(projRoot,"spider-"+self.srcImg))
if __name__ == "__main__":
# Use argparse to... parse args
parser = argparse.ArgumentParser(description="Simulate what a jumping "
"spider might see when they look at an "
"object in the night sky.")
parser.add_argument("-i","--image",required=False,
default="20141008tleBaldridge001.jpg",
help="Source image")
# default="beletskYairglow_pano.jpg",
# 2250 pixels for moon diameter of ~0.5 degrees.
parser.add_argument("-d","--plate-scale",required=False,type=float,
default=2.222e-04,help="Plate scale of source image - "
"For default image is 2.222e-04 degrees/pixel")
parser.add_argument("-p","--people-resolution",required=False,type=float,
default=0.007,help="Resolution to use for human eye - "
"default is foveal resolution of 0.007 degrees")
parser.add_argument("-s","--spider-resolution",required=False,type=float,
default=0.070,help="Resolution to use for spider eye - "
"default is resolution of 0.07 degrees")
# Process arguments - no screening for valid inputs done here beyond what
# argparse does internally right now.
args = parser.parse_args()
srcImg = args.image # relative path from directory containing this file
# Create instance of class to load and manipulate image.
jumper = arachnivision()
# Set plate scale (degrees/pixel) of the source image.
jumper.setSourcePlateScale(args.plate_scale)
# Set the visual angular resolution of the two critters in question -
# "People" and "Spider" ony at the moment. Perhaps make more general later?
jumper.setPeopleAngularResolution(args.people_resolution)
jumper.setSpiderAngularResolution(args.spider_resolution)
# Load spider photoreceptor absorbance curves
jumper.loadSpiderData()
# Load source image
jumper.loadSourceImage(srcImg)
# Save copy of original with "source" stuck at front of name - have we done
# any violence to it unintentionally in loading and saving? Sanity check...
jumper.saveSourceImage()
# Modify it to something resembling what spider would see?
jumper.sourceToEyeball()
# Save convolved version of original with "people" stuck at front of name.
# This is identical to the original in terms of color balance, but uses a
# people-vision-specific angular resolution.
jumper.savePeopleImage()
# Save "spider-eyes-ed" version with "spider" stuck at front of name. This
# is different from the original in terms of both color balance and the fact
# that it uses a spider-vision-specific angular resolution.
jumper.saveSpiderImage()
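    # For reference, a hypothetical command-line invocation (the script
    # filename used here is assumed, not taken from this file; the flags are
    # the ones defined above, shown with their default values):
    #
    #     python arachnivision_demo.py --image 20141008tleBaldridge001.jpg \
    #         --plate-scale 2.222e-04 --people-resolution 0.007 \
    #         --spider-resolution 0.070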
# Miscellaneous discussion notes and source Tweet references:
#
# Jumping spider vision angular resolution quoted to be ~0.07 degrees. Wikipedia
# quotes the value for a typical human eye as 0.02 degrees, so only about 3.5 times
# better! But *foveal* resolution is closer to 0.007º or 10 times better.
# Perhaps Wikipedia value is for rods rather than cones? The foveal area of the
# retina sees a swath ~2º wide, located at center of retina.
# "So did some back of envelope calcs. The jumping spider in our avatar
# (Habronattus pyrrithrix) can def see the moon, maybe even craters…"
# https://twitter.com/MorehouseLab/status/872081983819612161
# "Moon diameter is 9.22x10^-3 radians, or ~0.53 deg visual angle. H.
# pyrrithrix can resolve objects up to 0.07 deg visual angle."
# https://twitter.com/MorehouseLab/status/872082579217887232
| mit |
rhattersley/cartopy | lib/cartopy/crs.py | 1 | 90404 | # (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
The crs module defines Coordinate Reference Systems and the transformations
between them.
"""
from __future__ import (absolute_import, division, print_function)
from abc import ABCMeta, abstractproperty
import math
import warnings
import numpy as np
import shapely.geometry as sgeom
from shapely.prepared import prep
import six
from cartopy._crs import CRS, Geodetic, Globe, PROJ4_VERSION
from cartopy._crs import Geocentric # noqa: F401 (flake8 = unused import)
import cartopy.trace
__document_these__ = ['CRS', 'Geocentric', 'Geodetic', 'Globe']
WGS84_SEMIMAJOR_AXIS = 6378137.0
WGS84_SEMIMINOR_AXIS = 6356752.3142
class RotatedGeodetic(CRS):
"""
Define a rotated latitude/longitude coordinate system with spherical
topology and geographical distance.
Coordinates are measured in degrees.
The class uses proj to perform an ob_tran operation, using the
pole_longitude to set a lon_0 then performing two rotations based on
pole_latitude and central_rotated_longitude.
This is equivalent to setting the new pole to a location defined by
the pole_latitude and pole_longitude values in the GeogCRS defined by
    globe, then rotating this new CRS about its pole using the
central_rotated_longitude value.
"""
def __init__(self, pole_longitude, pole_latitude,
central_rotated_longitude=0.0, globe=None):
"""
Parameters
----------
pole_longitude
Pole longitude position, in unrotated degrees.
pole_latitude
Pole latitude position, in unrotated degrees.
central_rotated_longitude: optional
Longitude rotation about the new pole, in degrees. Defaults to 0.
globe: optional
A :class:`cartopy.crs.Globe`. Defaults to a "WGS84" datum.
"""
proj4_params = [('proj', 'ob_tran'), ('o_proj', 'latlon'),
('o_lon_p', central_rotated_longitude),
('o_lat_p', pole_latitude),
('lon_0', 180 + pole_longitude),
('to_meter', math.radians(1))]
globe = globe or Globe(datum='WGS84')
super(RotatedGeodetic, self).__init__(proj4_params, globe=globe)
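# A minimal usage sketch for RotatedGeodetic (the pole values below are purely
# illustrative and are not taken from this module): build a rotated-pole CRS
# and convert the rotated-grid origin back to true longitude/latitude.
#
#     rotated = RotatedGeodetic(pole_longitude=177.5, pole_latitude=37.5)
#     lon, lat = Geodetic().transform_point(0.0, 0.0, rotated)
#
# transform_point is provided by the CRS base class imported above.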
class Projection(six.with_metaclass(ABCMeta, CRS)):
"""
Define a projected coordinate system with flat topology and Euclidean
distance.
"""
_method_map = {
'Point': '_project_point',
'LineString': '_project_line_string',
'LinearRing': '_project_linear_ring',
'Polygon': '_project_polygon',
'MultiPoint': '_project_multipoint',
'MultiLineString': '_project_multiline',
'MultiPolygon': '_project_multipolygon',
}
@abstractproperty
def boundary(self):
pass
@abstractproperty
def threshold(self):
pass
@abstractproperty
def x_limits(self):
pass
@abstractproperty
def y_limits(self):
pass
@property
def cw_boundary(self):
try:
boundary = self._cw_boundary
except AttributeError:
boundary = sgeom.LinearRing(self.boundary)
self._cw_boundary = boundary
return boundary
@property
def ccw_boundary(self):
try:
boundary = self._ccw_boundary
except AttributeError:
boundary = sgeom.LinearRing(self.boundary.coords[::-1])
self._ccw_boundary = boundary
return boundary
@property
def domain(self):
try:
domain = self._domain
except AttributeError:
domain = self._domain = sgeom.Polygon(self.boundary)
return domain
def _determine_longitude_bounds(self, central_longitude):
# In new proj, using exact limits will wrap-around, so subtract a
# small epsilon:
epsilon = 1e-10
minlon = -180 + central_longitude
maxlon = 180 + central_longitude
if central_longitude > 0:
maxlon -= epsilon
elif central_longitude < 0:
minlon += epsilon
return minlon, maxlon
def _repr_html_(self):
import cgi
try:
# As matplotlib is not a core cartopy dependency, don't error
# if it's not available.
import matplotlib.pyplot as plt
except ImportError:
# We can't return an SVG of the CRS, so let Jupyter fall back to
# a default repr by returning None.
return None
# Produce a visual repr of the Projection instance.
fig, ax = plt.subplots(figsize=(5, 3),
subplot_kw={'projection': self})
ax.set_global()
ax.coastlines('auto')
ax.gridlines()
buf = six.StringIO()
fig.savefig(buf, format='svg', bbox_inches='tight')
plt.close(fig)
# "Rewind" the buffer to the start and return it as an svg string.
buf.seek(0)
svg = buf.read()
return '{}<pre>{}</pre>'.format(svg, cgi.escape(repr(self)))
def _as_mpl_axes(self):
import cartopy.mpl.geoaxes as geoaxes
return geoaxes.GeoAxes, {'map_projection': self}
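    # Because of _as_mpl_axes above, a Projection instance can be passed
    # directly to Matplotlib as a projection, e.g. (illustrative only):
    #
    #     import matplotlib.pyplot as plt
    #     ax = plt.axes(projection=PlateCarree())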
def project_geometry(self, geometry, src_crs=None):
"""
Project the given geometry into this projection.
Parameters
----------
geometry
The geometry to (re-)project.
src_crs: optional
The source CRS. Defaults to None.
If src_crs is None, the source CRS is assumed to be a geodetic
version of the target CRS.
Returns
-------
geometry
The projected result (a shapely geometry).
"""
if src_crs is None:
src_crs = self.as_geodetic()
elif not isinstance(src_crs, CRS):
raise TypeError('Source CRS must be an instance of CRS'
' or one of its subclasses, or None.')
geom_type = geometry.geom_type
method_name = self._method_map.get(geom_type)
if not method_name:
raise ValueError('Unsupported geometry '
'type {!r}'.format(geom_type))
return getattr(self, method_name)(geometry, src_crs)
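    # A short usage sketch for project_geometry above (PlateCarree is a
    # concrete Projection defined later in this module; the coordinates are
    # illustrative lon/lat degrees):
    #
    #     line = sgeom.LineString([(-10, 50), (10, 60)])
    #     projected = PlateCarree().project_geometry(line, Geodetic())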
def _project_point(self, point, src_crs):
return sgeom.Point(*self.transform_point(point.x, point.y, src_crs))
def _project_line_string(self, geometry, src_crs):
return cartopy.trace.project_linear(geometry, src_crs, self)
def _project_linear_ring(self, linear_ring, src_crs):
"""
Project the given LinearRing from the src_crs into this CRS and
returns a list of LinearRings and a single MultiLineString.
"""
debug = False
# 1) Resolve the initial lines into projected segments
# 1abc
# def23ghi
# jkl41
multi_line_string = cartopy.trace.project_linear(linear_ring,
src_crs, self)
# Threshold for whether a point is close enough to be the same
# point as another.
threshold = max(np.abs(self.x_limits + self.y_limits)) * 1e-5
# 2) Simplify the segments where appropriate.
if len(multi_line_string) > 1:
# Stitch together segments which are close to continuous.
# This is important when:
# 1) The first source point projects into the map and the
# ring has been cut by the boundary.
# Continuing the example from above this gives:
# def23ghi
# jkl41abc
# 2) The cut ends of segments are too close to reliably
# place into an order along the boundary.
line_strings = list(multi_line_string)
any_modified = False
i = 0
if debug:
first_coord = np.array([ls.coords[0] for ls in line_strings])
last_coord = np.array([ls.coords[-1] for ls in line_strings])
print('Distance matrix:')
np.set_printoptions(precision=2)
x = first_coord[:, np.newaxis, :]
y = last_coord[np.newaxis, :, :]
print(np.abs(x - y).max(axis=-1))
while i < len(line_strings):
modified = False
j = 0
while j < len(line_strings):
if i != j and np.allclose(line_strings[i].coords[0],
line_strings[j].coords[-1],
atol=threshold):
if debug:
print('Joining together {} and {}.'.format(i, j))
last_coords = list(line_strings[j].coords)
first_coords = list(line_strings[i].coords)[1:]
combo = sgeom.LineString(last_coords + first_coords)
if j < i:
i, j = j, i
del line_strings[j], line_strings[i]
line_strings.append(combo)
modified = True
any_modified = True
break
else:
j += 1
if not modified:
i += 1
if any_modified:
multi_line_string = sgeom.MultiLineString(line_strings)
# 3) Check for rings that have been created by the projection stage.
rings = []
line_strings = []
for line in multi_line_string:
if len(line.coords) > 3 and np.allclose(line.coords[0],
line.coords[-1],
atol=threshold):
result_geometry = sgeom.LinearRing(line.coords[:-1])
rings.append(result_geometry)
else:
line_strings.append(line)
# If we found any rings, then we should re-create the multi-line str.
if rings:
multi_line_string = sgeom.MultiLineString(line_strings)
return rings, multi_line_string
def _project_multipoint(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
geoms.append(self._project_point(geom, src_crs))
if geoms:
return sgeom.MultiPoint(geoms)
else:
return sgeom.MultiPoint()
def _project_multiline(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
r = self._project_line_string(geom, src_crs)
if r:
geoms.extend(r.geoms)
if geoms:
return sgeom.MultiLineString(geoms)
else:
return []
def _project_multipolygon(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
r = self._project_polygon(geom, src_crs)
if r:
geoms.extend(r.geoms)
if geoms:
result = sgeom.MultiPolygon(geoms)
else:
result = sgeom.MultiPolygon()
return result
def _project_polygon(self, polygon, src_crs):
"""
Return the projected polygon(s) derived from the given polygon.
"""
# Determine orientation of polygon.
# TODO: Consider checking the internal rings have the opposite
# orientation to the external rings?
if src_crs.is_geodetic():
is_ccw = True
else:
is_ccw = polygon.exterior.is_ccw
# Project the polygon exterior/interior rings.
# Each source ring will result in either a ring, or one or more
# lines.
rings = []
multi_lines = []
for src_ring in [polygon.exterior] + list(polygon.interiors):
p_rings, p_mline = self._project_linear_ring(src_ring, src_crs)
if p_rings:
rings.extend(p_rings)
if len(p_mline) > 0:
multi_lines.append(p_mline)
# Convert any lines to rings by attaching them to the boundary.
if multi_lines:
rings.extend(self._attach_lines_to_boundary(multi_lines, is_ccw))
# Resolve all the inside vs. outside rings, and convert to the
# final MultiPolygon.
return self._rings_to_multi_polygon(rings, is_ccw)
def _attach_lines_to_boundary(self, multi_line_strings, is_ccw):
"""
Return a list of LinearRings by attaching the ends of the given lines
to the boundary, paying attention to the traversal directions of the
lines and boundary.
"""
debug = False
debug_plot_edges = False
# Accumulate all the boundary and segment end points, along with
# their distance along the boundary.
edge_things = []
# Get the boundary as a LineString of the correct orientation
# so we can compute distances along it.
if is_ccw:
boundary = self.ccw_boundary
else:
boundary = self.cw_boundary
def boundary_distance(xy):
return boundary.project(sgeom.Point(*xy))
# Squash all the LineStrings into a single list.
line_strings = []
for multi_line_string in multi_line_strings:
line_strings.extend(multi_line_string)
# Record the positions of all the segment ends
for i, line_string in enumerate(line_strings):
first_dist = boundary_distance(line_string.coords[0])
thing = _BoundaryPoint(first_dist, False,
(i, 'first', line_string.coords[0]))
edge_things.append(thing)
last_dist = boundary_distance(line_string.coords[-1])
thing = _BoundaryPoint(last_dist, False,
(i, 'last', line_string.coords[-1]))
edge_things.append(thing)
# Record the positions of all the boundary vertices
for xy in boundary.coords[:-1]:
point = sgeom.Point(*xy)
dist = boundary.project(point)
thing = _BoundaryPoint(dist, True, point)
edge_things.append(thing)
if debug_plot_edges:
import matplotlib.pyplot as plt
current_fig = plt.gcf()
fig = plt.figure()
# Reset the current figure so we don't upset anything.
plt.figure(current_fig.number)
ax = fig.add_subplot(1, 1, 1)
# Order everything as if walking around the boundary.
# NB. We make line end-points take precedence over boundary points
# to ensure that end-points are still found and followed when they
# coincide.
edge_things.sort(key=lambda thing: (thing.distance, thing.kind))
remaining_ls = dict(enumerate(line_strings))
prev_thing = None
for edge_thing in edge_things[:]:
if (prev_thing is not None and
not edge_thing.kind and
not prev_thing.kind and
edge_thing.data[0] == prev_thing.data[0]):
j = edge_thing.data[0]
                # Insert an artificial edge boundary point between the two
                # ends of this geometry.
mid_dist = (edge_thing.distance + prev_thing.distance) * 0.5
mid_point = boundary.interpolate(mid_dist)
new_thing = _BoundaryPoint(mid_dist, True, mid_point)
if debug:
print('Artificially insert boundary: {}'.format(new_thing))
ind = edge_things.index(edge_thing)
edge_things.insert(ind, new_thing)
prev_thing = None
else:
prev_thing = edge_thing
if debug:
print()
print('Edge things')
for thing in edge_things:
print(' ', thing)
if debug_plot_edges:
for thing in edge_things:
if isinstance(thing.data, sgeom.Point):
ax.plot(*thing.data.xy, marker='o')
else:
ax.plot(*thing.data[2], marker='o')
ls = line_strings[thing.data[0]]
coords = np.array(ls.coords)
ax.plot(coords[:, 0], coords[:, 1])
ax.text(coords[0, 0], coords[0, 1], thing.data[0])
ax.text(coords[-1, 0], coords[-1, 1],
'{}.'.format(thing.data[0]))
def filter_last(t):
return t.kind or t.data[1] == 'first'
edge_things = list(filter(filter_last, edge_things))
processed_ls = []
while remaining_ls:
# Rename line_string to current_ls
i, current_ls = remaining_ls.popitem()
if debug:
import sys
sys.stdout.write('+')
sys.stdout.flush()
print()
print('Processing: %s, %s' % (i, current_ls))
added_linestring = set()
while True:
# Find out how far around this linestring's last
# point is on the boundary. We will use this to find
# the next point on the boundary.
d_last = boundary_distance(current_ls.coords[-1])
if debug:
print(' d_last: {!r}'.format(d_last))
next_thing = _find_first_ge(edge_things, d_last)
# Remove this boundary point from the edge.
edge_things.remove(next_thing)
if debug:
print(' next_thing:', next_thing)
if next_thing.kind:
# We've just got a boundary point, add it, and keep going.
if debug:
print(' adding boundary point')
boundary_point = next_thing.data
combined_coords = (list(current_ls.coords) +
[(boundary_point.x, boundary_point.y)])
current_ls = sgeom.LineString(combined_coords)
elif next_thing.data[0] == i:
# We've gone all the way around and are now back at the
# first boundary thing.
if debug:
print(' close loop')
processed_ls.append(current_ls)
if debug_plot_edges:
coords = np.array(current_ls.coords)
ax.plot(coords[:, 0], coords[:, 1], color='black',
linestyle='--')
break
else:
if debug:
print(' adding line')
j = next_thing.data[0]
line_to_append = line_strings[j]
if j in remaining_ls:
remaining_ls.pop(j)
coords_to_append = list(line_to_append.coords)
# Build up the linestring.
current_ls = sgeom.LineString((list(current_ls.coords) +
coords_to_append))
# Catch getting stuck in an infinite loop by checking that
# linestring only added once.
if j not in added_linestring:
added_linestring.add(j)
else:
if debug_plot_edges:
plt.show()
raise RuntimeError('Unidentified problem with '
'geometry, linestring being '
're-added. Please raise an issue.')
# filter out any non-valid linear rings
linear_rings = [
sgeom.LinearRing(linear_ring)
for linear_ring in processed_ls
if len(linear_ring.coords) > 2 and linear_ring.is_valid]
if debug:
print(' DONE')
return linear_rings
def _rings_to_multi_polygon(self, rings, is_ccw):
exterior_rings = []
interior_rings = []
for ring in rings:
if ring.is_ccw != is_ccw:
interior_rings.append(ring)
else:
exterior_rings.append(ring)
polygon_bits = []
# Turn all the exterior rings into polygon definitions,
# "slurping up" any interior rings they contain.
for exterior_ring in exterior_rings:
polygon = sgeom.Polygon(exterior_ring)
prep_polygon = prep(polygon)
holes = []
for interior_ring in interior_rings[:]:
if prep_polygon.contains(interior_ring):
holes.append(interior_ring)
interior_rings.remove(interior_ring)
elif polygon.crosses(interior_ring):
# Likely that we have an invalid geometry such as
# that from #509 or #537.
holes.append(interior_ring)
interior_rings.remove(interior_ring)
polygon_bits.append((exterior_ring.coords,
[ring.coords for ring in holes]))
# Any left over "interior" rings need "inverting" with respect
# to the boundary.
if interior_rings:
boundary_poly = self.domain
x3, y3, x4, y4 = boundary_poly.bounds
bx = (x4 - x3) * 0.1
by = (y4 - y3) * 0.1
x3 -= bx
y3 -= by
x4 += bx
y4 += by
for ring in interior_rings:
# Use shapely buffer in an attempt to fix invalid geometries
polygon = sgeom.Polygon(ring).buffer(0)
if not polygon.is_empty and polygon.is_valid:
x1, y1, x2, y2 = polygon.bounds
bx = (x2 - x1) * 0.1
by = (y2 - y1) * 0.1
x1 -= bx
y1 -= by
x2 += bx
y2 += by
box = sgeom.box(min(x1, x3), min(y1, y3),
max(x2, x4), max(y2, y4))
# Invert the polygon
polygon = box.difference(polygon)
# Intersect the inverted polygon with the boundary
polygon = boundary_poly.intersection(polygon)
if not polygon.is_empty:
polygon_bits.append(polygon)
if polygon_bits:
multi_poly = sgeom.MultiPolygon(polygon_bits)
else:
multi_poly = sgeom.MultiPolygon()
return multi_poly
def quick_vertices_transform(self, vertices, src_crs):
"""
Where possible, return a vertices array transformed to this CRS from
the given vertices array of shape ``(n, 2)`` and the source CRS.
Note
----
This method may return None to indicate that the vertices cannot
be transformed quickly, and a more complex geometry transformation
is required (see :meth:`cartopy.crs.Projection.project_geometry`).
"""
return_value = None
if self == src_crs:
x = vertices[:, 0]
y = vertices[:, 1]
# Extend the limits a tiny amount to allow for precision mistakes
epsilon = 1.e-10
x_limits = (self.x_limits[0] - epsilon, self.x_limits[1] + epsilon)
y_limits = (self.y_limits[0] - epsilon, self.y_limits[1] + epsilon)
if (x.min() >= x_limits[0] and x.max() <= x_limits[1] and
y.min() >= y_limits[0] and y.max() <= y_limits[1]):
return_value = vertices
return return_value
class _RectangularProjection(six.with_metaclass(ABCMeta, Projection)):
"""
The abstract superclass of projections with a rectangular domain which
is symmetric about the origin.
"""
def __init__(self, proj4_params, half_width, half_height, globe=None):
self._half_width = half_width
self._half_height = half_height
super(_RectangularProjection, self).__init__(proj4_params, globe=globe)
@property
def boundary(self):
# XXX Should this be a LinearRing?
w, h = self._half_width, self._half_height
return sgeom.LineString([(-w, -h), (-w, h), (w, h), (w, -h), (-w, -h)])
@property
def x_limits(self):
return (-self._half_width, self._half_width)
@property
def y_limits(self):
return (-self._half_height, self._half_height)
class _CylindricalProjection(six.with_metaclass(ABCMeta,
_RectangularProjection)):
"""
The abstract class which denotes cylindrical projections where we
want to allow x values to wrap around.
"""
def _ellipse_boundary(semimajor=2, semiminor=1, easting=0, northing=0, n=201):
"""
Define a projection boundary using an ellipse.
This type of boundary is used by several projections.
"""
t = np.linspace(0, -2 * np.pi, n) # Clockwise boundary.
coords = np.vstack([semimajor * np.cos(t), semiminor * np.sin(t)])
coords += ([easting], [northing])
return coords
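# Illustrative only: _ellipse_boundary returns a (2, n) array of x/y values
# traced clockwise, which callers below convert to a ring via ``coords.T``:
#
#     coords = _ellipse_boundary(semimajor=2, semiminor=1, n=5)
#     ring = sgeom.LinearRing(coords.T)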
class PlateCarree(_CylindricalProjection):
def __init__(self, central_longitude=0.0, globe=None):
proj4_params = [('proj', 'eqc'), ('lon_0', central_longitude)]
if globe is None:
globe = Globe(semimajor_axis=math.degrees(1))
a_rad = math.radians(globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
x_max = a_rad * 180
y_max = a_rad * 90
# Set the threshold around 0.5 if the x max is 180.
self._threshold = x_max / 360.
super(PlateCarree, self).__init__(proj4_params, x_max, y_max,
globe=globe)
@property
def threshold(self):
return self._threshold
def _bbox_and_offset(self, other_plate_carree):
"""
Return a pair of (xmin, xmax) pairs and an offset which can be used
for identification of whether data in ``other_plate_carree`` needs
to be transformed to wrap appropriately.
>>> import cartopy.crs as ccrs
>>> src = ccrs.PlateCarree(central_longitude=10)
>>> bboxes, offset = ccrs.PlateCarree()._bbox_and_offset(src)
>>> print(bboxes)
[[-180.0, -170.0], [-170.0, 180.0]]
>>> print(offset)
10.0
The returned values are longitudes in ``other_plate_carree``'s
coordinate system.
Warning
-------
The two CRSs must be identical in every way, other than their
central longitudes. No checking of this is done.
"""
self_lon_0 = self.proj4_params['lon_0']
other_lon_0 = other_plate_carree.proj4_params['lon_0']
lon_0_offset = other_lon_0 - self_lon_0
lon_lower_bound_0 = self.x_limits[0]
lon_lower_bound_1 = (other_plate_carree.x_limits[0] + lon_0_offset)
if lon_lower_bound_1 < self.x_limits[0]:
lon_lower_bound_1 += np.diff(self.x_limits)[0]
lon_lower_bound_0, lon_lower_bound_1 = sorted(
[lon_lower_bound_0, lon_lower_bound_1])
bbox = [[lon_lower_bound_0, lon_lower_bound_1],
[lon_lower_bound_1, lon_lower_bound_0]]
bbox[1][1] += np.diff(self.x_limits)[0]
return bbox, lon_0_offset
def quick_vertices_transform(self, vertices, src_crs):
return_value = super(PlateCarree,
self).quick_vertices_transform(vertices, src_crs)
# Optimise the PlateCarree -> PlateCarree case where no
# wrapping or interpolation needs to take place.
if return_value is None and isinstance(src_crs, PlateCarree):
self_params = self.proj4_params.copy()
src_params = src_crs.proj4_params.copy()
self_params.pop('lon_0'), src_params.pop('lon_0')
xs, ys = vertices[:, 0], vertices[:, 1]
potential = (self_params == src_params and
self.y_limits[0] <= ys.min() and
self.y_limits[1] >= ys.max())
if potential:
mod = np.diff(src_crs.x_limits)[0]
bboxes, proj_offset = self._bbox_and_offset(src_crs)
x_lim = xs.min(), xs.max()
for poly in bboxes:
# Arbitrarily choose the number of moduli to look
# above and below the -180->180 range. If data is beyond
# this range, we're not going to transform it quickly.
for i in [-1, 0, 1, 2]:
offset = mod * i - proj_offset
if ((poly[0] + offset) <= x_lim[0] and
(poly[1] + offset) >= x_lim[1]):
return_value = vertices + [[-offset, 0]]
break
if return_value is not None:
break
return return_value
class TransverseMercator(Projection):
"""
A Transverse Mercator projection.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
scale_factor=1.0, globe=None):
"""
Parameters
----------
central_longitude: optional
The true longitude of the central meridian in degrees.
Defaults to 0.
central_latitude: optional
The true latitude of the planar origin in degrees. Defaults to 0.
false_easting: optional
X offset from the planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from the planar origin in metres. Defaults to 0.
scale_factor: optional
Scale factor at the central meridian. Defaults to 1.
globe: optional
An instance of :class:`cartopy.crs.Globe`. If omitted, a default
globe is created.
"""
proj4_params = [('proj', 'tmerc'), ('lon_0', central_longitude),
('lat_0', central_latitude), ('k', scale_factor),
('x_0', false_easting), ('y_0', false_northing),
('units', 'm')]
super(TransverseMercator, self).__init__(proj4_params, globe=globe)
@property
def threshold(self):
return 1e4
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def x_limits(self):
return (-2e7, 2e7)
@property
def y_limits(self):
return (-1e7, 1e7)
class OSGB(TransverseMercator):
def __init__(self):
super(OSGB, self).__init__(central_longitude=-2, central_latitude=49,
scale_factor=0.9996012717,
false_easting=400000,
false_northing=-100000,
globe=Globe(datum='OSGB36', ellipse='airy'))
@property
def boundary(self):
w = self.x_limits[1] - self.x_limits[0]
h = self.y_limits[1] - self.y_limits[0]
return sgeom.LineString([(0, 0), (0, h), (w, h), (w, 0), (0, 0)])
@property
def x_limits(self):
return (0, 7e5)
@property
def y_limits(self):
return (0, 13e5)
class OSNI(TransverseMercator):
def __init__(self):
globe = Globe(semimajor_axis=6377340.189,
semiminor_axis=6356034.447938534)
super(OSNI, self).__init__(central_longitude=-8,
central_latitude=53.5,
scale_factor=1.000035,
false_easting=200000,
false_northing=250000,
globe=globe)
@property
def boundary(self):
w = self.x_limits[1] - self.x_limits[0]
h = self.y_limits[1] - self.y_limits[0]
return sgeom.LineString([(0, 0), (0, h), (w, h), (w, 0), (0, 0)])
@property
def x_limits(self):
return (18814.9667, 386062.3293)
@property
def y_limits(self):
return (11764.8481, 464720.9559)
class UTM(Projection):
"""
Universal Transverse Mercator projection.
"""
def __init__(self, zone, southern_hemisphere=False, globe=None):
"""
Parameters
----------
zone
The numeric zone of the UTM required.
southern_hemisphere: optional
Set to True if the zone is in the southern hemisphere. Defaults to
False.
globe: optional
An instance of :class:`cartopy.crs.Globe`. If omitted, a default
globe is created.
"""
proj4_params = [('proj', 'utm'),
('units', 'm'),
('zone', zone)]
if southern_hemisphere:
proj4_params.append(('south', None))
super(UTM, self).__init__(proj4_params, globe=globe)
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def threshold(self):
return 1e2
@property
def x_limits(self):
easting = 5e5
# allow 50% overflow
return (0 - easting/2, 2 * easting + easting/2)
@property
def y_limits(self):
northing = 1e7
# allow 50% overflow
return (0 - northing, 2 * northing + northing/2)
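# Illustrative only: a UTM projection is selected by zone number (zone 33
# spans roughly 12-18 deg E); southern_hemisphere adds the proj "south" flag.
#
#     utm33n = UTM(33)
#     utm33s = UTM(33, southern_hemisphere=True)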
class EuroPP(UTM):
"""
UTM Zone 32 projection for EuroPP domain.
Ellipsoid is International 1924, Datum is ED50.
"""
def __init__(self):
globe = Globe(ellipse='intl')
super(EuroPP, self).__init__(32, globe=globe)
@property
def x_limits(self):
return (-1.4e6, 2e6)
@property
def y_limits(self):
return (4e6, 7.9e6)
class Mercator(Projection):
"""
A Mercator projection.
"""
def __init__(self, central_longitude=0.0,
min_latitude=-80.0, max_latitude=84.0,
globe=None, latitude_true_scale=None,
false_easting=0.0, false_northing=0.0, scale_factor=None):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to 0.
min_latitude: optional
The maximum southerly extent of the projection. Defaults
to -80 degrees.
max_latitude: optional
The maximum northerly extent of the projection. Defaults
to 84 degrees.
globe: A :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
latitude_true_scale: optional
The latitude where the scale is 1. Defaults to 0 degrees.
false_easting: optional
X offset from the planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from the planar origin in metres. Defaults to 0.
scale_factor: optional
Scale factor at natural origin. Defaults to unused.
Notes
-----
Only one of ``latitude_true_scale`` and ``scale_factor`` should
be included.
"""
proj4_params = [('proj', 'merc'),
('lon_0', central_longitude),
('x_0', false_easting),
('y_0', false_northing),
('units', 'm')]
# If it's None, we don't pass it to Proj4, in which case its default
# of 0.0 will be used.
if latitude_true_scale is not None:
proj4_params.append(('lat_ts', latitude_true_scale))
if scale_factor is not None:
if latitude_true_scale is not None:
raise ValueError('It does not make sense to provide both '
'"scale_factor" and "latitude_true_scale". ')
else:
proj4_params.append(('k_0', scale_factor))
super(Mercator, self).__init__(proj4_params, globe=globe)
# Calculate limits.
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
limits = self.transform_points(Geodetic(),
np.array([minlon, maxlon]),
np.array([min_latitude, max_latitude]))
self._x_limits = tuple(limits[..., 0])
self._y_limits = tuple(limits[..., 1])
self._threshold = min(np.diff(self.x_limits)[0] / 720,
np.diff(self.y_limits)[0] / 360)
def __eq__(self, other):
res = super(Mercator, self).__eq__(other)
if hasattr(other, "_y_limits") and hasattr(other, "_x_limits"):
res = res and self._y_limits == other._y_limits and \
self._x_limits == other._x_limits
return res
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.proj4_init, self._x_limits, self._y_limits))
@property
def threshold(self):
return self._threshold
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
# Define a specific instance of a Mercator projection, the Google mercator.
Mercator.GOOGLE = Mercator(min_latitude=-85.0511287798066,
max_latitude=85.0511287798066,
globe=Globe(ellipse=None,
semimajor_axis=WGS84_SEMIMAJOR_AXIS,
semiminor_axis=WGS84_SEMIMAJOR_AXIS,
nadgrids='@null'))
# Deprecated form
GOOGLE_MERCATOR = Mercator.GOOGLE
class LambertCylindrical(_RectangularProjection):
def __init__(self, central_longitude=0.0):
proj4_params = [('proj', 'cea'), ('lon_0', central_longitude)]
globe = Globe(semimajor_axis=math.degrees(1))
super(LambertCylindrical, self).__init__(proj4_params, 180,
math.degrees(1), globe=globe)
@property
def threshold(self):
return 0.5
class LambertConformal(Projection):
"""
A Lambert Conformal conic projection.
"""
def __init__(self, central_longitude=-96.0, central_latitude=39.0,
false_easting=0.0, false_northing=0.0,
secant_latitudes=None, standard_parallels=None,
globe=None, cutoff=-30):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to -96.
central_latitude: optional
The central latitude. Defaults to 39.
false_easting: optional
X offset from planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from planar origin in metres. Defaults to 0.
secant_latitudes: optional
Secant latitudes. This keyword is deprecated in v0.12 and directly
            replaced by ``standard_parallels``. Defaults to None.
standard_parallels: optional
Standard parallel latitude(s). Defaults to (33, 45).
globe: optional
A :class:`cartopy.crs.Globe`. If omitted, a default globe is
created.
cutoff: optional
Latitude of map cutoff.
The map extends to infinity opposite the central pole
so we must cut off the map drawing before then.
A value of 0 will draw half the globe. Defaults to -30.
"""
proj4_params = [('proj', 'lcc'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
if secant_latitudes and standard_parallels:
raise TypeError('standard_parallels replaces secant_latitudes.')
elif secant_latitudes is not None:
warnings.warn('secant_latitudes has been deprecated in v0.12. '
'The standard_parallels keyword can be used as a '
'direct replacement.')
standard_parallels = secant_latitudes
elif standard_parallels is None:
# The default. Put this as a keyword arg default once
# secant_latitudes is removed completely.
standard_parallels = (33, 45)
n_parallels = len(standard_parallels)
if not 1 <= n_parallels <= 2:
raise ValueError('1 or 2 standard parallels must be specified. '
'Got {} ({})'.format(n_parallels,
standard_parallels))
proj4_params.append(('lat_1', standard_parallels[0]))
if n_parallels == 2:
proj4_params.append(('lat_2', standard_parallels[1]))
super(LambertConformal, self).__init__(proj4_params, globe=globe)
# Compute whether this projection is at the "north pole" or the
# "south pole" (after the central lon/lat have been taken into
# account).
if n_parallels == 1:
plat = 90 if standard_parallels[0] > 0 else -90
else:
# Which pole are the parallels closest to? That is the direction
# that the cone converges.
if abs(standard_parallels[0]) > abs(standard_parallels[1]):
poliest_sec = standard_parallels[0]
else:
poliest_sec = standard_parallels[1]
plat = 90 if poliest_sec > 0 else -90
self.cutoff = cutoff
n = 91
lons = np.empty(n + 2)
lats = np.full(n + 2, cutoff)
lons[0] = lons[-1] = 0
lats[0] = lats[-1] = plat
if plat == 90:
# Ensure clockwise
lons[1:-1] = np.linspace(central_longitude + 180 - 0.001,
central_longitude - 180 + 0.001, n)
else:
lons[1:-1] = np.linspace(central_longitude - 180 + 0.001,
central_longitude + 180 - 0.001, n)
points = self.transform_points(PlateCarree(), lons, lats)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
def __eq__(self, other):
res = super(LambertConformal, self).__eq__(other)
if hasattr(other, "cutoff"):
res = res and self.cutoff == other.cutoff
return res
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.proj4_init, self.cutoff))
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class LambertAzimuthalEqualArea(Projection):
"""
A Lambert Azimuthal Equal-Area projection.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
globe=None):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to 0.
central_latitude: optional
The central latitude. Defaults to 0.
false_easting: optional
X offset from planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from planar origin in metres. Defaults to 0.
globe: optional
A :class:`cartopy.crs.Globe`. If omitted, a default globe is
created.
"""
proj4_params = [('proj', 'laea'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
super(LambertAzimuthalEqualArea, self).__init__(proj4_params,
globe=globe)
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
# Find the antipode, and shift it a small amount in latitude to
# approximate the extent of the projection:
lon = central_longitude + 180
sign = np.sign(central_latitude) or 1
lat = -central_latitude + sign * 0.01
x, max_y = self.transform_point(lon, lat, PlateCarree())
coords = _ellipse_boundary(a * 1.9999, max_y - false_northing,
false_easting, false_northing, 61)
self._boundary = sgeom.polygon.LinearRing(coords.T)
mins = np.min(coords, axis=1)
maxs = np.max(coords, axis=1)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
self._threshold = np.diff(self._x_limits)[0] * 1e-3
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Miller(_RectangularProjection):
def __init__(self, central_longitude=0.0, globe=None):
if globe is None:
globe = Globe(semimajor_axis=math.degrees(1), ellipse=None)
# TODO: Let the globe return the semimajor axis always.
a = np.float(globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(globe.semiminor_axis or a)
if b != a or globe.ellipse is not None:
warnings.warn('The proj "mill" projection does not handle '
'elliptical globes.')
proj4_params = [('proj', 'mill'), ('lon_0', central_longitude)]
# See Snyder, 1987. Eqs (11-1) and (11-2) substituting maximums of
# (lambda-lambda0)=180 and phi=90 to get limits.
super(Miller, self).__init__(proj4_params,
a * np.pi, a * 2.303412543376391,
globe=globe)
@property
def threshold(self):
return 0.5
class RotatedPole(_CylindricalProjection):
"""
A rotated latitude/longitude projected coordinate system
with cylindrical topology and projected distance.
Coordinates are measured in projection metres.
The class uses proj to perform an ob_tran operation, using the
pole_longitude to set a lon_0 then performing two rotations based on
pole_latitude and central_rotated_longitude.
This is equivalent to setting the new pole to a location defined by
the pole_latitude and pole_longitude values in the GeogCRS defined by
    globe, then rotating this new CRS about its pole using the
central_rotated_longitude value.
"""
def __init__(self, pole_longitude=0.0, pole_latitude=90.0,
central_rotated_longitude=0.0, globe=None):
"""
Parameters
----------
pole_longitude: optional
Pole longitude position, in unrotated degrees. Defaults to 0.
pole_latitude: optional
            Pole latitude position, in unrotated degrees. Defaults to 90.
central_rotated_longitude: optional
Longitude rotation about the new pole, in degrees. Defaults to 0.
globe: optional
An optional :class:`cartopy.crs.Globe`. Defaults to a "WGS84"
datum.
"""
proj4_params = [('proj', 'ob_tran'), ('o_proj', 'latlon'),
('o_lon_p', central_rotated_longitude),
('o_lat_p', pole_latitude),
('lon_0', 180 + pole_longitude),
('to_meter', math.radians(1))]
super(RotatedPole, self).__init__(proj4_params, 180, 90, globe=globe)
@property
def threshold(self):
return 0.5
class Gnomonic(Projection):
def __init__(self, central_latitude=0.0,
central_longitude=0.0, globe=None):
proj4_params = [('proj', 'gnom'), ('lat_0', central_latitude),
('lon_0', central_longitude)]
super(Gnomonic, self).__init__(proj4_params, globe=globe)
self._max = 5e7
@property
def boundary(self):
return sgeom.Point(0, 0).buffer(self._max).exterior
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return (-self._max, self._max)
@property
def y_limits(self):
return (-self._max, self._max)
class Stereographic(Projection):
def __init__(self, central_latitude=0.0, central_longitude=0.0,
false_easting=0.0, false_northing=0.0,
true_scale_latitude=None,
scale_factor=None, globe=None):
# Warn when using Stereographic with proj < 5.0.0 due to
# incorrect transformation with lon_0=0 (see
# https://github.com/OSGeo/proj.4/issues/194).
if central_latitude == 0:
if PROJ4_VERSION != ():
if PROJ4_VERSION < (5, 0, 0):
warnings.warn(
'The Stereographic projection in Proj older than '
'5.0.0 incorrectly transforms points when '
'central_latitude=0. Use this projection with '
'caution.')
else:
warnings.warn(
'Cannot determine Proj version. The Stereographic '
'projection may be unreliable and should be used with '
'caution.')
proj4_params = [('proj', 'stere'), ('lat_0', central_latitude),
('lon_0', central_longitude),
('x_0', false_easting), ('y_0', false_northing)]
if true_scale_latitude is not None:
if central_latitude not in (-90., 90.):
warnings.warn('"true_scale_latitude" parameter is only used '
'for polar stereographic projections. Consider '
'the use of "scale_factor" instead.')
proj4_params.append(('lat_ts', true_scale_latitude))
if scale_factor is not None:
if true_scale_latitude is not None:
raise ValueError('It does not make sense to provide both '
'"scale_factor" and "true_scale_latitude". '
'Ignoring "scale_factor".')
else:
proj4_params.append(('k_0', scale_factor))
super(Stereographic, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or WGS84_SEMIMINOR_AXIS)
# Note: The magic number has been picked to maintain consistent
# behaviour with a wgs84 globe. There is no guarantee that the scaling
# should even be linear.
x_axis_offset = 5e7 / WGS84_SEMIMAJOR_AXIS
y_axis_offset = 5e7 / WGS84_SEMIMINOR_AXIS
self._x_limits = (-a * x_axis_offset + false_easting,
a * x_axis_offset + false_easting)
self._y_limits = (-b * y_axis_offset + false_northing,
b * y_axis_offset + false_northing)
coords = _ellipse_boundary(self._x_limits[1], self._y_limits[1],
false_easting, false_northing, 91)
self._boundary = sgeom.LinearRing(coords.T)
self._threshold = np.diff(self._x_limits)[0] * 1e-3
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class NorthPolarStereo(Stereographic):
def __init__(self, central_longitude=0.0, true_scale_latitude=None,
globe=None):
super(NorthPolarStereo, self).__init__(
central_latitude=90,
central_longitude=central_longitude,
true_scale_latitude=true_scale_latitude, # None is +90
globe=globe)
class SouthPolarStereo(Stereographic):
def __init__(self, central_longitude=0.0, true_scale_latitude=None,
globe=None):
super(SouthPolarStereo, self).__init__(
central_latitude=-90,
central_longitude=central_longitude,
true_scale_latitude=true_scale_latitude, # None is -90
globe=globe)
class Orthographic(Projection):
def __init__(self, central_longitude=0.0, central_latitude=0.0,
globe=None):
if PROJ4_VERSION != ():
if (5, 0, 0) <= PROJ4_VERSION < (5, 1, 0):
warnings.warn(
'The Orthographic projection in Proj between 5.0.0 and '
'5.1.0 incorrectly transforms points. Use this projection '
'with caution.')
else:
warnings.warn(
'Cannot determine Proj version. The Orthographic projection '
'may be unreliable and should be used with caution.')
proj4_params = [('proj', 'ortho'), ('lon_0', central_longitude),
('lat_0', central_latitude)]
super(Orthographic, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or a)
if b != a:
warnings.warn('The proj "ortho" projection does not appear to '
'handle elliptical globes.')
# To stabilise the projection of geometries, we reduce the boundary by
# a tiny fraction at the cost of the extreme edges.
coords = _ellipse_boundary(a * 0.99999, b * 0.99999, n=61)
self._boundary = sgeom.polygon.LinearRing(coords.T)
mins = np.min(coords, axis=1)
maxs = np.max(coords, axis=1)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
self._threshold = np.diff(self._x_limits)[0] * 0.02
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class _WarpedRectangularProjection(six.with_metaclass(ABCMeta, Projection)):
def __init__(self, proj4_params, central_longitude,
false_easting=None, false_northing=None, globe=None):
if false_easting is not None:
proj4_params += [('x_0', false_easting)]
if false_northing is not None:
proj4_params += [('y_0', false_northing)]
super(_WarpedRectangularProjection, self).__init__(proj4_params,
globe=globe)
# Obtain boundary points
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
n = 91
lon = np.empty(2 * n + 1)
lat = np.empty(2 * n + 1)
lon[:n] = minlon
lat[:n] = np.linspace(-90, 90, n)
lon[n:2 * n] = maxlon
lat[n:2 * n] = np.linspace(90, -90, n)
lon[-1] = minlon
lat[-1] = -90
points = self.transform_points(self.as_geodetic(), lon, lat)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
@property
def boundary(self):
return self._boundary
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class _Eckert(six.with_metaclass(ABCMeta, _WarpedRectangularProjection)):
"""
An Eckert projection.
This class implements all the methods common to the Eckert family of
projections.
"""
def __init__(self, central_longitude=0, false_easting=None,
false_northing=None, globe=None):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
false_easting: float, optional
X offset from planar origin in metres. Defaults to 0.
false_northing: float, optional
Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
.. note::
This projection does not handle elliptical globes.
"""
if globe is None:
globe = Globe(semimajor_axis=WGS84_SEMIMAJOR_AXIS, ellipse=None)
# TODO: Let the globe return the semimajor axis always.
a = globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS
b = globe.semiminor_axis or a
if b != a or globe.ellipse is not None:
warnings.warn('The proj "{}" projection does not handle '
'elliptical globes.'.format(self._proj_name))
proj4_params = [('proj', self._proj_name),
('lon_0', central_longitude)]
super(_Eckert, self).__init__(proj4_params, central_longitude,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
@property
def threshold(self):
return 1e5
class EckertI(_Eckert):
"""
An Eckert I projection.
This projection is pseudocylindrical, but not equal-area. Both meridians
and parallels are straight lines. Its equal-area pair is :class:`EckertII`.
"""
_proj_name = 'eck1'
class EckertII(_Eckert):
"""
An Eckert II projection.
This projection is pseudocylindrical, and equal-area. Both meridians and
parallels are straight lines. Its non-equal-area pair with equally-spaced
parallels is :class:`EckertI`.
"""
_proj_name = 'eck2'
class EckertIII(_Eckert):
"""
An Eckert III projection.
This projection is pseudocylindrical, but not equal-area. Parallels are
equally-spaced straight lines, while meridians are elliptical arcs up to
semicircles on the edges. Its equal-area pair is :class:`EckertIV`.
"""
_proj_name = 'eck3'
class EckertIV(_Eckert):
"""
An Eckert IV projection.
This projection is pseudocylindrical, and equal-area. Parallels are
unequally-spaced straight lines, while meridians are elliptical arcs up to
semicircles on the edges. Its non-equal-area pair with equally-spaced
parallels is :class:`EckertIII`.
It is commonly used for world maps.
"""
_proj_name = 'eck4'
class EckertV(_Eckert):
"""
An Eckert V projection.
This projection is pseudocylindrical, but not equal-area. Parallels are
equally-spaced straight lines, while meridians are sinusoidal arcs. Its
equal-area pair is :class:`EckertVI`.
"""
_proj_name = 'eck5'
class EckertVI(_Eckert):
"""
An Eckert VI projection.
This projection is pseudocylindrical, and equal-area. Parallels are
unequally-spaced straight lines, while meridians are sinusoidal arcs. Its
non-equal-area pair with equally-spaced parallels is :class:`EckertV`.
It is commonly used for world maps.
"""
_proj_name = 'eck6'
class EqualEarth(_WarpedRectangularProjection):
u"""
An Equal Earth projection.
This projection is pseudocylindrical, and equal area. Parallels are
unequally-spaced straight lines, while meridians are equally-spaced arcs.
It is intended for world maps.
Note
----
To use this projection, you must be using Proj 5.2.0 or newer.
References
----------
Bojan \u0160avri\u010d, Tom Patterson & Bernhard Jenny (2018) The Equal
Earth map projection, International Journal of Geographical Information
Science, DOI: 10.1080/13658816.2018.1504949
"""
def __init__(self, central_longitude=0, false_easting=None,
false_northing=None, globe=None):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
false_easting: float, optional
X offset from planar origin in metres. Defaults to 0.
false_northing: float, optional
Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
"""
if PROJ4_VERSION < (5, 2, 0):
raise ValueError('The EqualEarth projection requires Proj version '
'5.2.0, but you are using {}.'
.format('.'.join(str(v) for v in PROJ4_VERSION)))
proj_params = [('proj', 'eqearth'), ('lon_0', central_longitude)]
super(EqualEarth, self).__init__(proj_params, central_longitude,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
@property
def threshold(self):
return 1e5
class Mollweide(_WarpedRectangularProjection):
"""
A Mollweide projection.
This projection is pseudocylindrical, and equal area. Parallels are
unequally-spaced straight lines, while meridians are elliptical arcs up to
semicircles on the edges. Poles are points.
It is commonly used for world maps, or interrupted with several central
meridians.
"""
def __init__(self, central_longitude=0, globe=None,
false_easting=None, false_northing=None):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
false_easting: float, optional
X offset from planar origin in metres. Defaults to 0.
false_northing: float, optional
Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
.. note::
This projection does not handle elliptical globes.
"""
if globe is None:
globe = Globe(semimajor_axis=WGS84_SEMIMAJOR_AXIS, ellipse=None)
# TODO: Let the globe return the semimajor axis always.
a = globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS
b = globe.semiminor_axis or a
if b != a or globe.ellipse is not None:
warnings.warn('The proj "moll" projection does not handle '
'elliptical globes.')
proj4_params = [('proj', 'moll'), ('lon_0', central_longitude)]
super(Mollweide, self).__init__(proj4_params, central_longitude,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
@property
def threshold(self):
return 1e5
class Robinson(_WarpedRectangularProjection):
"""
A Robinson projection.
This projection is pseudocylindrical, and a compromise that is neither
equal-area nor conformal. Parallels are unequally-spaced straight lines,
and meridians are curved lines of no particular form.
It is commonly used for "visually-appealing" world maps.
"""
def __init__(self, central_longitude=0, globe=None,
false_easting=None, false_northing=None):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
false_easting: float, optional
X offset from planar origin in metres. Defaults to 0.
false_northing: float, optional
Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
.. note::
This projection does not handle elliptical globes.
"""
# Warn when using Robinson with proj 4.8 due to discontinuity at
# 40 deg N introduced by incomplete fix to issue #113 (see
# https://github.com/OSGeo/proj.4/issues/113).
if PROJ4_VERSION != ():
if (4, 8) <= PROJ4_VERSION < (4, 9):
warnings.warn('The Robinson projection in the v4.8.x series '
'of Proj contains a discontinuity at '
'40 deg latitude. Use this projection with '
'caution.')
else:
warnings.warn('Cannot determine Proj version. The Robinson '
'projection may be unreliable and should be used '
'with caution.')
if globe is None:
globe = Globe(semimajor_axis=WGS84_SEMIMAJOR_AXIS, ellipse=None)
# TODO: Let the globe return the semimajor axis always.
a = globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS
b = globe.semiminor_axis or a
if b != a or globe.ellipse is not None:
warnings.warn('The proj "robin" projection does not handle '
'elliptical globes.')
proj4_params = [('proj', 'robin'), ('lon_0', central_longitude)]
super(Robinson, self).__init__(proj4_params, central_longitude,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
@property
def threshold(self):
return 1e4
def transform_point(self, x, y, src_crs):
"""
Capture and handle any input NaNs, else invoke parent function,
:meth:`_WarpedRectangularProjection.transform_point`.
Needed because input NaNs can trigger a fatal error in the underlying
implementation of the Robinson projection.
Note
----
Although the original can in fact translate (nan, lat) into
(nan, y-value), this patched version doesn't support that.
"""
if np.isnan(x) or np.isnan(y):
result = (np.nan, np.nan)
else:
result = super(Robinson, self).transform_point(x, y, src_crs)
return result
def transform_points(self, src_crs, x, y, z=None):
"""
Capture and handle NaNs in input points -- else as parent function,
:meth:`_WarpedRectangularProjection.transform_points`.
Needed because input NaNs can trigger a fatal error in the underlying
implementation of the Robinson projection.
Note
----
Although the original can in fact translate (nan, lat) into
(nan, y-value), this patched version doesn't support that.
Instead, we invalidate any of the points that contain a NaN.
"""
input_point_nans = np.isnan(x) | np.isnan(y)
if z is not None:
input_point_nans |= np.isnan(z)
handle_nans = np.any(input_point_nans)
if handle_nans:
# Remove NaN points from input data to avoid the error.
x[input_point_nans] = 0.0
y[input_point_nans] = 0.0
if z is not None:
z[input_point_nans] = 0.0
result = super(Robinson, self).transform_points(src_crs, x, y, z)
if handle_nans:
# Result always has shape (N, 3).
# Blank out each (whole) point where we had a NaN in the input.
result[input_point_nans] = np.nan
return result
class InterruptedGoodeHomolosine(Projection):
def __init__(self, central_longitude=0, globe=None):
proj4_params = [('proj', 'igh'), ('lon_0', central_longitude)]
super(InterruptedGoodeHomolosine, self).__init__(proj4_params,
globe=globe)
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
epsilon = 1e-10
# Obtain boundary points
n = 31
top_interrupted_lons = (-40.0,)
bottom_interrupted_lons = (80.0, -20.0, -100.0)
lons = np.empty(
(2 + 2 * len(top_interrupted_lons + bottom_interrupted_lons)) * n +
1)
lats = np.empty(
(2 + 2 * len(top_interrupted_lons + bottom_interrupted_lons)) * n +
1)
end = 0
# Left boundary
lons[end:end + n] = minlon
lats[end:end + n] = np.linspace(-90, 90, n)
end += n
# Top boundary
for lon in top_interrupted_lons:
lons[end:end + n] = lon - epsilon + central_longitude
lats[end:end + n] = np.linspace(90, 0, n)
end += n
lons[end:end + n] = lon + epsilon + central_longitude
lats[end:end + n] = np.linspace(0, 90, n)
end += n
# Right boundary
lons[end:end + n] = maxlon
lats[end:end + n] = np.linspace(90, -90, n)
end += n
# Bottom boundary
for lon in bottom_interrupted_lons:
lons[end:end + n] = lon + epsilon + central_longitude
lats[end:end + n] = np.linspace(-90, 0, n)
end += n
lons[end:end + n] = lon - epsilon + central_longitude
lats[end:end + n] = np.linspace(0, -90, n)
end += n
# Close loop
lons[-1] = minlon
lats[-1] = -90
points = self.transform_points(self.as_geodetic(), lons, lats)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 2e4
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class _Satellite(Projection):
def __init__(self, projection, satellite_height=35785831,
central_longitude=0.0, central_latitude=0.0,
false_easting=0, false_northing=0, globe=None,
sweep_axis=None):
proj4_params = [('proj', projection), ('lon_0', central_longitude),
('lat_0', central_latitude), ('h', satellite_height),
('x_0', false_easting), ('y_0', false_northing),
('units', 'm')]
if sweep_axis:
proj4_params.append(('sweep', sweep_axis))
super(_Satellite, self).__init__(proj4_params, globe=globe)
def _set_boundary(self, coords):
self._boundary = sgeom.LinearRing(coords.T)
mins = np.min(coords, axis=1)
maxs = np.max(coords, axis=1)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
self._threshold = np.diff(self._x_limits)[0] * 0.02
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Geostationary(_Satellite):
"""
A view appropriate for satellites in Geostationary Earth orbit.
Perspective view looking directly down from above a point on the equator.
In this projection, the projected coordinates are scanning angles measured
from the satellite looking directly downward, multiplied by the height of
the satellite.
"""
def __init__(self, central_longitude=0.0, satellite_height=35785831,
false_easting=0, false_northing=0, globe=None,
sweep_axis='y'):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
satellite_height: float, optional
The height of the satellite. Defaults to 35785831 meters
(true geostationary orbit).
        false_easting: float, optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: float, optional
            Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
sweep_axis: 'x' or 'y', optional. Defaults to 'y'.
Controls which axis is scanned first, and thus which angle is
applied first. The default is appropriate for Meteosat, while
'x' should be used for GOES.
"""
super(Geostationary, self).__init__(
projection='geos',
satellite_height=satellite_height,
central_longitude=central_longitude,
central_latitude=0.0,
false_easting=false_easting,
false_northing=false_northing,
globe=globe,
sweep_axis=sweep_axis)
# TODO: Let the globe return the semimajor axis always.
        a = float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
        h = float(satellite_height)
# These are only exact for a spherical Earth, owing to assuming a is
# constant. Handling elliptical would be much harder for this.
sin_max_th = a / (a + h)
tan_max_th = a / np.sqrt((a + h) ** 2 - a ** 2)
# Using Napier's rules for right spherical triangles
# See R2 and R6 (x and y coords are h * b and h * a, respectively):
# https://en.wikipedia.org/wiki/Spherical_trigonometry
t = np.linspace(0, -2 * np.pi, 61) # Clockwise boundary.
coords = np.vstack([np.arctan(tan_max_th * np.cos(t)),
np.arcsin(sin_max_th * np.sin(t))])
coords *= h
coords += np.array([[false_easting], [false_northing]])
self._set_boundary(coords)
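# Illustrative usage (a sketch; the satellite longitude below is an assumed
# example value, roughly matching GOES-East):
#
#     import cartopy.crs as ccrs
#     geos = ccrs.Geostationary(central_longitude=-75.0, sweep_axis='x')
#
# sweep_axis='x' is appropriate for GOES imagery, while the default 'y'
# matches Meteosat, as noted in the docstring above.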
class NearsidePerspective(_Satellite):
"""
Perspective view looking directly down from above a point on the globe.
In this projection, the projected coordinates are x and y measured from
the origin of a plane tangent to the Earth directly below the perspective
point (e.g. a satellite).
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
satellite_height=35785831,
false_easting=0, false_northing=0, globe=None):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
central_latitude: float, optional
The central latitude. Defaults to 0.
satellite_height: float, optional
The height of the satellite. Defaults to 35785831 meters
(true geostationary orbit).
        false_easting: float, optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: float, optional
            Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
.. note::
This projection does not handle elliptical globes.
"""
if globe is None:
globe = Globe(semimajor_axis=WGS84_SEMIMAJOR_AXIS, ellipse=None)
# TODO: Let the globe return the semimajor axis always.
a = globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS
b = globe.semiminor_axis or a
if b != a or globe.ellipse is not None:
warnings.warn('The proj "nsper" projection does not handle '
'elliptical globes.')
super(NearsidePerspective, self).__init__(
projection='nsper',
satellite_height=satellite_height,
central_longitude=central_longitude,
central_latitude=central_latitude,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
        h = float(satellite_height)
max_x = a * np.sqrt(h / (2 * a + h))
coords = _ellipse_boundary(max_x, max_x,
false_easting, false_northing, 61)
self._set_boundary(coords)
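# Illustrative usage (a sketch; the viewpoint values are arbitrary examples):
#
#     import cartopy.crs as ccrs
#     view = ccrs.NearsidePerspective(central_longitude=10.0,
#                                     central_latitude=50.0)
#
# Unlike Geostationary, the projected coordinates here are planar x/y on a
# plane tangent to the Earth below the perspective point, not scanning angles.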
class AlbersEqualArea(Projection):
"""
An Albers Equal Area projection
This projection is conic and equal-area, and is commonly used for maps of
the conterminous United States.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
standard_parallels=(20.0, 50.0), globe=None):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to 0.
central_latitude: optional
The central latitude. Defaults to 0.
false_easting: optional
X offset from planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from planar origin in metres. Defaults to 0.
standard_parallels: optional
The one or two latitudes of correct scale. Defaults to (20, 50).
globe: optional
A :class:`cartopy.crs.Globe`. If omitted, a default globe is
created.
"""
proj4_params = [('proj', 'aea'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
if standard_parallels is not None:
try:
proj4_params.append(('lat_1', standard_parallels[0]))
try:
proj4_params.append(('lat_2', standard_parallels[1]))
except IndexError:
pass
except TypeError:
proj4_params.append(('lat_1', standard_parallels))
super(AlbersEqualArea, self).__init__(proj4_params, globe=globe)
# bounds
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
n = 103
lons = np.empty(2 * n + 1)
lats = np.empty(2 * n + 1)
tmp = np.linspace(minlon, maxlon, n)
lons[:n] = tmp
lats[:n] = 90
lons[n:-1] = tmp[::-1]
lats[n:-1] = -90
lons[-1] = lons[0]
lats[-1] = lats[0]
points = self.transform_points(self.as_geodetic(), lons, lats)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
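# Illustrative usage (a sketch; the parallels below are the common choices for
# a conterminous-US map and are only an example):
#
#     import cartopy.crs as ccrs
#     aea = ccrs.AlbersEqualArea(central_longitude=-96.0,
#                                standard_parallels=(29.5, 45.5))
#
# Passing a single number for standard_parallels is also accepted; the
# try/except above then emits only a lat_1 parameter.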
class AzimuthalEquidistant(Projection):
"""
An Azimuthal Equidistant projection
This projection provides accurate angles about and distances through the
central position. Other angles, distances, or areas may be distorted.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
globe=None):
"""
Parameters
----------
central_longitude: optional
The true longitude of the central meridian in degrees.
Defaults to 0.
central_latitude: optional
The true latitude of the planar origin in degrees.
Defaults to 0.
false_easting: optional
X offset from the planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from the planar origin in metres. Defaults to 0.
globe: optional
An instance of :class:`cartopy.crs.Globe`. If omitted, a default
globe is created.
"""
# Warn when using Azimuthal Equidistant with proj < 4.9.2 due to
# incorrect transformation past 90 deg distance (see
# https://github.com/OSGeo/proj.4/issues/246).
if PROJ4_VERSION != ():
if PROJ4_VERSION < (4, 9, 2):
warnings.warn('The Azimuthal Equidistant projection in Proj '
'older than 4.9.2 incorrectly transforms points '
'farther than 90 deg from the origin. Use this '
'projection with caution.')
else:
warnings.warn('Cannot determine Proj version. The Azimuthal '
'Equidistant projection may be unreliable and '
'should be used with caution.')
proj4_params = [('proj', 'aeqd'), ('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting), ('y_0', false_northing)]
super(AzimuthalEquidistant, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
        a = float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
        b = float(self.globe.semiminor_axis or a)
coords = _ellipse_boundary(a * np.pi, b * np.pi,
false_easting, false_northing, 61)
self._boundary = sgeom.LinearRing(coords.T)
mins = np.min(coords, axis=1)
maxs = np.max(coords, axis=1)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Sinusoidal(Projection):
"""
A Sinusoidal projection.
This projection is equal-area.
"""
def __init__(self, central_longitude=0.0, false_easting=0.0,
false_northing=0.0, globe=None):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to 0.
false_easting: optional
X offset from planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from planar origin in metres. Defaults to 0.
globe: optional
A :class:`cartopy.crs.Globe`. If omitted, a default globe is
created.
"""
proj4_params = [('proj', 'sinu'),
('lon_0', central_longitude),
('x_0', false_easting),
('y_0', false_northing)]
super(Sinusoidal, self).__init__(proj4_params, globe=globe)
# Obtain boundary points
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
points = []
n = 91
lon = np.empty(2 * n + 1)
lat = np.empty(2 * n + 1)
lon[:n] = minlon
lat[:n] = np.linspace(-90, 90, n)
lon[n:2 * n] = maxlon
lat[n:2 * n] = np.linspace(90, -90, n)
lon[-1] = minlon
lat[-1] = -90
points = self.transform_points(self.as_geodetic(), lon, lat)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
self._threshold = max(np.abs(self.x_limits + self.y_limits)) * 1e-5
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
# MODIS data products use a Sinusoidal projection of a spherical Earth
# http://modis-land.gsfc.nasa.gov/GCTP.html
Sinusoidal.MODIS = Sinusoidal(globe=Globe(ellipse=None,
semimajor_axis=6371007.181,
semiminor_axis=6371007.181))
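# Illustrative usage of the MODIS variant (a sketch; the coordinates are an
# arbitrary example point):
#
#     import cartopy.crs as ccrs
#     x, y = ccrs.Sinusoidal.MODIS.transform_point(
#         -105.0, 40.0, ccrs.Geodetic())
#
# This maps a lon/lat pair onto the spherical sinusoidal grid used by MODIS
# land products.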
class EquidistantConic(Projection):
"""
An Equidistant Conic projection.
This projection is conic and equidistant, and the scale is true along all
meridians and along one or two specified standard parallels.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
standard_parallels=(20.0, 50.0), globe=None):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to 0.
central_latitude: optional
The true latitude of the planar origin in degrees. Defaults to 0.
false_easting: optional
X offset from planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from planar origin in metres. Defaults to 0.
standard_parallels: optional
The one or two latitudes of correct scale. Defaults to (20, 50).
globe: optional
A :class:`cartopy.crs.Globe`. If omitted, a default globe is
created.
"""
proj4_params = [('proj', 'eqdc'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
if standard_parallels is not None:
try:
proj4_params.append(('lat_1', standard_parallels[0]))
try:
proj4_params.append(('lat_2', standard_parallels[1]))
except IndexError:
pass
except TypeError:
proj4_params.append(('lat_1', standard_parallels))
super(EquidistantConic, self).__init__(proj4_params, globe=globe)
# bounds
n = 103
lons = np.empty(2 * n + 1)
lats = np.empty(2 * n + 1)
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
tmp = np.linspace(minlon, maxlon, n)
lons[:n] = tmp
lats[:n] = 90
lons[n:-1] = tmp[::-1]
lats[n:-1] = -90
lons[-1] = lons[0]
lats[-1] = lats[0]
points = self.transform_points(self.as_geodetic(), lons, lats)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class _BoundaryPoint(object):
def __init__(self, distance, kind, data):
"""
A representation for a geometric object which is
connected to the boundary.
Parameters
----------
distance: float
The distance along the boundary that this object
can be found.
kind: bool
Whether this object represents a point from the
pre-computed boundary.
data: point or namedtuple
The actual data that this boundary object represents.
"""
self.distance = distance
self.kind = kind
self.data = data
def __repr__(self):
return '_BoundaryPoint(%r, %r, %s)' % (self.distance, self.kind,
self.data)
def _find_first_ge(a, x):
for v in a:
if v.distance >= x:
return v
# We've gone all the way around, so pick the first point again.
return a[0]
def epsg(code):
"""
Return the projection which corresponds to the given EPSG code.
The EPSG code must correspond to a "projected coordinate system",
so EPSG codes such as 4326 (WGS-84) which define a "geodetic coordinate
system" will not work.
Note
----
The conversion is performed by querying https://epsg.io/ so a
live internet connection is required.
"""
import cartopy._epsg
return cartopy._epsg._EPSGProjection(code)
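# Illustrative usage (a sketch; requires network access, and the EPSG code
# below is just an example of a projected coordinate system):
#
#     import cartopy.crs as ccrs
#     osgb = ccrs.epsg(27700)   # British National Grid
#
# A geodetic code such as 4326 would be rejected, as noted in the docstring.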
| lgpl-3.0 |
Aasmi/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
kemerelab/NeuroHMM | helpers/datapackage.py | 3 | 7447 | from path import path
import hashlib
import json
import numpy as np
import os
import pandas as pd
from datetime import datetime
def md5(data_path):
# we need to compute the md5 sum one chunk at a time, because some
# files are too large to fit in memory
md5 = hashlib.md5()
    # Open in binary mode: hashlib works on bytes, and text mode would
    # mangle binary resources (and fails outright on Python 3).
    with open(data_path, 'rb') as fh:
        while True:
            chunk = fh.read(128 * md5.block_size)
if not chunk:
break
md5.update(chunk)
data_hash = md5.hexdigest()
return data_hash
class DataPackage(dict):
def __init__(self, name, licenses):
self['name'] = name
self['datapackage_version'] = '1.0-beta.5'
self['licenses'] = []
for l in licenses:
if not isinstance(l, dict):
if l == 'odc-by':
url = 'http://opendefinition.org/licenses/odc-by'
else:
raise ValueError("unrecognized license: %s" % l)
l = dict(id=l, url=url)
self['licenses'].append(l)
self['title'] = None
self['description'] = None
self['homepage'] = None
self['version'] = '0.0.1'
self['sources'] = []
self['keywords'] = None
self['last_modified'] = datetime.now().isoformat(" ")
self['image'] = None
self['contributors'] = []
self['resources'] = []
self._path = None
self._resource_map = {}
@property
def abspath(self):
return self._path.joinpath(self['name']).abspath()
@classmethod
def load(cls, pth):
pth = path(pth)
dpjson_pth = pth.joinpath("datapackage.json")
if not dpjson_pth.exists():
raise IOError("No metadata file datapackage.json")
with open(dpjson_pth, "r") as fh:
dpjson = json.load(fh)
name = dpjson['name']
licenses = dpjson['licenses']
resources = dpjson['resources']
del dpjson['name']
del dpjson['licenses']
del dpjson['resources']
dp = cls(name=name, licenses=licenses)
dp._path = pth.splitpath()[0]
dp.update(dpjson)
if dp.abspath != pth.abspath():
raise ValueError("malformed datapackage")
for resource in resources:
rname = resource['name']
rfmt = resource['format']
rpth = resource.get('path', None)
rdata = resource.get('data', None)
del resource['name']
del resource['format']
if 'path' in resource:
del resource['path']
if 'data' in resource:
del resource['data']
r = Resource(name=rname, fmt=rfmt, pth=rpth, data=rdata)
r.update(resource)
dp.add_resource(r)
return dp
def add_contributor(self, name, email):
self['contributors'].append(dict(name=name, email=email))
def clear_resources(self):
self['resources'] = []
self._resource_map = {}
def add_resource(self, resource):
self['resources'].append(resource)
self._resource_map[resource['name']] = len(self['resources']) - 1
resource.dpkg = self
def get_resource(self, name):
return self['resources'][self._resource_map[name]]
def load_resource(self, name, verify=True):
return self.get_resource(name).load_data(verify=verify)
def load_resources(self, verify=True):
for resource in self['resources']:
resource.load_data(verify=verify)
def save_metadata(self, dest=None):
if dest:
self._path = dest
self['last_modified'] = datetime.now().isoformat(" ")
metapath = self.abspath.joinpath("datapackage.json")
with open(metapath, "w") as fh:
json.dump(self, fh, indent=2)
def save_data(self, dest=None):
if dest:
self._path = dest
for resource in self['resources']:
resource.save_data()
def save(self, dest=None):
if dest:
self._path = dest
if not self.abspath.exists():
self.abspath.makedirs_p()
self.save_data()
self.save_metadata()
def bump_major_version(self):
major, minor, patch = map(int, self['version'].split("."))
major += 1
minor = 0
patch = 0
self['version'] = "%d.%d.%d" % (major, minor, patch)
return self['version']
def bump_minor_version(self):
major, minor, patch = map(int, self['version'].split("."))
minor += 1
patch = 0
self['version'] = "%d.%d.%d" % (major, minor, patch)
return self['version']
def bump_patch_version(self):
major, minor, patch = map(int, self['version'].split("."))
patch += 1
self['version'] = "%d.%d.%d" % (major, minor, patch)
return self['version']
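# Sketch of the intended save workflow (illustrative only; the names, paths and
# data below are hypothetical):
#
#     dp = DataPackage(name="my-dataset", licenses=["odc-by"])
#     dp.add_contributor("Jane Doe", "jane@example.com")
#     dp.add_resource(Resource(name="scores", fmt="csv",
#                              pth="scores.csv", data=scores_df))
#     dp.save(dest=path("/tmp"))  # writes /tmp/my-dataset/ plus datapackage.json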
class Resource(dict):
def __init__(self, name, fmt, data=None, pth=None):
self['name'] = name
self['modified'] = datetime.now().isoformat(" ")
self['format'] = fmt
if pth:
self['path'] = pth
self.data = data
self.dpkg = None
@property
def abspath(self):
if not self.get('path', None):
raise ValueError("no relative path specified")
if not self.dpkg:
raise ValueError("no linked datapackage")
return self.dpkg.abspath.joinpath(self['path'])
@property
def data(self):
return self._data
@data.setter
def data(self, val):
self._data = val
if 'path' not in self:
self['data'] = val
def save_data(self):
self['modified'] = datetime.now().isoformat(" ")
if 'path' not in self:
return
if self['format'] == 'csv':
pd.DataFrame(self.data).to_csv(self.abspath)
elif self['format'] == 'json':
with open(self.abspath, "w") as fh:
json.dump(self.data, fh)
elif self['format'] == 'npy':
np.save(self.abspath, np.array(self.data))
else:
raise ValueError("unsupported format: %s" % self['format'])
self.update_size()
self.update_hash()
def load_data(self, verify=True):
if self.data is not None:
return self.data
# check the file size
if self.update_size():
raise IOError("resource has changed size on disk")
# load the raw data and check md5
if verify and self.update_hash():
raise IOError("resource checksum has changed")
# check format and load data
if self['format'] == 'csv':
            # pandas removed DataFrame.from_csv; read_csv with these arguments
            # matches its old defaults.
            data = pd.read_csv(self.abspath, index_col=0, parse_dates=True)
elif self['format'] == 'json':
with open(self.abspath, "r") as fh:
data = json.load(fh)
elif self['format'] == 'npy':
data = np.load(self.abspath, mmap_mode='c')
else:
raise ValueError("unsupported format: %s" % self['format'])
self.data = data
return self.data
def update_size(self):
old_size = self.get('bytes', None)
new_size = self.abspath.getsize()
self['bytes'] = new_size
return old_size != new_size
def update_hash(self):
old_hash = self.get('hash', None)
new_hash = md5(self.abspath)
self['hash'] = new_hash
return old_hash != new_hash
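# Sketch of the load/verify round trip (illustrative only; the path is
# hypothetical):
#
#     dp = DataPackage.load("/tmp/my-dataset")
#     scores = dp.load_resource("scores")          # checks size and md5 first
#     scores = dp.load_resource("scores", verify=False)  # skip the checksum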
| mit |
rrozewsk/OurProject | Boostrappers/CDSBootstrapper/CDSVasicekBootstrapper.py | 1 | 3334 | import numpy as np
import pandas as pd
from scipy.optimize import minimize
from parameters import trim_start,trim_end,referenceDate,x0Vas
from datetime import date
from Products.Credit.CDS import CDS
from parameters import freq
from MonteCarloSimulators.Vasicek.vasicekMCSim import MC_Vasicek_Sim
class BootstrapperCDSLadder(object):
# Class with Bootstrapping methods
# It can be used with CDS Ladder or KK Ratings CDS Ladder Converted Values
def __init__(self, start, freq, R,CDSList):
self.start=start
self.freq=freq
self.R=R
self.listCDS=CDSList
# %% GetSpread
def getSpreadBootstrapped(self, xQ, myCDS, s_quotes):
calcCurve=myCDS.changeGuessForSpread(xQ)
error=(s_quotes-calcCurve)**2
return error
def getScheduleComplete(self):
self.datelist = self.myScheduler.getSchedule(start=self.start,end=self.maturity,freq=self.freq,referencedate=self.referencedate)
fullset = list(sorted(list(set(self.datelist)
.union([self.referencedate])
.union([self.start])
.union([self.maturity])
.union([self.observationdate])
)))
return fullset,self.datelist
def getSpreadList(self, xQ):
spread = {}
orderedCDS=[]
for i in range(0,len(self.freq)):
for j in range(0,len(self.listCDS)):
if(self.freq[i] == self.listCDS[j].freq):
orderedCDS.append(self.listCDS[j])
for i in range(0,len(orderedCDS)):
quotes=orderedCDS[i].getSpread()
#print(quotes)
myQ=MC_Vasicek_Sim(x=self.CalibrateCurve(x0=xQ,quotes=quotes,myCDS=orderedCDS[i])[0:4],datelist=[orderedCDS[i].referenceDate,orderedCDS[i].maturity],t_step=1/365,simNumber=1000).getLibor()[0]
myQ=pd.DataFrame(myQ.values,columns=[orderedCDS[i].freq],index=myQ.index)
orderedCDS[i].myQ=myQ
#print(myQ)
spread[orderedCDS[i].freq]=orderedCDS[i].getSpread()
return spread
def getXList(self, xQ):
out = {}
orderedCDS=[]
for i in range(0,len(self.freq)):
for j in range(0,len(self.listCDS)):
if(self.freq[i] == self.listCDS[j].freq):
orderedCDS.append(self.listCDS[j])
for i in range(0,len(orderedCDS)):
quotes=orderedCDS[i].getSpread()
#print(quotes)
out[orderedCDS[i].freq]=self.CalibrateCurve(x0=xQ,quotes=quotes,myCDS=orderedCDS[i]).tolist()
return out
# Fit CDS Ladder using Vasicek,CRI,etc Model. Input parameters are x0
# QFunCIR with the name of the Q Model Function
def CalibrateCurve(self, x0, quotes,myCDS):
# Bootstrap CDS Ladder Directly
results = minimize(self.getSpreadBootstrapped, x0, args=(myCDS, quotes),method='Powell')
print(results.success)
print(myCDS.freq)
return results.x
'''
myLad=BootstrapperCDSLadder(start=trim_start,freq=['3M'],CDSList=[CDS(start_date=trim_start,end_date=date(2010,1,1),freq='3M',coupon=1,referenceDate=trim_start,rating='AAA')],R=.4).getSpreadList(x0Vas)
print(myLad)
''' | mit |
DSLituiev/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
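# Illustrative check (a sketch; the indicator arrays are made up):
#
#     import numpy as np
#     rows = np.array([[True, True, False, False]])
#     cols = np.array([[True, False, True, False]])
#     consensus_score((rows, cols), (rows, cols))   # -> 1.0
#
# Identical sets of biclusters score exactly 1.0; non-overlapping ones score 0.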
| bsd-3-clause |
pratapvardhan/pandas | pandas/tests/extension/base/setitem.py | 3 | 5431 | import operator
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseSetitemTests(BaseExtensionTests):
def test_setitem_scalar_series(self, data):
arr = pd.Series(data)
arr[0] = data[1]
assert arr[0] == data[1]
def test_setitem_sequence(self, data):
arr = pd.Series(data)
original = data.copy()
arr[[0, 1]] = [data[1], data[0]]
assert arr[0] == original[1]
assert arr[1] == original[0]
@pytest.mark.parametrize('as_array', [True, False])
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
ser = pd.Series(data)
value = [data[0]]
if as_array:
value = data._from_sequence(value)
xpr = 'cannot set using a {} indexer with a different length'
with tm.assert_raises_regex(ValueError, xpr.format('list-like')):
ser[[0, 1]] = value
with tm.assert_raises_regex(ValueError, xpr.format('slice')):
ser[slice(3)] = value
    def test_setitem_empty_indexer(self, data):
ser = pd.Series(data)
original = ser.copy()
ser[[]] = []
self.assert_series_equal(ser, original)
def test_setitem_sequence_broadcasts(self, data):
arr = pd.Series(data)
arr[[0, 1]] = data[2]
assert arr[0] == data[2]
assert arr[1] == data[2]
@pytest.mark.parametrize('setter', ['loc', 'iloc'])
def test_setitem_scalar(self, data, setter):
arr = pd.Series(data)
setter = getattr(arr, setter)
operator.setitem(setter, 0, data[1])
assert arr[0] == data[1]
def test_setitem_loc_scalar_mixed(self, data):
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
df.loc[0, 'B'] = data[1]
assert df.loc[0, 'B'] == data[1]
def test_setitem_loc_scalar_single(self, data):
df = pd.DataFrame({"B": data})
df.loc[10, 'B'] = data[1]
assert df.loc[10, 'B'] == data[1]
    def test_setitem_loc_scalar_multiple_homogeneous(self, data):
df = pd.DataFrame({"A": data, "B": data})
df.loc[10, 'B'] = data[1]
assert df.loc[10, 'B'] == data[1]
def test_setitem_iloc_scalar_mixed(self, data):
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
df.iloc[0, 1] = data[1]
assert df.loc[0, 'B'] == data[1]
def test_setitem_iloc_scalar_single(self, data):
df = pd.DataFrame({"B": data})
df.iloc[10, 0] = data[1]
assert df.loc[10, 'B'] == data[1]
    def test_setitem_iloc_scalar_multiple_homogeneous(self, data):
df = pd.DataFrame({"A": data, "B": data})
df.iloc[10, 1] = data[1]
assert df.loc[10, 'B'] == data[1]
@pytest.mark.parametrize('as_callable', [True, False])
@pytest.mark.parametrize('setter', ['loc', None])
def test_setitem_mask_aligned(self, data, as_callable, setter):
ser = pd.Series(data)
mask = np.zeros(len(data), dtype=bool)
mask[:2] = True
if as_callable:
mask2 = lambda x: mask
else:
mask2 = mask
if setter:
# loc
target = getattr(ser, setter)
else:
# Series.__setitem__
target = ser
operator.setitem(target, mask2, data[5:7])
ser[mask2] = data[5:7]
assert ser[0] == data[5]
assert ser[1] == data[6]
@pytest.mark.parametrize('setter', ['loc', None])
def test_setitem_mask_broadcast(self, data, setter):
ser = pd.Series(data)
mask = np.zeros(len(data), dtype=bool)
mask[:2] = True
if setter: # loc
target = getattr(ser, setter)
else: # __setitem__
target = ser
operator.setitem(target, mask, data[10])
assert ser[0] == data[10]
assert ser[1] == data[10]
def test_setitem_expand_columns(self, data):
df = pd.DataFrame({"A": data})
result = df.copy()
result['B'] = 1
expected = pd.DataFrame({"A": data, "B": [1] * len(data)})
self.assert_frame_equal(result, expected)
result = df.copy()
result.loc[:, 'B'] = 1
self.assert_frame_equal(result, expected)
# overwrite with new type
result['B'] = data
expected = pd.DataFrame({"A": data, "B": data})
self.assert_frame_equal(result, expected)
def test_setitem_expand_with_extension(self, data):
df = pd.DataFrame({"A": [1] * len(data)})
result = df.copy()
result['B'] = data
expected = pd.DataFrame({"A": [1] * len(data), "B": data})
self.assert_frame_equal(result, expected)
result = df.copy()
result.loc[:, 'B'] = data
self.assert_frame_equal(result, expected)
def test_setitem_frame_invalid_length(self, data):
df = pd.DataFrame({"A": [1] * len(data)})
xpr = "Length of values does not match length of index"
with tm.assert_raises_regex(ValueError, xpr):
df['B'] = data[:5]
@pytest.mark.xfail(reason="GH-20441: setitem on extension types.")
def test_setitem_tuple_index(self, data):
s = pd.Series(data[:2], index=[(0, 0), (0, 1)])
expected = pd.Series(data.take([1, 1]), index=s.index)
s[(0, 1)] = data[1]
self.assert_series_equal(s, expected)
| bsd-3-clause |