repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---
joshloyal/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
    """The function to predict."""
    return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluation of the real function and the predictions
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                n_estimators=250, max_depth=3,
                                learning_rate=.1, min_samples_leaf=9,
                                min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the predictions and the 90% prediction interval obtained
# from the two quantile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
         np.concatenate([y_upper, y_lower[::-1]]),
         alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
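# Optional sanity check (a rough sketch, not part of the original example):
# estimate the empirical coverage of the 90% interval on the training data.
# X, y and alpha are reused from above; the two quantile models are refit here
# because `clf` was last refit with loss='ls'. Coverage measured on the data
# the models were trained on is only indicative.
lower_model = GradientBoostingRegressor(loss='quantile', alpha=1.0 - alpha,
                                        n_estimators=250, max_depth=3,
                                        learning_rate=.1, min_samples_leaf=9,
                                        min_samples_split=9).fit(X, y)
upper_model = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                        n_estimators=250, max_depth=3,
                                        learning_rate=.1, min_samples_leaf=9,
                                        min_samples_split=9).fit(X, y)
inside = (y >= lower_model.predict(X)) & (y <= upper_model.predict(X))
print("Empirical coverage on the training data: %.2f" % inside.mean())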
| bsd-3-clause |
kaiserroll14/301finalproject | main/pandas/tests/test_series.py | 9 | 288883 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import re
import sys
from datetime import datetime, timedelta
import operator
import string
from inspect import getargspec
from itertools import product, starmap
from distutils.version import LooseVersion
import warnings
import random
import nose
from numpy import nan, inf
import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range,
date_range, period_range, timedelta_range, _np_version_under1p8)
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import Timestamp, DatetimeIndex
from pandas.tseries.tdi import Timedelta, TimedeltaIndex
import pandas.core.common as com
import pandas.core.config as cf
import pandas.lib as lib
import pandas.core.datetools as datetools
import pandas.core.nanops as nanops
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_almost_equal,
assert_frame_equal,
assert_index_equal,
ensure_clean)
import pandas.util.testing as tm
#------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
_multiprocess_can_split_ = True
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEqual(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEqual(result.name, self.ts.name)
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
self.ts.index.name = None
self.assertIsNone(self.ts.index.name)
self.assertIs(self.ts, self.ts)
cp = self.ts.copy()
cp.index.name = 'foo'
com.pprint_thing(self.ts.index.name)
self.assertIsNone(self.ts.index.name)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEqual(result.name, self.ts.name)
def test_dt_namespace_accessor(self):
# GH 7207
# test .dt namespace accessor
ok_for_base = ['year','month','day','hour','minute','second','weekofyear','week','dayofweek','weekday','dayofyear','quarter','freq','days_in_month','daysinmonth']
ok_for_period = ok_for_base + ['qyear']
ok_for_period_methods = ['strftime']
ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end', 'tz']
ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert', 'normalize', 'strftime']
ok_for_td = ['days','seconds','microseconds','nanoseconds']
ok_for_td_methods = ['components','to_pytimedelta','total_seconds']
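# For context (illustrative, not part of the original test): the lists above
# enumerate the attributes/methods the .dt accessor is expected to expose for
# period-, datetime- and timedelta-dtype Series, e.g.
# Series(date_range('20130101', periods=3)).dt.day returns Series([1, 2, 3]).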
def get_expected(s, name):
result = getattr(Index(s._values),prop)
if isinstance(result, np.ndarray):
if com.is_integer_dtype(result):
result = result.astype('int64')
elif not com.is_list_like(result):
return result
return Series(result,index=s.index)
def compare(s, name):
a = getattr(s.dt,prop)
b = get_expected(s,prop)
if not (com.is_list_like(a) and com.is_list_like(b)):
self.assertEqual(a,b)
else:
tm.assert_series_equal(a,b)
# datetimeindex
for s in [Series(date_range('20130101',periods=5)),
Series(date_range('20130101',periods=5,freq='s')),
Series(date_range('20130101 00:00:00',periods=5,freq='ms'))]:
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt, prop)
result = s.dt.to_pydatetime()
self.assertIsInstance(result,np.ndarray)
self.assertTrue(result.dtype == object)
result = s.dt.tz_localize('US/Eastern')
expected = Series(DatetimeIndex(s.values).tz_localize('US/Eastern'),index=s.index)
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
self.assertEqual(str(tz_result), 'US/Eastern')
freq_result = s.dt.freq
self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)
# let's localize, then convert
result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
expected = Series(DatetimeIndex(s.values).tz_localize('UTC').tz_convert('US/Eastern'),index=s.index)
tm.assert_series_equal(result, expected)
# datetimeindex with tz
s = Series(date_range('20130101',periods=5,tz='US/Eastern'))
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt,prop)
result = s.dt.to_pydatetime()
self.assertIsInstance(result,np.ndarray)
self.assertTrue(result.dtype == object)
result = s.dt.tz_convert('CET')
expected = Series(s._values.tz_convert('CET'),index=s.index)
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
self.assertEqual(str(tz_result), 'CET')
freq_result = s.dt.freq
self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)
# timedeltaindex
for s in [Series(timedelta_range('1 day',periods=5),index=list('abcde')),
Series(timedelta_range('1 day 01:23:45',periods=5,freq='s')),
Series(timedelta_range('2 days 01:23:45.012345',periods=5,freq='ms'))]:
for prop in ok_for_td:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_td_methods:
getattr(s.dt, prop)
result = s.dt.components
self.assertIsInstance(result,DataFrame)
tm.assert_index_equal(result.index,s.index)
result = s.dt.to_pytimedelta()
self.assertIsInstance(result,np.ndarray)
self.assertTrue(result.dtype == object)
result = s.dt.total_seconds()
self.assertIsInstance(result,pd.Series)
self.assertTrue(result.dtype == 'float64')
freq_result = s.dt.freq
self.assertEqual(freq_result, TimedeltaIndex(s.values, freq='infer').freq)
# both
index = date_range('20130101',periods=3,freq='D')
s = Series(date_range('20140204',periods=3,freq='s'),index=index)
tm.assert_series_equal(s.dt.year,Series(np.array([2014,2014,2014],dtype='int64'),index=index))
tm.assert_series_equal(s.dt.month,Series(np.array([2,2,2],dtype='int64'),index=index))
tm.assert_series_equal(s.dt.second,Series(np.array([0,1,2],dtype='int64'),index=index))
tm.assert_series_equal(s.dt.normalize(), pd.Series([s[0]] * 3, index=index))
# periodindex
for s in [Series(period_range('20130101',periods=5,freq='D'))]:
for prop in ok_for_period:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_period_methods:
getattr(s.dt, prop)
freq_result = s.dt.freq
self.assertEqual(freq_result, PeriodIndex(s.values).freq)
# test limited display api
def get_dir(s):
results = [ r for r in s.dt.__dir__() if not r.startswith('_') ]
return list(sorted(set(results)))
s = Series(date_range('20130101',periods=5,freq='D'))
results = get_dir(s)
tm.assert_almost_equal(results,list(sorted(set(ok_for_dt + ok_for_dt_methods))))
s = Series(period_range('20130101',periods=5,freq='D').asobject)
results = get_dir(s)
tm.assert_almost_equal(results, list(sorted(set(ok_for_period + ok_for_period_methods))))
# 11295
# ambiguous time error on the conversions
s = Series(pd.date_range('2015-01-01', '2016-01-01', freq='T'))
s = s.dt.tz_localize('UTC').dt.tz_convert('America/Chicago')
results = get_dir(s)
tm.assert_almost_equal(results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))
expected = Series(pd.date_range('2015-01-01',
'2016-01-01',
freq='T',
tz='UTC').tz_convert('America/Chicago'))
tm.assert_series_equal(s, expected)
# no setting allowed
s = Series(date_range('20130101',periods=5,freq='D'))
with tm.assertRaisesRegexp(ValueError, "modifications"):
s.dt.hour = 5
# trying to set a copy
with pd.option_context('chained_assignment','raise'):
def f():
s.dt.hour[0] = 5
self.assertRaises(com.SettingWithCopyError, f)
def test_dt_accessor_no_new_attributes(self):
# https://github.com/pydata/pandas/issues/10673
s = Series(date_range('20130101',periods=5,freq='D'))
with tm.assertRaisesRegexp(AttributeError, "You cannot add any new attribute"):
s.dt.xlabel = "a"
def test_strftime(self):
# GH 10086
s = Series(date_range('20130101', periods=5))
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['2013/01/01', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])
tm.assert_series_equal(result, expected)
s = Series(date_range('2015-02-03 11:22:33.4567', periods=5))
result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
expected = Series(['2015/02/03 11-22-33', '2015/02/04 11-22-33', '2015/02/05 11-22-33',
'2015/02/06 11-22-33', '2015/02/07 11-22-33'])
tm.assert_series_equal(result, expected)
s = Series(period_range('20130101', periods=5))
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['2013/01/01', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])
tm.assert_series_equal(result, expected)
s = Series(period_range('2015-02-03 11:22:33.4567', periods=5, freq='s'))
result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
expected = Series(['2015/02/03 11-22-33', '2015/02/03 11-22-34', '2015/02/03 11-22-35',
'2015/02/03 11-22-36', '2015/02/03 11-22-37'])
tm.assert_series_equal(result, expected)
s = Series(date_range('20130101', periods=5))
s.iloc[0] = pd.NaT
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['NaT', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])
tm.assert_series_equal(result, expected)
datetime_index = date_range('20150301', periods=5)
result = datetime_index.strftime("%Y/%m/%d")
expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04', '2015/03/05'], dtype=object)
self.assert_numpy_array_equal(result, expected)
period_index = period_range('20150301', periods=5)
result = period_index.strftime("%Y/%m/%d")
expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04', '2015/03/05'], dtype=object)
self.assert_numpy_array_equal(result, expected)
s = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)])
result = s.dt.strftime('%Y-%m-%d %H:%M:%S')
expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"])
tm.assert_series_equal(result, expected)
s = Series(period_range('20130101', periods=4, freq='H'))
result = s.dt.strftime('%Y/%m/%d %H:%M:%S')
expected = Series(["2013/01/01 00:00:00", "2013/01/01 01:00:00",
"2013/01/01 02:00:00", "2013/01/01 03:00:00"])
s = Series(period_range('20130101', periods=4, freq='L'))
result = s.dt.strftime('%Y/%m/%d %H:%M:%S.%l')
expected = Series(["2013/01/01 00:00:00.000", "2013/01/01 00:00:00.001",
"2013/01/01 00:00:00.002", "2013/01/01 00:00:00.003"])
tm.assert_series_equal(result, expected)
def test_valid_dt_with_missing_values(self):
from datetime import date, time
# GH 8689
s = Series(date_range('20130101',periods=5,freq='D'))
s.iloc[2] = pd.NaT
for attr in ['microsecond','nanosecond','second','minute','hour','day']:
expected = getattr(s.dt,attr).copy()
expected.iloc[2] = np.nan
result = getattr(s.dt,attr)
tm.assert_series_equal(result, expected)
result = s.dt.date
expected = Series([date(2013,1,1),date(2013,1,2),np.nan,date(2013,1,4),date(2013,1,5)],dtype='object')
tm.assert_series_equal(result, expected)
result = s.dt.time
expected = Series([time(0),time(0),np.nan,time(0),time(0)],dtype='object')
tm.assert_series_equal(result, expected)
def test_dt_accessor_api(self):
# GH 9322
from pandas.tseries.common import (CombinedDatetimelikeProperties,
DatetimeProperties)
self.assertIs(Series.dt, CombinedDatetimelikeProperties)
s = Series(date_range('2000-01-01', periods=3))
self.assertIsInstance(s.dt, DatetimeProperties)
for s in [Series(np.arange(5)),
Series(list('abcde')),
Series(np.random.randn(5))]:
with tm.assertRaisesRegexp(AttributeError,
"only use .dt accessor"):
s.dt
self.assertFalse(hasattr(s, 'dt'))
def test_tab_completion(self):
# GH 9910
s = Series(list('abcd'))
# Series of str values should have .str but not .dt/.cat in __dir__
self.assertTrue('str' in dir(s))
self.assertTrue('dt' not in dir(s))
self.assertTrue('cat' not in dir(s))
# similarly for .dt
s = Series(date_range('1/1/2015', periods=5))
self.assertTrue('dt' in dir(s))
self.assertTrue('str' not in dir(s))
self.assertTrue('cat' not in dir(s))
# similarly for .cat, but with the twist that str and dt should be there
# if the categories are of that type
# first cat and str
s = Series(list('abbcd'), dtype="category")
self.assertTrue('cat' in dir(s))
self.assertTrue('str' in dir(s)) # as it is a string categorical
self.assertTrue('dt' not in dir(s))
# similar to cat and str
s = Series(date_range('1/1/2015', periods=5)).astype("category")
self.assertTrue('cat' in dir(s))
self.assertTrue('str' not in dir(s))
self.assertTrue('dt' in dir(s)) # as it is a datetime categorical
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEqual(result.name, self.ts.name)
result = self.ts.mul(self.ts)
self.assertEqual(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEqual(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assertIsNone(result.name)
result = self.ts.add(cp)
self.assertIsNone(result.name)
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow']
ops = ops + ['r' + op for op in ops]
for op in ops:
# names match, preserve
s = self.ts.copy()
result = getattr(s, op)(s)
self.assertEqual(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'changed'
result = getattr(s, op)(cp)
self.assertIsNone(result.name)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEqual(result.name, self.ts.name)
def test_combine_first_dt64(self):
from pandas.tseries.tools import to_datetime
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = to_datetime(Series([np.NaN, "2011"]))
rs = s0.combine_first(s1)
xp = to_datetime(Series(['2010', '2011']))
assert_series_equal(rs, xp)
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = Series([np.NaN, "2011"])
rs = s0.combine_first(s1)
xp = Series([datetime(2010, 1, 1), '2011'])
assert_series_equal(rs, xp)
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result,expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index([25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result,expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i':[0]*3, 'b':[False]*3})
vc = df.i.value_counts()
result = vc.get(99,default='Missing')
self.assertEqual(result,'Missing')
vc = df.b.value_counts()
result = vc.get(False,default='Missing')
self.assertEqual(result,3)
result = vc.get(True,default='Missing')
self.assertEqual(result,'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1,5),index=lrange(1,5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2,5),index=lrange(2,5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='object')))
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEqual(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEqual(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEqual(result.name, self.ts.name)
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(lrange(0, len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth, dtype: int64"]
expected = "\n".join(expected)
self.assertEqual(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEqual(result.name, s.name)
self.assertEqual(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
# test big series (diff code path)
s = Series(lrange(0, 1000))
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
s = Series(index=date_range('20010101', '20020101'), name='test')
self.assertIn("Name: test", repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip_name(self.ts)
self.assertEqual(unpickled.name, self.ts.name)
def _pickle_roundtrip_name(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEqual(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEqual(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEqual(result.name, self.ts.name)
class TestNanops(tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
s == s2
s2 == s
def test_sum_zero(self):
arr = np.array([])
self.assertEqual(nanops.nansum(arr), 0)
arr = np.empty((10, 0))
self.assertTrue((nanops.nansum(arr, axis=1) == 0).all())
# GH #844
s = Series([], index=[])
self.assertEqual(s.sum(), 0)
df = DataFrame(np.empty((10, 0)))
self.assertTrue((df.sum(1) == 0).all())
def test_nansum_buglet(self):
s = Series([1.0, np.nan], index=[0, 1])
result = np.nansum(s)
assert_almost_equal(result, 1)
def test_overflow(self):
# GH 6915
# overflowing on the smaller int dtypes
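# (np.arange(5000000) sums to ~1.25e13, well past the int32 maximum of ~2.1e9,
# so the reduction must accumulate in int64 to produce the right answer)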
for dtype in ['int32','int64']:
v = np.arange(5000000,dtype=dtype)
s = Series(v)
# no bottleneck
result = s.sum(skipna=False)
self.assertEqual(int(result),v.sum(dtype='int64'))
result = s.min(skipna=False)
self.assertEqual(int(result),0)
result = s.max(skipna=False)
self.assertEqual(int(result),v[-1])
# use bottleneck if available
result = s.sum()
self.assertEqual(int(result),v.sum(dtype='int64'))
result = s.min()
self.assertEqual(int(result),0)
result = s.max()
self.assertEqual(int(result),v[-1])
for dtype in ['float32', 'float64']:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
# no bottleneck
result = s.sum(skipna=False)
self.assertEqual(result, v.sum(dtype=dtype))
result = s.min(skipna=False)
self.assertTrue(np.allclose(float(result), 0.0))
result = s.max(skipna=False)
self.assertTrue(np.allclose(float(result), v[-1]))
# use bottleneck if available
result = s.sum()
self.assertEqual(result, v.sum(dtype=dtype))
result = s.min()
self.assertTrue(np.allclose(float(result), 0.0))
result = s.max()
self.assertTrue(np.allclose(float(result), v[-1]))
class SafeForSparse(object):
pass
_ts = tm.makeTimeSeries()
class TestSeries(tm.TestCase, CheckNameIntegration):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
self.ts = _ts.copy()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
self.assertNotIsInstance(scalar, float)
# coercion
self.assertEqual(float(Series([1.])), 1.0)
self.assertEqual(int(Series([1.])), 1)
self.assertEqual(long(Series([1.])), 1)
def test_astype(self):
s = Series(np.random.randn(5),name='foo')
for dtype in ['float32','float64','int64','int32']:
astyped = s.astype(dtype)
self.assertEqual(astyped.dtype, dtype)
self.assertEqual(astyped.name, s.name)
def test_TimeSeries_deprecation(self):
# deprecation TimeSeries, #10890
with tm.assert_produces_warning(FutureWarning):
pd.TimeSeries(1,index=date_range('20130101',periods=3))
def test_constructor(self):
# Recognize TimeSeries
with tm.assert_produces_warning(FutureWarning):
self.assertTrue(self.ts.is_time_series)
self.assertTrue(self.ts.index.is_all_dates)
# Pass in Series
derived = Series(self.ts)
with tm.assert_produces_warning(FutureWarning):
self.assertTrue(derived.is_time_series)
self.assertTrue(derived.index.is_all_dates)
self.assertTrue(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEqual(id(self.ts.index), id(derived.index))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assertEqual(mixed.dtype, np.object_)
self.assertIs(mixed[1], np.NaN)
with tm.assert_produces_warning(FutureWarning):
self.assertFalse(self.empty.is_time_series)
self.assertFalse(self.empty.index.is_all_dates)
with tm.assert_produces_warning(FutureWarning):
self.assertFalse(Series({}).is_time_series)
self.assertFalse(Series({}).index.is_all_dates)
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
mixed.name = 'Series'
rs = Series(mixed).name
xp = 'Series'
self.assertEqual(rs, xp)
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
self.assertRaises(NotImplementedError, Series, m)
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2, check_index_type=False)
empty = Series(index=lrange(10))
empty2 = Series(np.nan, index=lrange(10))
assert_series_equal(empty, empty2)
def test_constructor_series(self):
index1 = ['d', 'b', 'a', 'c']
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
assert_series_equal(s2, s1.sort_index())
def test_constructor_iterator(self):
expected = Series(list(range(10)),dtype='int64')
result = Series(range(10),dtype='int64')
assert_series_equal(result, expected)
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(lrange(10))
assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(lrange(10))
assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], fastpath=True)
res = Series(cat)
self.assertTrue(res.values.equals(cat))
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([nan, nan, nan], dtype=float)
assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0, nan, 2], index=index, dtype=float)
assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([nan, nan, nan], dtype=object)
assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([True, nan, False], index=index, dtype=object)
assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
assert_series_equal(result, expected)
from pandas import tslib
data = ma.masked_all((3,), dtype='M8[ns]')
result = Series(data)
expected = Series([tslib.iNaT, tslib.iNaT, tslib.iNaT], dtype='M8[ns]')
assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), tslib.iNaT,
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
tm.assertIsInstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1., 1., 8.]), dtype='i8')
self.assertEqual(s.dtype, np.dtype('i8'))
s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')
self.assertEqual(s.dtype, np.dtype('f8'))
def test_constructor_pass_none(self):
s = Series(None, index=lrange(5))
self.assertEqual(s.dtype, np.float64)
s = Series(None, index=lrange(5), dtype=object)
self.assertEqual(s.dtype, np.object_)
# GH 7431
# inference on the index
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
assert_series_equal(s,expected)
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dtype_nocast(self):
# 1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
self.assertEqual(s[1], 5)
def test_constructor_datelike_coercion(self):
# GH 9477
# incorrectly inferring datetimelike-looking values when object dtype is specified
s = Series([Timestamp('20130101'),'NOV'],dtype=object)
self.assertEqual(s.iloc[0],Timestamp('20130101'))
self.assertEqual(s.iloc[1],'NOV')
self.assertTrue(s.dtype == object)
# the dtype was being reset on the slicing and re-inferred to datetime even
# though the blocks are mixed
belly = '216 3T19'.split()
wing1 = '2T15 4H19'.split()
wing2 = '416 4T20'.split()
mat = pd.to_datetime('2016-01-22 2019-09-07'.split())
df = pd.DataFrame({'wing1':wing1, 'wing2':wing2, 'mat':mat}, index=belly)
result = df.loc['3T19']
self.assertTrue(result.dtype == object)
result = df.loc['216']
self.assertTrue(result.dtype == object)
def test_constructor_dtype_datetime64(self):
import pandas.tslib as tslib
s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
self.assertTrue(isnull(s).all())
# in theory this should be all nulls, but since
# we are not specifying a dtype it is ambiguous
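# (tslib.iNaT is just the int64 sentinel value, so without an explicit
# 'M8[ns]' dtype it is treated as an ordinary integer and nothing is null)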
s = Series(tslib.iNaT, index=lrange(5))
self.assertFalse(isnull(s).all())
s = Series(nan, dtype='M8[ns]', index=lrange(5))
self.assertTrue(isnull(s).all())
s = Series([datetime(2001, 1, 2, 0, 0), tslib.iNaT], dtype='M8[ns]')
self.assertTrue(isnull(s[1]))
self.assertEqual(s.dtype, 'M8[ns]')
s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')
self.assertTrue(isnull(s[1]))
self.assertEqual(s.dtype, 'M8[ns]')
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
self.assertEqual(s.dtype, 'M8[ns]')
s.ix[0] = np.nan
self.assertEqual(s.dtype, 'M8[ns]')
# invalid astypes
for t in ['s', 'D', 'us', 'ms']:
self.assertRaises(TypeError, s.astype, 'M8[%s]' % t)
# GH3414 related
self.assertRaises(TypeError, lambda x: Series(
Series(dates).astype('int') / 1000000, dtype='M8[ms]'))
self.assertRaises(
TypeError, lambda x: Series(dates, dtype='datetime64'))
# invalid dates can be held as object
result = Series([datetime(2,1,1)])
self.assertEqual(result[0], datetime(2,1,1,0,0))
result = Series([datetime(3000,1,1)])
self.assertEqual(result[0], datetime(3000,1,1,0,0))
# don't mix types
result = Series([ Timestamp('20130101'), 1],index=['a','b'])
self.assertEqual(result['a'], Timestamp('20130101'))
self.assertEqual(result['b'], 1)
# GH6529
# coerce datetime64 non-ns properly
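# (non-nanosecond datetime64 units such as 's', 'D', 'ms' and 'us' are all
# converted to 'M8[ns]' when wrapped in a Series, so every construction in the
# loop below compares equal to the ns-based expected Series)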
dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')
values2 = dates.view(np.ndarray).astype('datetime64[ns]')
expected = Series(values2, dates)
for dtype in ['s', 'D', 'ms', 'us', 'ns']:
values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
result = Series(values1, dates)
assert_series_equal(result,expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()],
dtype=object)
series1 = Series(dates2, dates)
self.assert_numpy_array_equal(series1.values,dates2)
self.assertEqual(series1.dtype,object)
# these will correctly infer a datetime
s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101',periods=3)
self.assertTrue(Series(dr).iloc[0].tz is None)
dr = date_range('20130101',periods=3,tz='UTC')
self.assertTrue(str(Series(dr).iloc[0].tz) == 'UTC')
dr = date_range('20130101',periods=3,tz='US/Eastern')
self.assertTrue(str(Series(dr).iloc[0].tz) == 'US/Eastern')
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
self.assertTrue(s.dtype == 'object')
self.assertTrue(s[2] is pd.NaT)
self.assertTrue('NaT' in str(s))
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
self.assertTrue(s.dtype == 'object')
self.assertTrue(s[2] is pd.NaT)
self.assertTrue('NaT' in str(s))
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
self.assertTrue(s.dtype == 'object')
self.assertTrue(s[2] is np.nan)
self.assertTrue('NaN' in str(s))
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range('20130101',periods=3,tz='US/Eastern')
s = Series(dr)
self.assertTrue(s.dtype.name == 'datetime64[ns, US/Eastern]')
self.assertTrue(s.dtype == 'datetime64[ns, US/Eastern]')
self.assertTrue(com.is_datetime64tz_dtype(s.dtype))
self.assertTrue('datetime64[ns, US/Eastern]' in str(s))
# export
result = s.values
self.assertIsInstance(result, np.ndarray)
self.assertTrue(result.dtype == 'datetime64[ns]')
self.assertTrue(dr.equals(pd.DatetimeIndex(result).tz_localize('UTC').tz_convert(tz=s.dt.tz)))
# indexing
result = s.iloc[0]
self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D'))
result = s[0]
self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D'))
result = s[Series([True,True,False],index=s.index)]
assert_series_equal(result,s[0:2])
result = s.iloc[0:1]
assert_series_equal(result,Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1],s.iloc[1:]])
assert_series_equal(result,s)
# astype
result = s.astype(object)
expected = Series(DatetimeIndex(s._values).asobject)
assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz)
assert_series_equal(result, s)
# astype - datetime64[ns, tz]
result = Series(s.values).astype('datetime64[ns, US/Eastern]')
assert_series_equal(result, s)
result = Series(s.values).astype(s.dtype)
assert_series_equal(result, s)
result = s.astype('datetime64[ns, CET]')
expected = Series(date_range('20130101 06:00:00',periods=3,tz='CET'))
assert_series_equal(result, expected)
# short str
self.assertTrue('datetime64[ns, US/Eastern]' in str(s))
# formatting with NaT
result = s.shift()
self.assertTrue('datetime64[ns, US/Eastern]' in str(result))
self.assertTrue('NaT' in str(result))
# long str
t = Series(date_range('20130101',periods=1000,tz='US/Eastern'))
self.assertTrue('datetime64[ns, US/Eastern]' in str(t))
result = pd.DatetimeIndex(s,freq='infer')
tm.assert_index_equal(result, dr)
# inference
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')])
self.assertTrue(s.dtype == 'datetime64[ns, US/Pacific]')
self.assertTrue(lib.infer_dtype(s) == 'datetime64')
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')])
self.assertTrue(s.dtype == 'object')
self.assertTrue(lib.infer_dtype(s) == 'datetime')
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range('20130101',periods=5,freq='D')
s = Series(pi)
expected = Series(pi.asobject)
assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {'a': 0., 'b': 1., 'c': 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx)
expected.ix[0] = 0
expected.ix[1] = 1
assert_series_equal(result, expected)
def test_constructor_dict_multiindex(self):
check = lambda result, expected: tm.assert_series_equal(
result, expected, check_dtype=True, check_index_type=True,
check_series_type=True)
d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}
_d = sorted(d.items())
ser = Series(d)
expected = Series([x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d]))
check(ser, expected)
d['z'] = 111.
_d.insert(0, ('z', d['z']))
ser = Series(d)
expected = Series(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False))
ser = ser.reindex(index=expected.index)
check(ser, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = Series(data)
refseries = Series(dict(compat.iteritems(data)))
assert_series_equal(refseries, series)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
assert_series_equal(result_datetime64, expected)
assert_series_equal(result_datetime, expected)
assert_series_equal(result_Timestamp, expected)
def test_orderedDict_ctor(self):
# GH3283
import pandas
import random
data = OrderedDict([('col%s' % i, random.random()) for i in range(12)])
s = pandas.Series(data)
self.assertTrue(all(s.values == list(data.values())))
def test_orderedDict_subclass_ctor(self):
# GH3283
import pandas
import random
class A(OrderedDict):
pass
data = A([('col%s' % i, random.random()) for i in range(12)])
s = pandas.Series(data)
self.assertTrue(all(s.values == list(data.values())))
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_constructor_set(self):
values = set([1, 2, 3, 4, 5])
self.assertRaises(TypeError, Series, values)
values = frozenset(values)
self.assertRaises(TypeError, Series, values)
def test_fromDict(self):
data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
series = Series(data)
self.assertTrue(tm.is_sorted(series.index))
data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}
series = Series(data)
self.assertEqual(series.dtype, np.object_)
data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}
series = Series(data)
self.assertEqual(series.dtype, np.object_)
data = {'a': '0', 'b': '1'}
series = Series(data, dtype=float)
self.assertEqual(series.dtype, np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(Exception, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
tm.assertIsInstance(series.index, Index)
def test_array_finalize(self):
pass
def test_pop(self):
# GH 6600
df = DataFrame({
'A': 0,
'B': np.arange(5,dtype='int64'),
'C': 0,
})
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
self.assertRaises(TypeError, hash, s_empty)
self.assertRaises(TypeError, hash, s)
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assertEqual(nans.dtype, np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assertEqual(strings.dtype, np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assertEqual(dates.dtype, 'M8[ns]')
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_numpy_array_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan,index=['C'],dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=datetools.bday) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
#ts[mask_shifted]
#ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
#ts.ix[mask_shifted]
#ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(np.isscalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5),index=['A','A','B','C','C'],dtype=np.int64)
expected = Series([3,4],index=['C','C'],dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df>5)
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5],index=[1]))
assert_series_equal(s2,expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5],index=[1]))
assert_series_equal(s2,expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda : self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan,2,3])
s = Series([1,2,3])
s.iloc[0] = np.nan
assert_series_equal(s,expected)
s = Series([1,2,3])
s.loc[0] = np.nan
assert_series_equal(s,expected)
s = Series([1,2,3])
s[0] = np.nan
assert_series_equal(s,expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s,Series([np.nan]))
s = Series([False,True])
s.loc[0] = np.nan
assert_series_equal(s,Series([np.nan,1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_reshape_non_2d(self):
# GH 4554
x = Series(np.random.random(201), name='x')
self.assertTrue(x.reshape(x.shape,) is x)
# GH 2719
a = Series([1, 2, 3, 4])
result = a.reshape(2, 2)
expected = a.values.reshape(2, 2)
tm.assert_numpy_array_equal(result, expected)
self.assertTrue(type(result) is type(expected))
def test_reshape_2d_return_array(self):
x = Series(np.random.random(201), name='x')
result = x.reshape((-1, 1))
self.assertNotIsInstance(result, Series)
result2 = np.reshape(x, (-1, 1))
self.assertNotIsInstance(result2, Series)
result = x[:, None]
expected = x.reshape((-1, 1))
assert_almost_equal(result, expected)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert(s.shape == rs.shape)
assert(rs is not s)
# test alignment
cond = Series([True,False,False,True,False],index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(
ValueError, s.__setitem__, tuple([[[True, False]]]), [0, 2, 3])
self.assertRaises(
ValueError, s.__setitem__, tuple([[[True, False]]]), [])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we would be forced to change the itemsize of the
# input dtype, which is not supported
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5,4,3,2,1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1,2,3,4])
result = s.where(s>2,np.nan)
expected = Series([np.nan,np.nan,3,4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan,index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0,1,2])
assert_series_equal(s.astype(np.int64), expected)
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0,'b',1,'d','e','f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a','b','c',0,1,'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0,1,2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0,1,2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)),'b','c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [np.resize([True, False, False, False, False], size), # First element should be set
np.resize([True, False], size), # Set alternating elements
np.resize([False], size)]: # No element should be set
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item,)]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[i]
for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1,s2])
result = comb.where(comb < 2)
expected = Series([0,1,np.nan,0,1,np.nan],index=[0,1,2,0,1,2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb<1] = 5
expected = Series([5,1,2,5,1,2],index=[0,1,2,0,1,2])
assert_series_equal(comb, expected)
comb[comb<2] += 10
expected = Series([5,11,2,5,11,2],index=[0,1,2,0,1,2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
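# an all-False mask replaces every element; numeric fill values are
# coerced to datetime64[ns]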
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
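# mask(cond) is the complement of where(cond): values where cond is True
# get replaced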
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1,2,3,4])
result = s.mask(s>2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [np.resize([True, False, False, False, False], size), # First element should be set
np.resize([True, False], size), # Set alternating elements
np.resize([False], size)]: # No element should be set
for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item,)]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[i]
for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_drop(self):
# unique
s = Series([1,2],index=['one','two'])
expected = Series([1],index=['one'])
result = s.drop(['two'])
assert_series_equal(result,expected)
result = s.drop('two', axis='rows')
assert_series_equal(result,expected)
# non-unique
# GH 5248
s = Series([1,1,2],index=['one','two','one'])
expected = Series([1,2],index=['one','one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result,expected)
result = s.drop('two')
assert_series_equal(result,expected)
expected = Series([1],index=['two'])
result = s.drop(['one'])
assert_series_equal(result,expected)
result = s.drop('one')
assert_series_equal(result,expected)
# single string/tuple-like
s = Series(range(3),index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a',))
# errors='ignore'
s = Series(range(3),index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2,3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3],index=[False])
assert_series_equal(result,expected)
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s>1, 'X')
self.assertFalse(com.is_integer(w[0]))
self.assertTrue(com.is_integer(w[1]))
self.assertTrue(com.is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s>1, ['X', 'Y', 'Z'])
self.assertFalse(com.is_integer(w[0]))
self.assertTrue(com.is_integer(w[1]))
self.assertTrue(com.is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s>1, np.array(['X', 'Y', 'Z']))
self.assertFalse(com.is_integer(w[0]))
self.assertTrue(com.is_integer(w[1]))
self.assertTrue(com.is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# with Nones
ots = self.ts.astype('O')
ots[::2] = None
repr(ots)
# various names
for name in ['', 1, 1.2, 'foo', u('\u03B1\u03B2\u03B3'),
'loooooooooooooooooooooooooooooooooooooooooooooooooooong',
('foo', 'bar', 'baz'),
(1, 2),
('foo', 1, 2.3),
(u('\u03B1'), u('\u03B2'), u('\u03B3')),
(u('\u03B1'), 'bar')]:
self.series.name = name
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
# 0 as name
ser = Series(np.random.randn(100), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
# tidy repr
ser = Series(np.random.randn(1001), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
ser = Series(["a\n\r\tb"], name=["a\n\r\td"], index=["a\n\r\tf"])
self.assertFalse("\t" in repr(ser))
self.assertFalse("\r" in repr(ser))
self.assertFalse("a\n" in repr(ser))
# with empty series (#4651)
s = Series([], dtype=np.int64, name='foo')
self.assertEqual(repr(s), 'Series([], Name: foo, dtype: int64)')
s = Series([], dtype=np.int64, name=None)
self.assertEqual(repr(s), 'Series([], dtype: int64)')
def test_tidy_repr(self):
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a) # should not raise exception
def test_repr_bool_fails(self):
s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
# it works (with no Cython exception barf)!
repr(s)
finally:
sys.stderr = tmp
self.assertEqual(buf.getvalue(), '')
def test_repr_name_iterable_indexable(self):
s = Series([1, 2, 3], name=np.int64(3))
# it works!
repr(s)
s.name = (u("\u05d0"),) * 2
repr(s)
def test_repr_should_return_str(self):
# http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
# http://docs.python.org/reference/datamodel.html#object.__repr__
# ...The return value must be a string object.
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
df = Series(data, index=index1)
self.assertTrue(type(df.__repr__()) == str) # both py2 / 3
def test_repr_max_rows(self):
# GH 6863
with pd.option_context('max_rows', None):
str(Series(range(1001))) # should not raise exception
def test_unicode_string_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
bytes(df)
else:
str(df)
def test_timeseries_repr_object_dtype(self):
index = Index([datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)], dtype=object)
ts = Series(np.random.randn(len(index)), index)
repr(ts)
ts = tm.makeTimeSeries(1000)
self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))
ts2 = ts.ix[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
def test_timeseries_periodindex(self):
# GH2891
from pandas import period_range
prng = period_range('1/1/2011', '1/1/2012', freq='M')
ts = Series(np.random.randn(len(prng)), prng)
new_ts = self.round_trip_pickle(ts)
self.assertEqual(new_ts.index.freq, 'M')
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
self.assertIs(getkeys(), self.ts.index)
def test_values(self):
self.assert_numpy_array_equal(self.ts, self.ts.values)
def test_iteritems(self):
for idx, val in compat.iteritems(self.series):
self.assertEqual(val, self.series[idx])
for idx, val in compat.iteritems(self.ts):
self.assertEqual(val, self.ts[idx])
# assert it is lazy (generators don't define reverse, lists do)
self.assertFalse(hasattr(self.series.iteritems(), 'reverse'))
def test_sum(self):
self._check_stat_op('sum', np.sum, check_allna=True)
def test_sum_inf(self):
import pandas.core.nanops as nanops
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
self.assertTrue(np.isinf(s.sum()))
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
with cf.option_context("mode.use_inf_as_null", True):
assert_almost_equal(s.sum(), s2.sum())
res = nanops.nansum(arr, axis=1)
self.assertTrue(np.isinf(res).all())
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers (regression check for a past failure)
int_ts = Series(np.ones(10, dtype=int), index=lrange(10))
self.assertAlmostEqual(np.median(int_ts), int_ts.median())
def test_mode(self):
s = Series([12, 12, 11, 10, 19, 11])
exp = Series([11, 12])
assert_series_equal(s.mode(), exp)
assert_series_equal(Series([1, 2, 3]).mode(), Series([], dtype='int64'))
lst = [5] * 20 + [1] * 10 + [6] * 25
np.random.shuffle(lst)
s = Series(lst)
assert_series_equal(s.mode(), Series([6]))
s = Series([5] * 10)
assert_series_equal(s.mode(), Series([5]))
s = Series(lst)
s[0] = np.nan
assert_series_equal(s.mode(), Series([6.]))
s = Series(list('adfasbasfwewefwefweeeeasdfasnbam'))
assert_series_equal(s.mode(), Series(['e']))
s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]')
assert_series_equal(s.mode(), Series([], dtype="M8[ns]"))
s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03',
'2013-01-02'], dtype='M8[ns]')
assert_series_equal(s.mode(), Series(['2011-01-03', '2013-01-02'],
dtype='M8[ns]'))
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_var_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
result = self.ts.std(ddof=4)
expected = np.std(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
result = self.ts.var(ddof=4)
expected = np.var(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.var(ddof=1)
self.assertTrue(isnull(result))
result = s.std(ddof=1)
self.assertTrue(isnull(result))
def test_sem(self):
alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.ts.sem(ddof=4)
expected = np.std(self.ts.values, ddof=4)/np.sqrt(len(self.ts.values))
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.sem(ddof=1)
self.assertTrue(isnull(result))
def test_skew(self):
tm._skip_if_no_scipy()
from scipy.stats import skew
alt = lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
# test corner cases, skew() returns NaN unless there's at least 3 values
min_N = 3
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
self.assertTrue(np.isnan(s.skew()))
self.assertTrue(np.isnan(df.skew()).all())
else:
self.assertEqual(0, s.skew())
self.assertTrue((df.skew() == 0).all())
def test_kurt(self):
tm._skip_if_no_scipy()
from scipy.stats import kurtosis
alt = lambda x: kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
self.assertAlmostEqual(s.kurt(), s.kurt(level=0)['bar'])
# test corner cases, kurt() returns NaN unless there's at least 4 values
min_N = 4
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
self.assertTrue(np.isnan(s.kurt()))
self.assertTrue(np.isnan(df.kurt()).all())
else:
self.assertEqual(0, s.kurt())
self.assertTrue((df.kurt() == 0).all())
def test_argsort(self):
self._check_accum_op('argsort')
argsorted = self.ts.argsort()
self.assertTrue(issubclass(argsorted.dtype.type, np.integer))
# GH 2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])
self.assertEqual(s.dtype, 'datetime64[ns]')
shifted = s.shift(-1)
self.assertEqual(shifted.dtype, 'datetime64[ns]')
self.assertTrue(isnull(shifted[4]))
result = s.argsort()
expected = Series(lrange(5), dtype='int64')
assert_series_equal(result, expected)
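# the NaT entry gets -1 from argsort rather than a valid position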
result = shifted.argsort()
expected = Series(lrange(4) + [-1], dtype='int64')
assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind='mergesort')
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind='mergesort')
qexpected = np.argsort(s.values, kind='quicksort')
self.assert_numpy_array_equal(mindexer, mexpected)
self.assert_numpy_array_equal(qindexer, qexpected)
self.assertFalse(np.array_equal(qindexer, mindexer))
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
assert_series_equal(s, result)
# no change, labels
result = s.reorder_levels(['L0', 'L1', 'L2'])
assert_series_equal(s, result)
# rotate, position
result = s.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = Series(np.arange(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = Series(range(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels(['L0', 'L0', 'L0'])
assert_series_equal(result, expected)
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def test_cummin(self):
self.assert_numpy_array_equal(self.ts.cummin(),
np.minimum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.valid())
self.assert_numpy_array_equal(result, expected)
def test_cummax(self):
self.assert_numpy_array_equal(self.ts.cummax(),
np.maximum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.valid())
self.assert_numpy_array_equal(result, expected)
def test_cummin_datetime64(self):
s = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))
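# with skipna=True the NaT positions stay NaT in the result; with
# skipna=False they are filled with the running minimum seen so far
# (the leading NaT stays NaT)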
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-1']))
result = s.cummin(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1']))
result = s.cummin(skipna=False)
self.assert_series_equal(expected, result)
def test_cummax_datetime64(self):
s = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-2', 'NaT', '2000-1-3']))
result = s.cummax(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3']))
result = s.cummax(skipna=False)
self.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '1 min', 'NaT', '1 min', ]))
result = s.cummin(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', '2 min', '1 min', '1 min', '1 min', ]))
result = s.cummin(skipna=False)
self.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '2 min', 'NaT', '3 min', ]))
result = s.cummax(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', '2 min', '2 min', '2 min', '3 min', ]))
result = s.cummax(skipna=False)
self.assert_series_equal(expected, result)
def test_npdiff(self):
raise nose.SkipTest("skipping due to Series no longer being an "
"ndarray")
# no longer works as the return type of np.diff is now np.ndarray
s = Series(np.arange(5))
r = np.diff(s)
assert_series_equal(Series([nan, 0, 0, 0, nan]), r)
def _check_stat_op(self, name, alternate, check_objects=False, check_allna=False):
import pandas.core.nanops as nanops
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# idxmax, idxmin, min, and max are valid for dates
if name not in ['max','min']:
ds = Series(date_range('1/1/2001', periods=10))
self.assertRaises(TypeError, f, ds)
# skipna or no
self.assertTrue(notnull(f(self.series)))
self.assertTrue(isnull(f(self.series, skipna=False)))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona.values))
assert_almost_equal(f(self.series), alternate(nona.values))
allna = self.series * nan
if check_allna:
# xref 9422
# bottleneck >= 1.0 gives 0.0 for an all-NA Series sum
try:
self.assertTrue(nanops._USE_BOTTLENECK)
import bottleneck as bn
self.assertTrue(bn.__version__ >= LooseVersion('1.0'))
self.assertEqual(f(allna),0.0)
except:
self.assertTrue(np.isnan(f(allna)))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# 2888
l = [0]
l.extend(lrange(2 ** 40, 2 ** 40+1000))
s = Series(l, dtype='int64')
assert_almost_equal(float(f(s)), float(alternate(s.values)))
# check date range
if check_objects:
s = Series(bdate_range('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
self.assertEqual(res, exp)
# check on string data
if name not in ['sum','min','max']:
self.assertRaises(TypeError, f, Series(list('abc')))
# Invalid axis.
self.assertRaises(ValueError, f, self.series, axis=1)
# Unimplemented numeric_only parameter.
if 'numeric_only' in getargspec(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
self.series, numeric_only=True)
testit()
try:
import bottleneck as bn
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except ImportError:
pass
def _check_accum_op(self, name):
func = getattr(np, name)
self.assert_numpy_array_equal(func(self.ts), func(np.array(self.ts)))
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_numpy_array_equal(result, expected)
def test_round(self):
# numpy.round doesn't preserve metadata, probably a numpy bug,
# re: GH #314
result = np.round(self.ts, 2)
expected = Series(np.round(self.ts.values, 2), index=self.ts.index,
name='ts')
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.], index=lrange(3))
result = s.prod()
self.assertNotIsInstance(result, Series)
def test_quantile(self):
from numpy import percentile
q = self.ts.quantile(0.1)
self.assertEqual(q, percentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# object dtype
q = Series(self.ts,dtype=object).quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# datetime64[ns] dtype
dts = self.ts.index.to_series()
q = dts.quantile(.2)
self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(.25)
self.assertEqual(q, pd.to_timedelta('24:00:00'))
# GH7661
result = Series([np.timedelta64('NaT')]).sum()
self.assertTrue(result is pd.NaT)
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assertRaisesRegexp(ValueError, msg):
self.ts.quantile(invalid)
def test_quantile_multi(self):
from numpy import percentile
qs = [.1, .9]
result = self.ts.quantile(qs)
expected = pd.Series([percentile(self.ts.valid(), 10),
percentile(self.ts.valid(), 90)],
index=qs, name=self.ts.name)
assert_series_equal(result, expected)
dts = self.ts.index.to_series()
dts.name = 'xxx'
result = dts.quantile((.2, .2))
expected = Series([Timestamp('2000-01-10 19:12:00'),
Timestamp('2000-01-10 19:12:00')],
index=[.2, .2], name='xxx')
assert_series_equal(result, expected)
result = self.ts.quantile([])
expected = pd.Series([], name=self.ts.name, index=Index([], dtype=float))
assert_series_equal(result, expected)
def test_append(self):
appendedSeries = self.series.append(self.objSeries)
for idx, value in compat.iteritems(appendedSeries):
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.objSeries.index:
self.assertEqual(value, self.objSeries[idx])
else:
self.fail("orphaned index!")
self.assertRaises(ValueError, self.ts.append, self.ts,
verify_integrity=True)
def test_append_many(self):
pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
result = pieces[0].append(pieces[1:])
assert_series_equal(result, self.ts)
def test_all_any(self):
ts = tm.makeTimeSeries()
bool_series = ts > 0
self.assertFalse(bool_series.all())
self.assertTrue(bool_series.any())
# Alternative types, with implicit 'object' dtype.
s = Series(['abc', True])
self.assertEqual('abc', s.any()) # 'abc' || True => 'abc'
def test_all_any_params(self):
# Check skipna, with implicit 'object' dtype.
s1 = Series([np.nan, True])
s2 = Series([np.nan, False])
self.assertTrue(s1.all(skipna=False)) # nan && True => True
self.assertTrue(s1.all(skipna=True))
self.assertTrue(np.isnan(s2.any(skipna=False))) # nan || False => nan
self.assertFalse(s2.any(skipna=True))
# Check level.
s = pd.Series([False, False, True, True, False, True],
index=[0, 0, 1, 1, 2, 2])
assert_series_equal(s.all(level=0), Series([False, True, False]))
assert_series_equal(s.any(level=0), Series([False, True, True]))
# bool_only is not implemented with level option.
self.assertRaises(NotImplementedError, s.any, bool_only=True, level=0)
self.assertRaises(NotImplementedError, s.all, bool_only=True, level=0)
# bool_only is not implemented alone.
self.assertRaises(NotImplementedError, s.any, bool_only=True)
self.assertRaises(NotImplementedError, s.all, bool_only=True)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_modulo(self):
# GH3590, modulo as ints
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
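# integer % 0 yields NaN (upcasting to float) instead of raising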
result = p['first'] % p['second']
expected = Series(p['first'].values %
p['second'].values, dtype='float64')
expected.iloc[0:3] = np.nan
assert_series_equal(result, expected)
result = p['first'] % 0
expected = Series(np.nan, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
expected = Series(p['first'].values % p['second'].values)
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
result2 = p['second'] % p['first']
self.assertFalse(np.array_equal(result, result2))
# GH 9144
s = Series([0, 1])
result = s % 0
expected = Series([nan, nan])
assert_series_equal(result, expected)
result = 0 % s
expected = Series([nan, 0.0])
assert_series_equal(result, expected)
def test_div(self):
# integer division is no longer used for any ops, but the 0's still need handling
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
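# division by zero yields +/-inf (and NaN for 0/0), upcasting to float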
result = p['first'] / p['second']
expected = Series(p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'), check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan,1.,-1.])
result = s / 0
expected = Series([np.nan,np.inf,-np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1,0), 'second': (-0.01,-0.02)})
expected = Series([-0.01,-np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_almost_equal(cython_or_numpy, python)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other):
_check_op(series, other, operator.gt)
_check_op(series, other, operator.ge)
_check_op(series, other, operator.eq)
_check_op(series, other, operator.lt)
_check_op(series, other, operator.le)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([timedelta(days=1)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([timedelta(days=1),timedelta(days=2),np.timedelta64(1,'s')])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# mixed with NaT
from pandas import tslib
td = Series([timedelta(days=1),tslib.NaT ], dtype='m8[ns]' )
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([timedelta(days=1),np.nan ], dtype='m8[ns]' )
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(300000000), pd.NaT],dtype='m8[ns]')
self.assertEqual(td.dtype, 'timedelta64[ns]')
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), pd.NaT])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(300000000), tslib.iNaT])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(300000000), np.nan])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([pd.NaT, np.timedelta64(300000000)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(1,'s')])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# these are frequency conversion astypes
#for t in ['s', 'D', 'us', 'ms']:
# self.assertRaises(TypeError, td.astype, 'm8[%s]' % t)
# valid astype
td.astype('int64')
# invalid casting
self.assertRaises(TypeError, td.astype, 'int32')
# this is an invalid casting
def f():
Series([timedelta(days=1), 'foo'],dtype='m8[ns]')
self.assertRaises(Exception, f)
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ['foo'])
self.assertEqual(td.dtype, 'object')
# these will correctly infer a timedelta
s = Series([None, pd.NaT, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
s = Series([np.nan, pd.NaT, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
s = Series([pd.NaT, None, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
s = Series([pd.NaT, np.nan, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(
Exception, self.objSeries.__add__, np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(
Exception, self.objSeries.__sub__, np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24, rs.index).astype(
'int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'), Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series([timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5,seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5,seconds=1))
self.assertEqual(rs[2], value)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series(
[Timestamp('20130101 9:01:05'), Timestamp('20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp('20130101 9:06:00.005'), Timestamp('20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series(
[Timestamp('20130101 9:01:01'), Timestamp('20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series(
[Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',
'Milli', 'Nano' ]:
op = getattr(pd.offsets,do)
s + op(5)
op(5) + s
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',
'Milli', 'Nano' ]:
op = getattr(pd.offsets,do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series(
[timedelta(seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2,td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series(
[timedelta(seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2,td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result,expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result,expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result,expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result,expected)
for dtype in ['int32','int16','uint32','uint64','uint32','uint16','uint8']:
s2 = Series([20, 30, 40],dtype=dtype)
expected = Series(s1.values.astype(np.int64) * s2.astype(np.int64), dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result,expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result,expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result,expected)
# invalid ops
for op in ['__true_div__','__div__','__mul__']:
sop = getattr(s1,op,None)
if sop is not None:
self.assertRaises(TypeError, sop, s2.astype(float))
self.assertRaises(TypeError, sop, 2.)
for op in ['__add__','__sub__']:
sop = getattr(s1,op,None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D','h','m','s','ms','us','ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m,unit))
result = s1 / np.timedelta64(m,unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(lambda x: np.timedelta64(m,unit) / x)
result = np.timedelta64(m,unit) / s1
# astype
s = Series(date_range('20130101',periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0],datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0],timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'),
Timestamp('20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
npy16_mappings = {'D': 24 * 60 * 60 * 1000000, 'h': 60 * 60 * 1000000,
'm': 60 * 1000000, 's': 1000000, 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s, us, lhs, rhs))
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s,Series(Timedelta('1 days'),index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s,Series([np.nan,Timedelta('1 days')],index=['A','B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'),index=['A','B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
### timedelta64 ###
td1 = Series([timedelta(minutes=5,seconds=3)]*3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5,seconds=4)
ops = ['__mul__','__floordiv__','__pow__',
'__rmul__','__rfloordiv__','__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
### datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
### datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
### timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rsub__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
dt1 = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'),name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min',periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
expected = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize('US/Eastern')
assert_series_equal(result, expected)
result = dt2 + td2[0]
expected = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize('US/Eastern')
assert_series_equal(result, expected)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
expected = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize('US/Eastern')
assert_series_equal(result, expected)
result = td2[0] + dt2
expected = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize('US/Eastern')
assert_series_equal(result, expected)
result = dt1 - td1[0]
expected = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize('US/Eastern')
assert_series_equal(result, expected)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
expected = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize('US/Eastern')
assert_series_equal(result, expected)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
expected = (dt1.dt.tz_localize(None) + td1).dt.tz_localize('US/Eastern')
assert_series_equal(result, expected)
result = dt2 + td2
expected = (dt2.dt.tz_localize(None) + td2).dt.tz_localize('US/Eastern')
assert_series_equal(result, expected)
result = dt1 - td1
expected = (dt1.dt.tz_localize(None) - td1).dt.tz_localize('US/Eastern')
assert_series_equal(result, expected)
result = dt2 - td2
expected = (dt2.dt.tz_localize(None) - td2).dt.tz_localize('US/Eastern')
assert_series_equal(result, expected)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_timedelta64_functions(self):
from datetime import timedelta
from pandas import date_range
# index min/max
td = Series(date_range('2012-1-1', periods=3, freq='D')) - \
Timestamp('20120101')
result = td.idxmin()
self.assertEqual(result, 0)
result = td.idxmax()
self.assertEqual(result, 2)
# GH 2982
# with NaT
td[0] = np.nan
result = td.idxmin()
self.assertEqual(result, 1)
result = td.idxmax()
self.assertEqual(result, 2)
# abs
s1 = Series(date_range('20120101', periods=3))
s2 = Series(date_range('20120102', periods=3))
expected = Series(s2 - s1)
# this fails as numpy returns timedelta64[us]
#result = np.abs(s1-s2)
# assert_frame_equal(result,expected)
result = (s1 - s2).abs()
assert_series_equal(result, expected)
# max/min
result = td.max()
expected = Timedelta('2 days')
self.assertEqual(result, expected)
result = td.min()
expected = Timedelta('1 days')
self.assertEqual(result, expected)
def test_ops_consistency_on_empty(self):
# GH 7869
# consistency on empty
# float
result = Series(dtype=float).sum()
self.assertEqual(result,0)
result = Series(dtype=float).mean()
self.assertTrue(isnull(result))
result = Series(dtype=float).median()
self.assertTrue(isnull(result))
# timedelta64[ns]
result = Series(dtype='m8[ns]').sum()
self.assertEqual(result, Timedelta(0))
result = Series(dtype='m8[ns]').mean()
self.assertTrue(result is pd.NaT)
result = Series(dtype='m8[ns]').median()
self.assertTrue(result is pd.NaT)
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
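# the diff leaves a NaT in the first position, which fillna then replaces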
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0),
timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(0),
timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9*3600+60+1)], dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9*3600+60+1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'),
Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'), Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'], dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00', tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101',tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01',tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01',tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_raise_on_info(self):
s = Series(np.random.randn(10))
with tm.assertRaises(AttributeError):
s.info()
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
# TimeSeries-specific
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_numpy_array_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
self.assert_numpy_array_equal(ts.fillna(method='ffill'),
[0., 1., 1., 3., 4.])
self.assert_numpy_array_equal(ts.fillna(method='backfill'),
[0., 1., 3., 3., 4.])
self.assert_numpy_array_equal(ts.fillna(value=5), [0., 1., 5., 3., 4.])
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result,expected)
result = s1.fillna({})
assert_series_equal(result,s1)
result = s1.fillna(Series(()))
assert_series_equal(result,s1)
result = s2.fillna(s1)
assert_series_equal(result,s2)
result = s1.fillna({ 0 : 1})
assert_series_equal(result,expected)
result = s1.fillna({ 1 : 1})
assert_series_equal(result,Series([np.nan]))
result = s1.fillna({ 0 : 1, 1 : 1})
assert_series_equal(result,expected)
result = s1.fillna(Series({ 0 : 1, 1 : 1}))
assert_series_equal(result,expected)
result = s1.fillna(Series({ 0 : 1, 1 : 1},index=[4,5]))
assert_series_equal(result,s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0,0,2.], list('bac'))
assert_series_equal(result,expected)
# limit
s = Series(np.nan,index=[0,1,2])
result = s.fillna(999,limit=1)
expected = Series([999,np.nan,np.nan],index=[0,1,2])
assert_series_equal(result,expected)
result = s.fillna(999,limit=2)
expected = Series([999,999,np.nan],index=[0,1,2])
assert_series_equal(result,expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_sub_of_datetime_from_TimeSeries(self):
from pandas.tseries.timedeltas import to_timedelta
from datetime import datetime
a = Timestamp(datetime(1993, 1, 7, 13, 30, 0))
b = datetime(1993, 6, 22, 13, 30)
a = Series([a])
result = to_timedelta(np.abs(a - b))
self.assertEqual(result.dtype, 'timedelta64[ns]')
def test_datetime64_with_index(self):
# arithmetic integer ops with an index
s = Series(np.random.randn(5))
expected = s - s.index.to_series()
result = s - s.index
assert_series_equal(result, expected)
# GH 4629
# arithmetic datetime64 ops with an index
s = Series(date_range('20130101', periods=5),
index=date_range('20130101', periods=5))
expected = s - s.index.to_series()
result = s - s.index
assert_series_equal(result, expected)
result = s - s.index.to_period()
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(5,2),
index=date_range('20130101', periods=5))
df['date'] = Timestamp('20130102')
df['expected'] = df['date'] - df.index.to_series()
df['result'] = df['date'] - df.index
assert_series_equal(df['result'], df['expected'], check_names=False)
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
#result = td[(td>np.timedelta64(timedelta(days=3))) & (td<np.timedelta64(timedelta(days=7)))] = np.nan
#self.assertEqual(isnull(result).sum(), 7)
# NumPy limitation
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_operators_na_handling(self):
from decimal import Decimal
from datetime import date
s = Series([Decimal('1.3'), Decimal('2.3')],
index=[date(2012, 1, 1), date(2012, 1, 2)])
result = s + s.shift(1)
result2 = s.shift(1) + s
self.assertTrue(isnull(result[0]))
self.assertTrue(isnull(result2[0]))
s = Series(['foo', 'bar', 'baz', np.nan])
result = 'prefix_' + s
expected = Series(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan])
assert_series_equal(result, expected)
result = s + '_suffix'
expected = Series(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan])
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1,1),(1,2)])
result = s == (1,2)
expected = Series([False,True])
assert_series_equal(result, expected)
result = s != (1,2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0,0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0,0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1,1),(1,1)])
result = s == (1,1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1,1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]),frozenset([1,2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
# reversed comparisons (scalar vs Series) do not yet handle NaNs consistently:
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s,s2),(s2,s)]:
self.assertRaises(TypeError, lambda : x == y)
self.assertRaises(TypeError, lambda : x != y)
self.assertRaises(TypeError, lambda : x >= y)
self.assertRaises(TypeError, lambda : x > y)
self.assertRaises(TypeError, lambda : x < y)
self.assertRaises(TypeError, lambda : x <= y)
def test_more_na_comparisons(self):
left = Series(['a', np.nan, 'c'])
right = Series(['a', np.nan, 'd'])
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([True, False, False], list('bca'))
result = a & b
assert_series_equal(result,expected)
expected = Series([True, False, True], list('bca'))
result = a | b
assert_series_equal(result,expected)
expected = Series([False, False, True], list('bca'))
result = a ^ b
assert_series_equal(result,expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([True, False, False], list('bca'))
result = a & b
assert_series_equal(result,expected)
expected = Series([True, False, True], list('bca'))
result = a | b
assert_series_equal(result,expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result,expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result,expected)
# vs non-matching
result = a & Series([1],['z'])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result,expected)
result = a | Series([1],['z'])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result,expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]),Series([1],['z']),Series(['z']),Series(np.nan,b.index),Series(np.nan,a.index)]:
result = a[a | e]
assert_series_equal(result,a[a])
# vs scalars
index = list('bca')
t = Series([True,False,True])
for v in [True,1,2]:
result = Series([True,False,True],index=index) | v
expected = Series([True,True,True],index=index)
assert_series_equal(result,expected)
for v in [np.nan,'foo']:
self.assertRaises(TypeError, lambda : t | v)
for v in [False,0]:
result = Series([True,False,True],index=index) | v
expected = Series([True,False,True],index=index)
assert_series_equal(result,expected)
for v in [True,1]:
result = Series([True,False,True],index=index) & v
expected = Series([True,False,True],index=index)
assert_series_equal(result,expected)
for v in [False,0]:
result = Series([True,False,True],index=index) & v
expected = Series([False,False,False],index=index)
assert_series_equal(result,expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda : t & v)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
s_0101 = Series([0,1,0,1])
s_0123 = Series(range(4),dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
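# naming: s_tft -> [True, False, True], s_fff -> [False]*3, s_0123 -> [0, 1, 2, 3],
# s_3333/s_4444 -> constant int series; the boolean series use index list('bca')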
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4),dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8),dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1]*4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a','b','c','d'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
assert_series_equal(s_tft & s_0123, Series([False] * 3, list('bca')))
# s_tft will be all false now because of reindexing like s_0123
assert_series_equal(s_0123 & s_tft, Series([False] * 4))
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]), Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a','b',np.NaN,'d'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_between(self):
s = Series(bdate_range('1/1/2000', periods=20).asobject)
s[::2] = np.nan
result = s[s.between(s[3], s[17])]
expected = s[3:18].dropna()
assert_series_equal(result, expected)
result = s[s.between(s[3], s[17], inclusive=False)]
expected = s[5:16].dropna()
assert_series_equal(result, expected)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True,index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
# TODO: fix this exception (see GH5035)
# (previously this was a TypeError because Series returned
# NotImplemented)
self.assertRaises(ValueError, tester, s, d)
def test_idxmin(self):
# test idxmin
# the _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
self.assertTrue(isnull(self.series.idxmin(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmin()], nona.min())
self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
nona.values.argmin())
# all NaNs
allna = self.series * nan
self.assertTrue(isnull(allna.idxmin()))
# datetime64[ns]
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmin()
self.assertEqual(result, 0)
s[0] = np.nan
result = s.idxmin()
self.assertEqual(result, 1)
def test_idxmax(self):
# test idxmax
# the _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
self.assertTrue(isnull(self.series.idxmax(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmax()], nona.max())
self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
nona.values.argmax())
# all NaNs
allna = self.series * nan
self.assertTrue(isnull(allna.idxmax()))
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmax()
self.assertEqual(result, 5)
s[5] = np.nan
result = s.idxmax()
self.assertEqual(result, 4)
# Float64Index
# GH 5914
s = pd.Series([1,2,3],[1.1,2.1,3.1])
result = s.idxmax()
self.assertEqual(result, 3.1)
result = s.idxmin()
self.assertEqual(result, 1.1)
s = pd.Series(s.index, s.index)
result = s.idxmax()
self.assertEqual(result, 3.1)
result = s.idxmin()
self.assertEqual(result, 1.1)
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
index=date_range('1/1/2000', periods=1000))
def f(x):
return x[x.argmax()]
result = tsdf.apply(f)
expected = tsdf.max()
assert_series_equal(result,expected)
# .item()
s = Series([1])
result = s.item()
self.assertEqual(result, 1)
self.assertEqual(s.item(), s.iloc[0])
# using an ndarray-like function
s = Series(np.random.randn(10))
result = np.ones_like(s)
expected = Series(1,index=range(10),dtype='float64')
#assert_series_equal(result,expected)
# ravel
s = Series(np.random.randn(10))
tm.assert_almost_equal(s.ravel(order='F'),s.values.ravel(order='F'))
# compress
# GH 6658
s = Series([0, 1., -1], index=list('abc'))
result = np.compress(s > 0, s)
assert_series_equal(result, Series([1.], index=['b']))
result = np.compress(s < -1, s)
# result is empty, with the same Index (dtype=object) as the original
exp = Series([], dtype='float64', index=Index([], dtype='object'))
assert_series_equal(result, exp)
s = Series([0, 1., -1], index=[.1, .2, .3])
result = np.compress(s > 0, s)
assert_series_equal(result, Series([1.], index=[.2]))
result = np.compress(s < -1, s)
# result is empty, with the same Float64Index as the original
exp = Series([], dtype='float64', index=Index([], dtype='float64'))
assert_series_equal(result, exp)
def test_complex(self):
# GH4819
# complex access for ndarray compat
a = np.arange(5)
b = Series(a + 4j*a)
tm.assert_almost_equal(a,b.real)
tm.assert_almost_equal(4*a,b.imag)
b.real = np.arange(5)+5
tm.assert_almost_equal(a+5,b.real)
tm.assert_almost_equal(4*a,b.imag)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1,2,3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2,2,2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(dict(a = [1,2,3], b = [1,2,3], c = [1,2,3], val = [0,1,0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df,expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment',None)
df = DataFrame({ "aa":range(5), "bb":[2.2]*5})
df["cc"] = 0.0
ck = [True]*len(df)
df["bb"].iloc[0] = .13
df_tmp = df.iloc[ck]
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment','raise')
# GH 3217
df = DataFrame(dict(a = [1,3], b = [np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'],index=[0]))
expected = DataFrame(dict(a = [1,3], b = [np.nan, 2], c = ['foo',np.nan]))
tm.assert_frame_equal(df,expected)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_numpy_array_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10),
dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_series_frame_radd_bug(self):
import operator
# GH 353
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# adding a datetime to a float Series should really raise TypeError
self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A': self.ts})
tm.assert_almost_equal(self.ts + self.ts, self.ts + df['A'])
tm.assert_almost_equal(self.ts ** self.ts, self.ts ** df['A'])
tm.assert_almost_equal(self.ts < self.ts, self.ts < df['A'])
tm.assert_almost_equal(self.ts / self.ts, self.ts / df['A'])
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isnull(a)
bmask = isnull(b)
exp_values = []
for i in range(len(exp_index)):
if amask[i]:
if bmask[i]:
exp_values.append(nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
pairings = []
for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, 'r' + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
if compat.PY3:
pairings.append((Series.div, operator.truediv, 1))
pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x), 1))
else:
pairings.append((Series.div, operator.div, 1))
pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))
for op, equiv_op, fv in pairings:
result = op(a, b)
exp = equiv_op(a, b)
assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combine_first(series_copy)
self.assert_numpy_array_equal(combined, series)
# Holes filled from input
combined = series_copy.combine_first(series)
self.assertTrue(np.isfinite(combined).all())
self.assert_numpy_array_equal(combined[::2], series[::2])
self.assert_numpy_array_equal(combined[1::2], series_copy[1::2])
# mixed types
index = tm.makeStringIndex(20)
floats = Series(tm.randn(20), index=index)
strings = Series(tm.makeStringIndex(10), index=index[::2])
combined = strings.combine_first(floats)
tm.assert_dict_equal(strings, combined, compare_keys=False)
tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combine_first(Series([], index=[]))
assert_series_equal(s, result)
def test_update(self):
s = Series([1.5, nan, 3., 4., nan])
s2 = Series([nan, 3.5, nan, 5.])
s.update(s2)
expected = Series([1.5, 3.5, 3., 5., np.nan])
assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
# this will fail as long as series is a sub-class of ndarray
# df['c'].update(Series(['foo'],index=[0])) #####
def test_corr(self):
tm._skip_if_no_scipy()
import scipy.stats as stats
# full overlap
self.assertAlmostEqual(self.ts.corr(self.ts), 1)
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
self.assertTrue(isnull(self.ts[:15].corr(self.ts[5:], min_periods=12)))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
self.assertTrue(isnull(ts1.corr(ts2, min_periods=12)))
# No overlap
self.assertTrue(np.isnan(self.ts[::2].corr(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assertTrue(isnull(cp.corr(cp)))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
self.assertAlmostEqual(result, expected)
def test_corr_rank(self):
tm._skip_if_no_scipy()
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
self.assertAlmostEqual(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
self.assertAlmostEqual(result, expected)
# these methods got rewritten in 0.8
if scipy.__version__ < LooseVersion('0.9'):
raise nose.SkipTest("skipping corr rank because of scipy version "
"{0}".format(scipy.__version__))
# results from R
A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,
0.76910310, -0.06430576, -2.09704447, 0.40660407,
-0.89926396, 0.94209606])
B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,
-0.01680292, 1.17258718, -1.06009347, -0.10222060,
-0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)
self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)
def test_cov(self):
# full overlap
self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std() ** 2)
# partial overlap
self.assertAlmostEqual(
self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std() ** 2)
# No overlap
self.assertTrue(np.isnan(self.ts[::2].cov(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assertTrue(isnull(cp.cov(cp)))
# min_periods
self.assertTrue(isnull(self.ts[:15].cov(self.ts[5:], min_periods=12)))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
self.assertTrue(isnull(ts1.cov(ts2, min_periods=12)))
def test_copy(self):
ts = self.ts.copy()
ts[::2] = np.NaN
# Did not modify original Series
self.assertFalse(np.isnan(self.ts[0]))
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
self.ts[::2] = np.NaN
self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())
mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]])
ts = Series(np.arange(len(mi)), index=mi)
left = ts.count(level=1)
right = Series([2, 3, 1], index=[1, 2, nan])
assert_series_equal(left, right)
ts.iloc[[0, 3, 5]] = nan
assert_series_equal(ts.count(level=1), right - 1)
def test_dtype(self):
self.assertEqual(self.ts.dtype, np.dtype('float64'))
self.assertEqual(self.ts.dtypes, np.dtype('float64'))
self.assertEqual(self.ts.ftype, 'float64:dense')
self.assertEqual(self.ts.ftypes, 'float64:dense')
assert_series_equal(self.ts.get_dtype_counts(),Series(1,['float64']))
assert_series_equal(self.ts.get_ftype_counts(),Series(1,['float64:dense']))
def test_dot(self):
a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
result = a.dot(b)
expected = Series(np.dot(a.values, b.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
# Check index alignment
b2 = b.reindex(index=reversed(b.index))
result = a.dot(b2)
assert_series_equal(result, expected)
# Check ndarray argument
result = a.dot(b.values)
self.assertTrue(np.all(result == expected.values))
assert_almost_equal(a.dot(b['2'].values), expected['2'])
# Check series argument
assert_almost_equal(a.dot(b['1']), expected['1'])
assert_almost_equal(a.dot(b2['1']), expected['1'])
self.assertRaises(Exception, a.dot, a.values[:3])
self.assertRaises(ValueError, a.dot, b.T)
def test_value_counts_nunique(self):
# basics.rst doc example
series = Series(np.random.randn(500))
series[20:500] = np.nan
series[10:20] = 5000
result = series.nunique()
self.assertEqual(result, 11)
def test_unique(self):
# 714 also, dtype=float
s = Series([1.2345] * 100)
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
s = Series([1.2345] * 100, dtype='f4')
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
# NAs in object arrays #714
s = Series(['foo'] * 100, dtype='O')
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
# decision about None
s = Series([1, 2, 3, None, None, None], dtype=object)
result = s.unique()
expected = np.array([1, 2, 3, None], dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
self.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'),
Series([False, True, False], name='x')]:
result = s.dropna()
self.assert_series_equal(result, s)
self.assertFalse(result is s)
s2 = s.copy()
s2.dropna(inplace=True)
self.assert_series_equal(s2, s)
def test_axis_alias(self):
s = Series([1, 2, np.nan])
assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))
self.assertEqual(s.dropna().sum('rows'), 3)
self.assertEqual(s._get_axis_number('rows'), 0)
self.assertEqual(s._get_axis_name('rows'), 'index')
def test_drop_duplicates(self):
# check both int and object
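# cover keep='first'/'last'/False plus the deprecated take_last alias, inplace and not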
for s in [Series([1, 2, 3, 3]), Series(['1', '2', '3', '3'])]:
expected = Series([False, False, False, True])
assert_series_equal(s.duplicated(), expected)
assert_series_equal(s.drop_duplicates(), s[~expected])
sc = s.copy()
sc.drop_duplicates(inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, False, True, False])
assert_series_equal(s.duplicated(keep='last'), expected)
assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.duplicated(take_last=True), expected)
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])
sc = s.copy()
with tm.assert_produces_warning(FutureWarning):
sc.drop_duplicates(take_last=True, inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, False, True, True])
assert_series_equal(s.duplicated(keep=False), expected)
assert_series_equal(s.drop_duplicates(keep=False), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep=False, inplace=True)
assert_series_equal(sc, s[~expected])
for s in [Series([1, 2, 3, 5, 3, 2, 4]),
Series(['1', '2', '3', '5', '3', '2', '4'])]:
expected = Series([False, False, False, False, True, True, False])
assert_series_equal(s.duplicated(), expected)
assert_series_equal(s.drop_duplicates(), s[~expected])
sc = s.copy()
sc.drop_duplicates(inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, True, True, False, False, False, False])
assert_series_equal(s.duplicated(keep='last'), expected)
assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.duplicated(take_last=True), expected)
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])
sc = s.copy()
with tm.assert_produces_warning(FutureWarning):
sc.drop_duplicates(take_last=True, inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, True, True, False, True, True, False])
assert_series_equal(s.duplicated(keep=False), expected)
assert_series_equal(s.drop_duplicates(keep=False), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep=False, inplace=True)
assert_series_equal(sc, s[~expected])
def test_sort_values(self):
ts = self.ts.copy()
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
ts.sort()
self.assert_numpy_array_equal(ts, self.ts.sort_values())
self.assert_numpy_array_equal(ts.index, self.ts.sort_values().index)
ts.sort_values(ascending=False, inplace=True)
self.assert_numpy_array_equal(ts, self.ts.sort_values(ascending=False))
self.assert_numpy_array_equal(ts.index,
self.ts.sort_values(ascending=False).index)
# GH 5856/5853
# Series.sort_values operating on a view
df = DataFrame(np.random.randn(10,4))
s = df.iloc[:,0]
def f():
s.sort_values(inplace=True)
self.assertRaises(ValueError, f)
# test order/sort inplace
# GH6859
ts1 = self.ts.copy()
ts1.sort_values(ascending=False, inplace=True)
ts2 = self.ts.copy()
ts2.sort_values(ascending=False, inplace=True)
assert_series_equal(ts1,ts2)
ts1 = self.ts.copy()
ts1 = ts1.sort_values(ascending=False, inplace=False)
ts2 = self.ts.copy()
ts2 = ts2.sort_values(ascending=False, inplace=False)
assert_series_equal(ts1,ts2)
def test_sort_index(self):
rindex = list(self.ts.index)
random.shuffle(rindex)
random_order = self.ts.reindex(rindex)
sorted_series = random_order.sort_index()
assert_series_equal(sorted_series, self.ts)
# descending
sorted_series = random_order.sort_index(ascending=False)
assert_series_equal(sorted_series,
self.ts.reindex(self.ts.index[::-1]))
def test_sort_index_inplace(self):
# For #11402
rindex = list(self.ts.index)
random.shuffle(rindex)
# descending
random_order = self.ts.reindex(rindex)
result = random_order.sort_index(ascending=False, inplace=True)
self.assertIs(result, None,
msg='sort_index() inplace should return None')
assert_series_equal(random_order,
self.ts.reindex(self.ts.index[::-1]))
# ascending
random_order = self.ts.reindex(rindex)
result = random_order.sort_index(ascending=True, inplace=True)
self.assertIs(result, None,
msg='sort_index() inplace should return None')
assert_series_equal(random_order, self.ts)
def test_sort_API(self):
# API for 9816
# sortlevel
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sort_index(level='A')
assert_series_equal(backwards, res)
# sort_index
rindex = list(self.ts.index)
random.shuffle(rindex)
random_order = self.ts.reindex(rindex)
sorted_series = random_order.sort_index(level=0)
assert_series_equal(sorted_series, self.ts)
# compat on axis
sorted_series = random_order.sort_index(axis=0)
assert_series_equal(sorted_series, self.ts)
self.assertRaises(ValueError, lambda : random_order.sort_values(axis=1))
sorted_series = random_order.sort_index(level=0, axis=0)
assert_series_equal(sorted_series, self.ts)
self.assertRaises(ValueError, lambda : random_order.sort_index(level=0, axis=1))
def test_order(self):
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
self.ts.order()
ts = self.ts.copy()
ts[:5] = np.NaN
vals = ts.values
result = ts.sort_values()
self.assertTrue(np.isnan(result[-5:]).all())
self.assert_numpy_array_equal(result[:-5], np.sort(vals[5:]))
result = ts.sort_values(na_position='first')
self.assertTrue(np.isnan(result[:5]).all())
self.assert_numpy_array_equal(result[5:], np.sort(vals[5:]))
# something object-type
ser = Series(['A', 'B'], [1, 2])
# no failure
ser.sort_values()
# ascending=False
ordered = ts.sort_values(ascending=False)
expected = np.sort(ts.valid().values)[::-1]
assert_almost_equal(expected, ordered.valid().values)
ordered = ts.sort_values(ascending=False, na_position='first')
assert_almost_equal(expected, ordered.valid().values)
def test_nsmallest_nlargest(self):
# float, int, datetime64 (uses i8), timedelta64 (same),
# objects that are numbers, objects that are strings
base = [3, 2, 1, 2, 5]
s_list = [
Series(base, dtype='int8'),
Series(base, dtype='int16'),
Series(base, dtype='int32'),
Series(base, dtype='int64'),
Series(base, dtype='float32'),
Series(base, dtype='float64'),
Series(base, dtype='uint8'),
Series(base, dtype='uint16'),
Series(base, dtype='uint32'),
Series(base, dtype='uint64'),
Series(base).astype('timedelta64[ns]'),
Series(pd.to_datetime(['2003', '2002', '2001', '2002', '2005'])),
]
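# object and complex dtypes are not supported by nlargest/nsmallest and should raise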
raising = [
Series([3., 2, 1, 2, '5'], dtype='object'),
Series([3., 2, 1, 2, 5], dtype='object'),
# not supported on some archs
# Series([3., 2, 1, 2, 5], dtype='complex256'),
Series([3., 2, 1, 2, 5], dtype='complex128'),
]
for r in raising:
dt = r.dtype
msg = "Cannot use method 'n(larg|small)est' with dtype %s" % dt
args = 2, len(r), 0, -1
methods = r.nlargest, r.nsmallest
for method, arg in product(methods, args):
with tm.assertRaisesRegexp(TypeError, msg):
method(arg)
for s in s_list:
assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]])
assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]])
assert_series_equal(s.nlargest(3, keep='last'), s.iloc[[4, 0, 3]])
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.nlargest(3, take_last=True), s.iloc[[4, 0, 3]])
empty = s.iloc[0:0]
assert_series_equal(s.nsmallest(0), empty)
assert_series_equal(s.nsmallest(-1), empty)
assert_series_equal(s.nlargest(0), empty)
assert_series_equal(s.nlargest(-1), empty)
assert_series_equal(s.nsmallest(len(s)), s.sort_values())
assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
assert_series_equal(s.nlargest(len(s) + 1),
s.iloc[[4, 0, 1, 3, 2]])
s = Series([3., np.nan, 1, 2, 5])
assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
msg = 'keep must be either "first", "last"'
with tm.assertRaisesRegexp(ValueError, msg):
s.nsmallest(keep='invalid')
with tm.assertRaisesRegexp(ValueError, msg):
s.nlargest(keep='invalid')
def test_rank(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.ts[::2] = np.nan
self.ts[:10][::3] = 4.
ranks = self.ts.rank()
oranks = self.ts.astype('O').rank()
assert_series_equal(ranks, oranks)
mask = np.isnan(self.ts)
filled = self.ts.fillna(np.inf)
# rankdata returns an ndarray
exp = Series(rankdata(filled),index=filled.index)
exp[mask] = np.nan
assert_almost_equal(ranks, exp)
iseries = Series(np.arange(5).repeat(2))
iranks = iseries.rank()
exp = iseries.astype(float).rank()
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
exp = iseries / 5.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(1, 100))
exp = Series(np.repeat(0.505, 100))
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries[1] = np.nan
exp = Series(np.repeat(50.0 / 99.0, 100))
exp[1] = np.nan
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(np.nan, 100))
exp = iseries.copy()
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
rng = date_range('1/1/1990', periods=5)
iseries = Series(np.arange(5), rng) + 1
iseries.ix[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20+1e-30, 1e-1])
exp = Series([2, 1, 3, 5, 4, 6.0])
iranks = iseries.rank()
assert_series_equal(iranks, exp)
values = np.array([-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_rank_inf(self):
raise nose.SkipTest('Series.rank does not currently rank np.inf and -np.inf properly')
values = np.array([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40, np.inf], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_from_csv(self):
with ensure_clean() as path:
self.ts.to_csv(path)
ts = Series.from_csv(path)
assert_series_equal(self.ts, ts, check_names=False)
self.assertTrue(ts.name is None)
self.assertTrue(ts.index.name is None)
# GH10483
self.ts.to_csv(path, header=True)
ts_h = Series.from_csv(path, header=0)
self.assertTrue(ts_h.name == 'ts')
self.series.to_csv(path)
series = Series.from_csv(path)
self.assertIsNone(series.name)
self.assertIsNone(series.index.name)
assert_series_equal(self.series, series, check_names=False)
self.assertTrue(series.name is None)
self.assertTrue(series.index.name is None)
self.series.to_csv(path, header=True)
series_h = Series.from_csv(path, header=0)
self.assertTrue(series_h.name == 'series')
outfile = open(path, 'w')
outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
outfile.close()
series = Series.from_csv(path, sep='|')
checkseries = Series(
{datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0})
assert_series_equal(checkseries, series)
series = Series.from_csv(path, sep='|', parse_dates=False)
checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})
assert_series_equal(checkseries, series)
def test_to_csv(self):
import io
with ensure_clean() as path:
self.ts.to_csv(path)
lines = io.open(path, newline=None).readlines()
assert(lines[1] != '\n')
self.ts.to_csv(path, index=False)
arr = np.loadtxt(path)
assert_almost_equal(arr, self.ts.values)
def test_to_csv_unicode_index(self):
buf = StringIO()
s = Series([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")])
s.to_csv(buf, encoding='UTF-8')
buf.seek(0)
s2 = Series.from_csv(buf, index_col=0, encoding='UTF-8')
assert_series_equal(s, s2)
def test_tolist(self):
rs = self.ts.tolist()
xp = self.ts.values.tolist()
assert_almost_equal(rs, xp)
# datetime64
s = Series(self.ts.index)
rs = s.tolist()
self.assertEqual(self.ts.index[0], rs[0])
def test_to_frame(self):
self.ts.name = None
rs = self.ts.to_frame()
xp = pd.DataFrame(self.ts.values, index=self.ts.index)
assert_frame_equal(rs, xp)
self.ts.name = 'testname'
rs = self.ts.to_frame()
xp = pd.DataFrame(dict(testname=self.ts.values), index=self.ts.index)
assert_frame_equal(rs, xp)
rs = self.ts.to_frame(name='testdifferent')
xp = pd.DataFrame(dict(testdifferent=self.ts.values), index=self.ts.index)
assert_frame_equal(rs, xp)
def test_to_dict(self):
self.assert_numpy_array_equal(Series(self.ts.to_dict()), self.ts)
def test_to_csv_float_format(self):
with ensure_clean() as filename:
ser = Series([0.123456, 0.234567, 0.567567])
ser.to_csv(filename, float_format='%.2f')
rs = Series.from_csv(filename)
xp = Series([0.12, 0.23, 0.57])
assert_series_equal(rs, xp)
def test_to_csv_list_entries(self):
s = Series(['jack and jill', 'jesse and frank'])
split = s.str.split(r'\s+and\s+')
buf = StringIO()
split.to_csv(buf)
def test_to_csv_path_is_none(self):
# GH 8215
# Series.to_csv() was returning None, inconsistent with
# DataFrame.to_csv() which returned string
s = Series([1, 2, 3])
csv_str = s.to_csv(path=None)
self.assertIsInstance(csv_str, str)
def test_str_attribute(self):
# GH9068
methods = ['strip', 'rstrip', 'lstrip']
s = Series([' jack', 'jill ', ' jesse ', 'frank'])
for method in methods:
expected = Series([getattr(str, method)(x) for x in s.values])
assert_series_equal(getattr(Series.str, method)(s.str), expected)
# str accessor only valid with string values
s = Series(range(5))
with self.assertRaisesRegexp(AttributeError, 'only use .str accessor'):
s.str.repeat(2)
def test_clip(self):
val = self.ts.median()
self.assertEqual(self.ts.clip_lower(val).min(), val)
self.assertEqual(self.ts.clip_upper(val).max(), val)
self.assertEqual(self.ts.clip(lower=val).min(), val)
self.assertEqual(self.ts.clip(upper=val).max(), val)
result = self.ts.clip(-0.5, 0.5)
expected = np.clip(self.ts, -0.5, 0.5)
assert_series_equal(result, expected)
tm.assertIsInstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [Series([np.nan, 1.0, 2.0, 3.0]),
Series([None, 'a', 'b', 'c']),
Series(pd.to_datetime([np.nan, 1, 2, 3], unit='D'))]
for s in sers:
thresh = s[2]
l = s.clip_lower(thresh)
u = s.clip_upper(thresh)
self.assertEqual(l[notnull(l)].min(), thresh)
self.assertEqual(u[notnull(u)].max(), thresh)
self.assertEqual(list(isnull(s)), list(isnull(l)))
self.assertEqual(list(isnull(s)), list(isnull(u)))
def test_clip_against_series(self):
# GH #6966
s = Series([1.0, 1.0, 4.0])
threshold = Series([1.0, 2.0, 3.0])
assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0]))
assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0]))
lower = Series([1.0, 2.0, 3.0])
upper = Series([1.5, 2.5, 3.5])
assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_dict_equal(result, ts, compare_keys=False)
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
self.assertTrue(np.array_equal(
ser.isnull(), Series([False, False, False, True, False]).values))
ser = Series(["hi", "", nan])
self.assertTrue(np.array_equal(ser.isnull(), Series([False, False, True]).values))
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
self.assertTrue(np.array_equal(
ser.notnull(), Series([True, True, True, False, True]).values))
ser = Series(["hi", "", nan])
self.assertTrue(np.array_equal(ser.notnull(), Series([True, True, False]).values))
def test_shift(self):
shifted = self.ts.shift(1)
unshifted = shifted.shift(-1)
tm.assert_dict_equal(unshifted.valid(), self.ts, compare_keys=False)
offset = datetools.bday
shifted = self.ts.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
assert_series_equal(unshifted, self.ts)
unshifted = self.ts.shift(0, freq=offset)
assert_series_equal(unshifted, self.ts)
shifted = self.ts.shift(1, freq='B')
unshifted = shifted.shift(-1, freq='B')
assert_series_equal(unshifted, self.ts)
# corner case
unshifted = self.ts.shift(0)
assert_series_equal(unshifted, self.ts)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_dict_equal(unshifted.valid(), ps, compare_keys=False)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, datetools.bday)
assert_series_equal(shifted2, shifted3)
assert_series_equal(ps, shifted2.shift(-1, 'B'))
self.assertRaises(ValueError, ps.shift, freq='D')
# legacy support
shifted4 = ps.shift(1, freq='B')
assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=datetools.bday)
assert_series_equal(shifted5, shifted4)
# 32-bit taking
# GH 8129
index=date_range('2000-01-01',periods=5)
for dtype in ['int32','int64']:
s1 = Series(np.arange(5,dtype=dtype),index=index)
p = s1.iloc[1]
result = s1.shift(periods=p)
expected = Series([np.nan,0,1,2,3],index=index)
assert_series_equal(result,expected)
# xref 8260
# with tz
s = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'),name='foo')
result = s-s.shift()
assert_series_equal(result,Series(TimedeltaIndex(['NaT'] + ['1 days']*4),name='foo'))
# incompat tz
s2 = Series(date_range('2000-01-01 09:00:00',periods=5,tz='CET'),name='foo')
self.assertRaises(ValueError, lambda : s-s2)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_series_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=datetools.bday)
assert_series_equal(shifted, shifted3)
self.assertRaises(ValueError, ps.tshift, freq='M')
# DatetimeIndex
shifted = self.ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(self.ts, unshifted)
shifted2 = self.ts.tshift(freq=self.ts.index.freq)
assert_series_equal(shifted, shifted2)
inferred_ts = Series(self.ts.values, Index(np.asarray(self.ts.index)),
name='ts')
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(shifted, self.ts.tshift(1))
assert_series_equal(unshifted, inferred_ts)
no_freq = self.ts[[0, 5, 7]]
self.assertRaises(ValueError, no_freq.tshift)
def test_shift_int(self):
ts = self.ts.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
assert_series_equal(shifted, expected)
def test_shift_categorical(self):
# GH 9416
s = pd.Series(['a', 'b', 'c', 'd'], dtype='category')
assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).valid())
sp1 = s.shift(1)
assert_index_equal(s.index, sp1.index)
self.assertTrue(np.all(sp1.values.codes[:1] == -1))
self.assertTrue(np.all(s.values.codes[:-1] == sp1.values.codes[1:]))
sn2 = s.shift(-2)
assert_index_equal(s.index, sn2.index)
self.assertTrue(np.all(sn2.values.codes[-2:] == -1))
self.assertTrue(np.all(s.values.codes[2:] == sn2.values.codes[:-2]))
assert_index_equal(s.values.categories, sp1.values.categories)
assert_index_equal(s.values.categories, sn2.values.categories)
def test_truncate(self):
offset = datetools.bday
ts = self.ts[::3]
start, end = self.ts.index[3], self.ts.index[6]
start_missing, end_missing = self.ts.index[2], self.ts.index[7]
# neither specified
truncated = ts.truncate()
assert_series_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_series_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_series_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_series_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_series_equal(truncated, expected)
# corner case, empty series returned
truncated = ts.truncate(after=self.ts.index[0] - offset)
assert(len(truncated) == 0)
truncated = ts.truncate(before=self.ts.index[-1] + offset)
assert(len(truncated) == 0)
self.assertRaises(ValueError, ts.truncate,
before=self.ts.index[-1] + offset,
after=self.ts.index[0] - offset)
def test_ptp(self):
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
self.assertEqual(np.ptp(ser), np.ptp(arr))
# GH11163
s = Series([3, 5, np.nan, -3, 10])
self.assertEqual(s.ptp(), 13)
self.assertTrue(pd.isnull(s.ptp(skipna=False)))
mi = pd.MultiIndex.from_product([['a','b'], [1,2,3]])
s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi)
expected = pd.Series([6, 2], index=['a', 'b'], dtype=np.float64)
self.assert_series_equal(s.ptp(level=0), expected)
expected = pd.Series([np.nan, np.nan], index=['a', 'b'])
self.assert_series_equal(s.ptp(level=0, skipna=False), expected)
with self.assertRaises(ValueError):
s.ptp(axis=1)
s = pd.Series(['a', 'b', 'c', 'd', 'e'])
with self.assertRaises(TypeError):
s.ptp()
with self.assertRaises(NotImplementedError):
s.ptp(numeric_only=True)
def test_asof(self):
# array or list or dates
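# asof should return the last valid (non-NaN) observation at or before each requested date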
N = 50
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='25s')
result = ts.asof(dates)
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
self.assertTrue((rs == ts[lb]).all())
val = result[result.index[result.index >= ub][0]]
self.assertEqual(ts[ub], val)
self.ts[5:10] = np.NaN
self.ts[15:20] = np.NaN
val1 = self.ts.asof(self.ts.index[7])
val2 = self.ts.asof(self.ts.index[19])
self.assertEqual(val1, self.ts[4])
self.assertEqual(val2, self.ts[14])
# accepts strings
val1 = self.ts.asof(str(self.ts.index[7]))
self.assertEqual(val1, self.ts[4])
# in there
self.assertEqual(self.ts.asof(self.ts.index[3]), self.ts[3])
# no as of value
d = self.ts.index[0] - datetools.bday
self.assertTrue(np.isnan(self.ts.asof(d)))
def test_getitem_setitem_datetimeindex(self):
from pandas import date_range
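# string keys, naive datetimes and partial date strings should all resolve to the
# same positions for both getting and setting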
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
self.assertEqual(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# repeat all the above with naive datetimes
result = ts[datetime(1990, 1, 1, 4)]
expected = ts[4]
self.assertEqual(result, expected)
result = ts.copy()
result[datetime(1990, 1, 1, 4)] = 0
result[datetime(1990, 1, 1, 4)] = ts[4]
assert_series_equal(result, ts)
result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]
assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
self.assertEqual(result, expected)
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_pytz(self):
tm._skip_if_no_pytz()
from pytz import timezone as tz
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
# datetimes used for comparison MUST be tz-localized!
date = tz('US/Central').localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil(self):
tm._skip_if_no_dateutil()
from dateutil.tz import tzutc
from pandas.tslib import _dateutil_gettz as gettz
tz = lambda x: tzutc() if x == 'UTC' else gettz(x) # handle special case for utc in dateutil
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_periodindex(self):
from pandas import period_range
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
self.assertEqual(result, expected)
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
self.assertEqual(result, expected)
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
def test_asof_periodindex(self):
from pandas import period_range, PeriodIndex
# array or list or dates
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='37min')
result = ts.asof(dates)
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
pix = PeriodIndex(result.index.values, freq='H')
mask = (pix >= lb) & (pix < ub)
rs = result[mask]
self.assertTrue((rs == ts[lb]).all())
ts[5:10] = np.NaN
ts[15:20] = np.NaN
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
self.assertEqual(val1, ts[4])
self.assertEqual(val2, ts[14])
# accepts strings
val1 = ts.asof(str(ts.index[7]))
self.assertEqual(val1, ts[4])
# in there
self.assertEqual(ts.asof(ts.index[3]), ts[3])
# no as of value
d = ts.index[0].to_timestamp() - datetools.bday
self.assertTrue(np.isnan(ts.asof(d)))
def test_asof_more(self):
from pandas import date_range
s = Series([nan, nan, 1, 2, nan, nan, 3, 4, 5],
index=date_range('1/1/2000', periods=9))
dates = s.index[[4, 5, 6, 2, 1]]
result = s.asof(dates)
expected = Series([2, 2, 3, 1, np.nan], index=dates)
assert_series_equal(result, expected)
s = Series([1.5, 2.5, 1, 2, nan, nan, 3, 4, 5],
index=date_range('1/1/2000', periods=9))
result = s.asof(s.index[0])
self.assertEqual(result, s[0])
def test_cast_on_putmask(self):
# GH 2746
# need to upcast
s = Series([1, 2], index=[1, 2], dtype='int64')
s[[True, False]] = Series([0], index=[1], dtype='int64')
expected = Series([0, 2], index=[1, 2], dtype='int64')
assert_series_equal(s, expected)
def test_type_promote_putmask(self):
# GH8387: test that changing types does not break alignment
ts = Series(np.random.randn(100), index=np.arange(100,0,-1)).round(5)
left, mask = ts.copy(), ts > 0
right = ts[mask].copy().map(str)
left[mask] = right
assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))
s = Series([0, 1, 2, 0 ])
mask = s > 0
s2 = s[ mask ].map( str )
s[mask] = s2
assert_series_equal(s, Series([0, '1', '2', 0]))
s = Series([0, 'foo', 'bar', 0 ])
mask = Series([False, True, True, False])
s2 = s[ mask ]
s[mask] = s2
assert_series_equal(s, Series([0, 'foo','bar', 0]))
def test_astype_cast_nan_int(self):
df = Series([1.0, 2.0, 3.0, np.nan])
self.assertRaises(ValueError, df.astype, np.int64)
def test_astype_cast_object_int(self):
arr = Series(["car", "house", "tree", "1"])
self.assertRaises(ValueError, arr.astype, int)
self.assertRaises(ValueError, arr.astype, np.int64)
self.assertRaises(ValueError, arr.astype, np.int8)
arr = Series(['1', '2', '3', '4'], dtype=object)
result = arr.astype(int)
self.assert_numpy_array_equal(result, np.arange(1, 5))
def test_astype_datetimes(self):
import pandas.tslib as tslib
s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
self.assertEqual(s.dtype, 'M8[ns]')
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
def test_astype_str(self):
# GH4405
digits = string.digits
s1 = Series([digits * 10, tm.rands(63), tm.rands(64),
tm.rands(1000)])
s2 = Series([digits * 10, tm.rands(63), tm.rands(64), nan, 1.0])
types = (compat.text_type, np.str_)
for typ in types:
for s in (s1, s2):
res = s.astype(typ)
expec = s.map(compat.text_type)
assert_series_equal(res, expec)
# GH9757
# Test str and unicode on python 2.x and just str on python 3.x
for tt in set([str, compat.text_type]):
ts = Series([Timestamp('2010-01-04 00:00:00')])
s = ts.astype(tt)
expected = Series([tt('2010-01-04')])
assert_series_equal(s, expected)
ts = Series([Timestamp('2010-01-04 00:00:00', tz='US/Eastern')])
s = ts.astype(tt)
expected = Series([tt('2010-01-04 00:00:00-05:00')])
assert_series_equal(s, expected)
td = Series([Timedelta(1, unit='d')])
s = td.astype(tt)
expected = Series([tt('1 days 00:00:00.000000000')])
assert_series_equal(s, expected)
def test_astype_unicode(self):
# GH7758
# a bit of magic is required to set the default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([u('データーサイエンス、お前はもう死んでいる')]),
]
former_encoding = None
if not compat.PY3:
# in python 2 we can force the default encoding
# for this test
former_encoding = sys.getdefaultencoding()
reload(sys)
sys.setdefaultencoding("utf-8")
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series([u('野菜食べないとやばい').encode("utf-8")]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(compat.text_type)
assert_series_equal(res, expec)
# restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
def test_map(self):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
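# mapping with a Series looks up each value of target in source's index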
merged = target.map(source)
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# input could be a dict
merged = target.map(source.to_dict())
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# function
result = self.ts.map(lambda x: x * 2)
self.assert_numpy_array_equal(result, self.ts * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
self.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
self.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))
exp = Series([np.nan, 1, 2, 3])
self.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
self.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series(['B', 'C', 'D', 'E'], dtype='category',
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))
exp = Series([np.nan, 'B', 'C', 'D'], dtype='category')
self.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 'B', 'C', 'D'])
self.assert_series_equal(a.map(c), exp)
def test_map_compat(self):
# related GH 8024
s = Series([True,True,False],index=[1,2,3])
result = s.map({ True : 'foo', False : 'bar' })
expected = Series(['foo','foo','bar'],index=[1,2,3])
assert_series_equal(result,expected)
def test_map_int(self):
left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
right = Series({1: 11, 2: 22, 3: 33})
self.assertEqual(left.dtype, np.float_)
self.assertTrue(issubclass(right.dtype.type, np.integer))
merged = left.map(right)
self.assertEqual(merged.dtype, np.float_)
self.assertTrue(isnull(merged['d']))
self.assertTrue(not isnull(merged['c']))
def test_map_type_inference(self):
s = Series(lrange(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
self.assertTrue(issubclass(s2.dtype.type, np.integer))
def test_divide_decimal(self):
''' resolves issue #9787 '''
from decimal import Decimal
expected = Series([Decimal(5)])
s = Series([Decimal(10)])
s = s/Decimal(2)
tm.assert_series_equal(expected, s)
s = Series([Decimal(10)])
s = s//Decimal(2)
tm.assert_series_equal(expected, s)
def test_map_decimal(self):
from decimal import Decimal
result = self.series.map(lambda x: Decimal(str(x)))
self.assertEqual(result.dtype, np.object_)
tm.assertIsInstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action='ignore')
exp = s * 2
assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
'''
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
'''
df = pd.DataFrame({'a': [(1,), (2,), (3, 4), (5, 6)]})
label_mappings = {
(1,): 'A',
(2,): 'B',
(3, 4): 'A',
(5, 6): 'B'
}
df['labels'] = df['a'].map(label_mappings)
df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df['labels'], df['expected_labels'], check_names=False)
def test_apply(self):
assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
# elementwise-apply
import math
assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
# how to handle Series result, #2316
result = self.ts.apply(lambda x: Series([x, x ** 2],
index=['x', 'x^2']))
expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
tm.assert_frame_equal(result, expected)
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
self.assertIsNot(s, rs)
self.assertIs(s.index, rs.index)
self.assertEqual(s.dtype, rs.dtype)
self.assertEqual(s.name, rs.name)
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
self.assertEqual(result.dtype, object)
def test_convert_objects(self):
s = Series([1., 2, 3], index=['a', 'b', 'c'])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
expected = s.copy()
expected['a'] = np.nan
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, expected)
# GH 4119, not converting a mixed type (e.g. floats and object)
s = Series([1, 'na', 3, 4])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
# dates
s = Series(
[datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(
2001, 1, 3, 0, 0), 'foo', 1.0, 1, Timestamp('20010104'), '20010105'], dtype='O')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates=True, convert_numeric=False)
expected = Series(
[Timestamp('20010101'), Timestamp('20010102'), Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=True)
assert_series_equal(result, expected)
expected = Series(
[Timestamp(
'20010101'), Timestamp('20010102'), Timestamp('20010103'),
lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'), Timestamp('20010105')], dtype='M8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = s2.convert_objects(convert_dates='coerce',
convert_numeric=False)
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = s2.convert_objects(convert_dates='coerce',
convert_numeric=True)
assert_series_equal(result, expected)
# preserve all-nans (if convert_dates='coerce')
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
assert_series_equal(result, s)
# preserve if non-object
s = Series([1], dtype='float32')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
assert_series_equal(result, s)
#r = s.copy()
#r[0] = np.nan
#result = r.convert_objects(convert_dates=True,convert_numeric=False)
#self.assertEqual(result.dtype, 'M8[ns]')
# dateutil parses some single letters into today's value as a date
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
s = Series([x.upper()])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
def test_convert_objects_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
with tm.assert_produces_warning(FutureWarning):
r = s.convert_objects(convert_numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_objects_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
with tm.assert_produces_warning(FutureWarning):
r = s.convert_objects(convert_numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
# GH 10265
def test_convert(self):
# Tests: All to nans, coerce, true
# Test coercion returns correct type
s = Series(['a', 'b', 'c'])
results = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT] * 3)
assert_series_equal(results, expected)
results = s._convert(numeric=True, coerce=True)
expected = Series([np.nan] * 3)
assert_series_equal(results, expected)
expected = Series([lib.NaT] * 3, dtype=np.dtype('m8[ns]'))
results = s._convert(timedelta=True, coerce=True)
assert_series_equal(results, expected)
dt = datetime(2001, 1, 1, 0, 0)
td = dt - datetime(2000, 1, 1, 0, 0)
# Test coercion with mixed types
s = Series(['a', '3.1415', dt, td])
results = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT, lib.NaT, dt, lib.NaT])
assert_series_equal(results, expected)
results = s._convert(numeric=True, coerce=True)
expected = Series([nan, 3.1415, nan, nan])
assert_series_equal(results, expected)
results = s._convert(timedelta=True, coerce=True)
expected = Series([lib.NaT, lib.NaT, lib.NaT, td],
dtype=np.dtype('m8[ns]'))
assert_series_equal(results, expected)
# Test standard conversion returns original
results = s._convert(datetime=True)
assert_series_equal(results, s)
results = s._convert(numeric=True)
expected = Series([nan, 3.1415, nan, nan])
assert_series_equal(results, expected)
results = s._convert(timedelta=True)
assert_series_equal(results, s)
# test pass-through and non-conversion when other types selected
s = Series(['1.0','2.0','3.0'])
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([1.0,2.0,3.0])
assert_series_equal(results, expected)
results = s._convert(True,False,True)
assert_series_equal(results, s)
s = Series([datetime(2001, 1, 1, 0, 0),datetime(2001, 1, 1, 0, 0)],
dtype='O')
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([datetime(2001, 1, 1, 0, 0),datetime(2001, 1, 1, 0, 0)])
assert_series_equal(results, expected)
results = s._convert(datetime=False,numeric=True,timedelta=True)
assert_series_equal(results, s)
td = datetime(2001, 1, 1, 0, 0) - datetime(2000, 1, 1, 0, 0)
s = Series([td, td], dtype='O')
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([td, td])
assert_series_equal(results, expected)
results = s._convert(True,True,False)
assert_series_equal(results, s)
s = Series([1., 2, 3], index=['a', 'b', 'c'])
result = s._convert(numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
result = r._convert(numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
result = r._convert(numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
result = r._convert(numeric=True)
expected = s.copy()
expected['a'] = nan
assert_series_equal(result, expected)
# GH 4119, not converting a mixed type (e.g. floats and object)
s = Series([1, 'na', 3, 4])
result = s._convert(datetime=True, numeric=True)
expected = Series([1, nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
result = s._convert(datetime=True, numeric=True)
assert_series_equal(result, expected)
# dates
s = Series(
[datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(
2001, 1, 3, 0, 0), 'foo', 1.0, 1, Timestamp('20010104'), '20010105'], dtype='O')
result = s._convert(datetime=True)
expected = Series(
[Timestamp('20010101'), Timestamp('20010102'), Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
expected = Series(
[Timestamp(
'20010101'), Timestamp('20010102'), Timestamp('20010103'),
lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'), Timestamp('20010105')], dtype='M8[ns]')
result = s2._convert(datetime=True,
numeric=False,
timedelta=False,
coerce=True)
assert_series_equal(result, expected)
result = s2._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
result = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT]*4)
assert_series_equal(result, expected)
# preserve if non-object
s = Series([1], dtype='float32')
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, s)
#r = s.copy()
#r[0] = np.nan
#result = r._convert(convert_dates=True,convert_numeric=False)
#self.assertEqual(result.dtype, 'M8[ns]')
# dateutil parses some single letters into today's value as a date
expected = Series([lib.NaT])
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
s = Series([x.upper()])
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
def test_convert_no_arg_error(self):
s = Series(['1.0','2'])
self.assertRaises(ValueError, s._convert)
def test_convert_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
r = s._convert(datetime=True, numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
r = s._convert(datetime=True, numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',',))
self.assertEqual(result[0], ['foo', 'bar'])
tm.assertIsInstance(result[0], list)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
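# align() output should match manually reindexing both series onto the joined index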
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
self.assertEqual(aa.name, 'ts')
self.assertEqual(ea.name, 'ts')
self.assertEqual(ab.name, 'ts')
self.assertEqual(eb.name, 'ts')
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind,
method=meth, limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12,dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2,dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
self.assertTrue(np.may_share_memory(self.series.index, identity.index))
except (AttributeError):
pass
self.assertTrue(identity.index.is_(self.series.index))
self.assertTrue(identity.index.identical(self.series.index))
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
self.assertEqual(val, self.series[idx])
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
self.assertEqual(val, self.ts[idx])
stuffSeries = self.ts.reindex(subIndex)
self.assertTrue(np.isnan(stuffSeries).all())
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
self.assertEqual(val, self.ts[idx])
# reindex with no arguments should return a copy with the same index
result = self.ts.reindex()
self.assertFalse((result is self.ts))
def test_reindex_nan(self):
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_corner(self):
# (don't forget to fix this) I think it's fixed
reindexed_dep = self.empty.reindex(self.ts.index, method='pad')
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10),dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1,2,3,4,5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a','g','c','f']
expected = Series([1,1,3,3],index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
# inference of new dtype
s = Series([True,False,False,True],index=list('abcd'))
new_index='agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True,True,False],index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False,index=lrange(0,5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False,index=lrange(0,5))
assert_series_equal(result, expected)
def test_reindex_nearest(self):
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
def test_reindex_backfill(self):
pass
def test_reindex_int(self):
ts = self.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_int.dtype, np.float_)
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
self.assertEqual(reindexed_int.dtype, np.int_)
def test_reindex_bool(self):
# A series other than float, int, string, or object
ts = self.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_bool.dtype, np.object_)
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
self.assertEqual(reindexed_bool.dtype, np.bool_)
def test_reindex_bool_pad(self):
# leading positions have nothing to pad from, so they remain null
ts = self.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(self.ts.index, method='pad')
self.assertTrue(isnull(filled_bool[:5]).all())
def test_reindex_like(self):
other = self.ts[::2]
assert_series_equal(self.ts.reindex(other.index),
self.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013,3,5)
day2 = datetime(2013,5,5)
day3 = datetime(2014,3,5)
series1 = Series([5, None, None],[day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value(self):
#------------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
#------------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
self.assertTrue(issubclass(result.dtype.type, np.integer))
assert_series_equal(result, expected)
#------------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
#------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_rename(self):
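# rename with a callable maps each datetime label to a 'YYYYMMDD' string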
renamer = lambda x: x.strftime('%Y%m%d')
renamed = self.ts.rename(renamer)
self.assertEqual(renamed.index[0], renamer(self.ts.index[0]))
# dict
rename_dict = dict(zip(self.ts.index, renamed.index))
renamed2 = self.ts.rename(rename_dict)
assert_series_equal(renamed, renamed2)
# partial dict
s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')
renamed = s.rename({'b': 'foo', 'd': 'bar'})
self.assert_numpy_array_equal(renamed.index, ['a', 'foo', 'c', 'bar'])
# index with name
renamer = Series(
np.arange(4), index=Index(['a', 'b', 'c', 'd'], name='name'), dtype='int64')
renamed = renamer.rename({})
self.assertEqual(renamed.index.name, renamer.index.name)
def test_rename_inplace(self):
renamer = lambda x: x.strftime('%Y%m%d')
expected = renamer(self.ts.index[0])
self.ts.rename(renamer, inplace=True)
self.assertEqual(self.ts.index[0], expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
self.assertTrue(tm.equalContents(ts.index != 5, expected))
self.assertTrue(tm.equalContents(~(ts.index == 5), expected))
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan],
['z', 'a', 'b', 'c', 'd'], dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_unstack(self):
from numpy import nan
from pandas.util.testing import assert_frame_equal
index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],
labels=[[1, 1, 0, 0], [0, 1, 0, 2]])
s = Series(np.arange(4.), index=index)
unstacked = s.unstack()
expected = DataFrame([[2., nan, 3.], [0., 1., nan]],
index=['bar', 'foo'],
columns=['one', 'three', 'two'])
assert_frame_equal(unstacked, expected)
unstacked = s.unstack(level=0)
assert_frame_equal(unstacked, expected.T)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
expected = DataFrame({'bar': s.values}, index=exp_index).sortlevel(0)
unstacked = s.unstack(0)
assert_frame_equal(unstacked, expected)
# GH5873
idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
ts = pd.Series([1,2], index=idx)
left = ts.unstack()
right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],
columns=[nan, 3.5])
assert_frame_equal(left, right)
idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'],
['a', 'a', 'b', 'a', 'b'], [1, 2, 1, 1, np.nan]])
ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],
columns=['cat', 'dog'])
tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]
right.index = pd.MultiIndex.from_tuples(tpls)
assert_frame_equal(ts.unstack(level=0), right)
def test_sortlevel(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sortlevel('A')
assert_series_equal(backwards, res)
res = s.sortlevel(['A', 'B'])
assert_series_equal(backwards, res)
res = s.sortlevel('A', sort_remaining=False)
assert_series_equal(s, res)
res = s.sortlevel(['A', 'B'], sort_remaining=False)
assert_series_equal(s, res)
def test_head_tail(self):
assert_series_equal(self.series.head(), self.series[:5])
assert_series_equal(self.series.tail(), self.series[-5:])
def test_isin(self):
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
result = s.isin(['A', 'C'])
expected = Series([True, False, True, False, False, False, True, True])
assert_series_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH4763
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
with tm.assertRaises(TypeError):
s.isin('a')
with tm.assertRaises(TypeError):
s = Series(['aaa', 'b', 'c'])
s.isin('aaa')
def test_isin_with_i8(self):
# GH 5021
expected = Series([True,True,False,False,False])
expected2 = Series([False,True,False,False,False])
# datetime64[ns]
s = Series(date_range('jan-01-2013','jan-05-2013'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
assert_series_equal(result, expected)
# fails on dtype conversion in the first place
result = s.isin(s[0:2].values.astype('datetime64[D]'))
assert_series_equal(result, expected)
result = s.isin([s[1]])
assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
assert_series_equal(result, expected2)
# timedelta64[ns]
s = Series(pd.to_timedelta(lrange(5),unit='d'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
#------------------------------------------------------------------------------
# TimeSeries-specific
def test_cummethods_bool(self):
# GH 6270
# looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2
def cummin(x):
return np.minimum.accumulate(x)
def cummax(x):
return np.maximum.accumulate(x)
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
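# a/b are mixed boolean series; c/d are constant all-False/all-True series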
methods = {'cumsum': np.cumsum, 'cumprod': np.cumprod,
'cummin': cummin, 'cummax': cummax}
args = product((a, b, c, d), methods)
for s, method in args:
expected = Series(methods[method](s.values))
result = getattr(s, method)()
assert_series_equal(result, expected)
e = pd.Series([False, True, nan, False])
cse = pd.Series([0, 1, nan, 1], dtype=object)
cpe = pd.Series([False, 0, nan, 0])
cmin = pd.Series([False, False, nan, False])
cmax = pd.Series([False, True, nan, True])
expecteds = {'cumsum': cse, 'cumprod': cpe, 'cummin': cmin,
'cummax': cmax}
for method in methods:
res = getattr(e, method)()
assert_series_equal(res, expecteds[method])
def test_replace(self):
N = 100
ser = Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
ser.replace([np.nan], -1, inplace=True)
exp = ser.fillna(-1)
assert_series_equal(ser, exp)
rs = ser.replace(0., np.nan)
ser[ser == 0.] = np.nan
assert_series_equal(rs, ser)
ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -1).all())
self.assertTrue((rs[20:30] == -1).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -2).all())
self.assertTrue((rs[20:30] == -3).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
self.assertTrue((ser[:5] == -1).all())
self.assertTrue((ser[6:10] == -1).all())
self.assertTrue((ser[20:30] == -1).all())
ser = Series([np.nan, 0, np.inf])
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
assert_series_equal(ser.replace(np.inf, 0), filled)
ser = Series(self.ts.index)
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])
# make sure that we aren't just masking a TypeError because bools don't
# implement indexing
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
ser.replace([1, 2], [np.nan, 0])
ser = Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
assert_series_equal(result, Series([4, 3, 2, 1, 0]))
# API change from 0.12?
# GH 5319
ser = Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
assert_series_equal(result, expected)
ser = Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
assert_series_equal(result, expected)
#GH 5797
ser = Series(date_range('20130101', periods=5))
expected = ser.copy()
expected.loc[2] = Timestamp('20120101')
result = ser.replace({Timestamp('20130103'):
Timestamp('20120101')})
assert_series_equal(result, expected)
result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))
assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = Series([0, 1, 2, 3, 4])
result = ser.replace([1,2,3])
assert_series_equal(result, Series([0,0,0,0,4]))
s = ser.copy()
s.replace([1,2,3],inplace=True)
assert_series_equal(s, Series([0,0,0,0,4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
with tm.assertRaises(ValueError):
s.replace([1,2,3],inplace=True,method='crash_cymbal')
assert_series_equal(s, ser)
def test_replace_mixed_types(self):
s = Series(np.arange(5),dtype='int64')
def check_replace(to_rep, val, expected):
sc = s.copy()
r = s.replace(to_rep, val)
sc.replace(to_rep, val, inplace=True)
assert_series_equal(expected, r)
assert_series_equal(expected, sc)
# should NOT upcast to float
e = Series([0,1,2,3,4])
tr, v = [3], [3.0]
check_replace(tr, v, e)
# MUST upcast to float
e = Series([0,1,2,3.5,4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = Series([0,1,2,3.5,'a'])
tr, v = [3,4], [3.5,'a']
check_replace(tr, v, e)
# again casts to object
e = Series([0,1,2,3.5,Timestamp('20130101')])
tr, v = [3,4],[3.5,Timestamp('20130101')]
check_replace(tr, v, e)
# casts to float
e = Series([0,1,2,3.5,1])
tr, v = [3,4],[3.5,True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = date_range('1/1/2001', '1/10/2001',
freq='D').to_series().reset_index(drop=True)
result = dr.astype(object).replace([dr[0],dr[1],dr[2]], [1.0,2,'a'])
expected = Series([1.0,2,'a'] + dr[3:].tolist(),dtype=object)
assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = Series([True, False, True])
result = s.replace('fun', 'in-the-sun')
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = Series([True, False, True])
result = s.replace(True, '2u')
expected = Series(['2u', False, '2u'])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = Series([True, False, True])
result = s.replace(True, False)
expected = Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = Series([True, False, True])
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
s.replace({'asdf': 'asdb', True: 'yes'})
def test_asfreq(self):
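# values dated at business month-ends should round-trip through asfreq('B') and back to 'BM'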
ts = Series([0., 1., 2.], index=[datetime(2009, 10, 30),
datetime(2009, 11, 30),
datetime(2009, 12, 31)])
daily_ts = ts.asfreq('B')
monthly_ts = daily_ts.asfreq('BM')
self.assert_numpy_array_equal(monthly_ts, ts)
daily_ts = ts.asfreq('B', method='pad')
monthly_ts = daily_ts.asfreq('BM')
self.assert_numpy_array_equal(monthly_ts, ts)
daily_ts = ts.asfreq(datetools.bday)
monthly_ts = daily_ts.asfreq(datetools.bmonthEnd)
self.assert_numpy_array_equal(monthly_ts, ts)
result = ts[:0].asfreq('M')
self.assertEqual(len(result), 0)
self.assertIsNot(result, ts)
def test_diff(self):
# Just run the function
self.ts.diff()
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = s.diff()
self.assertEqual(rs[1], 1)
# neg n
rs = self.ts.diff(-1)
xp = self.ts - self.ts.shift(-1)
assert_series_equal(rs, xp)
# 0
rs = self.ts.diff(0)
xp = self.ts - self.ts
assert_series_equal(rs, xp)
# datetime diff (GH3100)
s = Series(date_range('20130102', periods=5))
rs = s - s.shift(1)
xp = s.diff()
assert_series_equal(rs, xp)
# timedelta diff
nrs = rs - rs.shift(1)
nxp = xp.diff()
assert_series_equal(nrs, nxp)
# with tz
s = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'), name='foo')
result = s.diff()
assert_series_equal(result,Series(TimedeltaIndex(['NaT'] + ['1 days']*4),name='foo'))
def test_pct_change(self):
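# with fill_method=None, pct_change is simply the shifted ratio minus 1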
rs = self.ts.pct_change(fill_method=None)
assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)
rs = self.ts.pct_change(2)
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(2) - 1)
rs = self.ts.pct_change(fill_method='bfill', limit=1)
filled = self.ts.fillna(method='bfill', limit=1)
assert_series_equal(rs, filled / filled.shift(1) - 1)
rs = self.ts.pct_change(freq='5D')
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
chg = s.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
assert_series_equal(chg, expected)
def test_autocorr(self):
# Just run the function
corr1 = self.ts.autocorr()
# Now run it with the lag parameter
corr2 = self.ts.autocorr(lag=1)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
self.assertTrue(np.isnan(corr1))
self.assertTrue(np.isnan(corr2))
else:
self.assertEqual(corr1, corr2)
# Choose a random lag between 1 and length of Series - 2
# and compare the result with the Series corr() function
n = 1 + np.random.randint(max(1, len(self.ts) - 2))
corr1 = self.ts.corr(self.ts.shift(n))
corr2 = self.ts.autocorr(lag=n)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
self.assertTrue(np.isnan(corr1))
self.assertTrue(np.isnan(corr2))
else:
self.assertEqual(corr1, corr2)
def test_first_last_valid(self):
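# first/last_valid_index skip leading/trailing NaNs and return None when everything is NaN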
ts = self.ts.copy()
ts[:5] = np.NaN
index = ts.first_valid_index()
self.assertEqual(index, ts.index[5])
ts[-5:] = np.NaN
index = ts.last_valid_index()
self.assertEqual(index, ts.index[-6])
ts[:] = np.nan
self.assertIsNone(ts.last_valid_index())
self.assertIsNone(ts.first_valid_index())
ser = Series([], index=[])
self.assertIsNone(ser.last_valid_index())
self.assertIsNone(ser.first_valid_index())
def test_mpl_compat_hack(self):
result = self.ts[:, np.newaxis]
expected = self.ts.values[:, np.newaxis]
assert_almost_equal(result, expected)
#------------------------------------------------------------------------------
# GroupBy
def test_select(self):
n = len(self.ts)
result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
expected = self.ts.reindex(self.ts.index[n // 2:])
assert_series_equal(result, expected)
result = self.ts.select(lambda x: x.weekday() == 2)
expected = self.ts[self.ts.index.weekday == 2]
assert_series_equal(result, expected)
#------------------------------------------------------------------------------
# Misc not safe for sparse
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_numpy_unique(self):
# it works!
result = np.unique(self.ts)
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = map(np.dtype,['float64','int8','uint8','bool','m8[ns]','M8[ns]'])
for dtype in dtypes:
self.assertEqual(pd.concat([Series(dtype=dtype)]).dtype, dtype)
self.assertEqual(pd.concat([Series(dtype=dtype),
Series(dtype=dtype)]).dtype, dtype)
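# helpers that compute the expected dtype kind when concatenating two empty series of different dtypes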
def int_result_type(dtype, dtype2):
typs = set([dtype.kind,dtype2.kind])
if not len(typs-set(['i','u','b'])) and (dtype.kind == 'i' or dtype2.kind == 'i'):
return 'i'
elif not len(typs-set(['u','b'])) and (dtype.kind == 'u' or dtype2.kind == 'u'):
return 'u'
return None
def float_result_type(dtype, dtype2):
typs = set([dtype.kind,dtype2.kind])
if not len(typs-set(['f','i','u'])) and (dtype.kind == 'f' or dtype2.kind == 'f'):
return 'f'
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return 'O'
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = pd.concat([Series(dtype=dtype),
Series(dtype=dtype2)]).dtype
self.assertEqual(result.kind, expected)
def test_concat_empty_series_dtypes(self):
# bools
self.assertEqual(pd.concat([Series(dtype=np.bool_),
Series(dtype=np.int32)]).dtype, np.int32)
self.assertEqual(pd.concat([Series(dtype=np.bool_),
Series(dtype=np.float32)]).dtype, np.object_)
# datetimelike
self.assertEqual(pd.concat([Series(dtype='m8[ns]'),
Series(dtype=np.bool)]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='m8[ns]'),
Series(dtype=np.int64)]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.bool)]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.int64)]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.bool_),
Series(dtype=np.int64)]).dtype, np.object_)
# categorical
self.assertEqual(pd.concat([Series(dtype='category'),
Series(dtype='category')]).dtype, 'category')
self.assertEqual(pd.concat([Series(dtype='category'),
Series(dtype='float64')]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='category'),
Series(dtype='object')]).dtype, 'category')
# sparse
result = pd.concat([Series(dtype='float64').to_sparse(),
Series(dtype='float64').to_sparse()])
self.assertEqual(result.dtype,np.float64)
self.assertEqual(result.ftype,'float64:sparse')
result = pd.concat([Series(dtype='float64').to_sparse(),
Series(dtype='float64')])
self.assertEqual(result.dtype,np.float64)
self.assertEqual(result.ftype,'float64:sparse')
result = pd.concat([Series(dtype='float64').to_sparse(),
Series(dtype='object')])
self.assertEqual(result.dtype,np.object_)
self.assertEqual(result.ftype,'object:dense')
def test_searchsorted_numeric_dtypes_scalar(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted(30)
e = 2
tm.assert_equal(r, e)
r = s.searchsorted([30])
e = np.array([2])
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_numeric_dtypes_vector(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted([91, 2e6])
e = np.array([3, 4])
tm.assert_numpy_array_equal(r, e)
def test_search_sorted_datetime64_scalar(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = pd.Timestamp('20120102')
r = s.searchsorted(v)
e = 1
tm.assert_equal(r, e)
def test_search_sorted_datetime64_list(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]
r = s.searchsorted(v)
e = np.array([1, 2])
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_sorter(self):
# GH8490
s = Series([3, 1, 2])
r = s.searchsorted([0, 3], sorter=np.argsort(s))
e = np.array([0, 2])
tm.assert_numpy_array_equal(r, e)
def test_to_frame_expanddim(self):
# GH 9762
class SubclassedSeries(Series):
@property
def _constructor_expanddim(self):
return SubclassedFrame
class SubclassedFrame(DataFrame):
pass
s = SubclassedSeries([1, 2, 3], name='X')
result = s.to_frame()
self.assertTrue(isinstance(result, SubclassedFrame))
expected = SubclassedFrame({'X': [1, 2, 3]})
assert_frame_equal(result, expected)
class TestSeriesNonUnique(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_basic_indexing(self):
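# with duplicate labels, out-of-range positions raise IndexError and missing labels raise KeyError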
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
ser.index.names = ['hash', 'category']
ser.name = 'value'
df = ser.reset_index()
self.assertIn('value', df)
df = ser.reset_index(name='value2')
self.assertIn('value2', df)
# check inplace
s = ser.reset_index(drop=True)
s2 = ser
s2.reset_index(drop=True, inplace=True)
assert_series_equal(s, s2)
# level
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
rs = s.reset_index(level=1)
self.assertEqual(len(rs.columns), 2)
rs = s.reset_index(level=[0, 2], drop=True)
self.assertTrue(rs.index.equals(Index(index.get_level_values(1))))
tm.assertIsInstance(rs, Series)
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(lrange(10))
s.index = idx
with tm.assert_produces_warning(FutureWarning):
self.assertTrue(s.is_time_series == True)
self.assertTrue(s.index.is_all_dates == True)
def test_timeseries_coercion(self):
idx = tm.makeDateIndex(10000)
ser = Series(np.random.randn(len(idx)), idx.astype(object))
with tm.assert_produces_warning(FutureWarning):
self.assertTrue(ser.is_time_series)
self.assertTrue(ser.index.is_all_dates)
self.assertIsInstance(ser.index, DatetimeIndex)
def test_replace(self):
N = 100
ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -1).all())
self.assertTrue((rs[20:30] == -1).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -2).all())
self.assertTrue((rs[20:30] == -3).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
self.assertTrue((ser[:5] == -1).all())
self.assertTrue((ser[6:10] == -1).all())
self.assertTrue((ser[20:30] == -1).all())
def test_repeat(self):
s = Series(np.random.randn(3), index=['a', 'b', 'c'])
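        # a scalar count repeats each value, and its index label, that many times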
reps = s.repeat(5)
exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
assert_series_equal(reps, exp)
to_rep = [2, 3, 4]
reps = s.repeat(to_rep)
exp = Series(s.values.repeat(to_rep),
index=s.index.values.repeat(to_rep))
assert_series_equal(reps, exp)
def test_unique_data_ownership(self):
# it works! #1807
Series(Series(["a", "c", "b"]).unique()).sort_values()
def test_datetime_timedelta_quantiles(self):
# covers #9694
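        # quantile of an empty datetime/timedelta Series returns NaT, which isnull reports as missing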
self.assertTrue(pd.isnull(Series([],dtype='M8[ns]').quantile(.5)))
self.assertTrue(pd.isnull(Series([],dtype='m8[ns]').quantile(.5)))
    def test_empty_timeseries_reductions_return_nat(self):
# covers #11245
for dtype in ('m8[ns]', 'm8[ns]', 'M8[ns]', 'M8[ns, UTC]'):
self.assertIs(Series([], dtype=dtype).min(), pd.NaT)
self.assertIs(Series([], dtype=dtype).max(), pd.NaT)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/frame/test_constructors.py | 7 | 73312 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import nose
from numpy.random import randn
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from pandas.types.common import is_integer_dtype
from pandas.compat import (lmap, long, zip, range, lrange, lzip,
OrderedDict, is_platform_little_endian)
from pandas import compat
from pandas import (DataFrame, Index, Series, isnull,
MultiIndex, Timedelta, Timestamp,
date_range)
from pandas.core.common import PandasError
import pandas as pd
import pandas.core.common as com
import pandas.lib as lib
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_constructor(self):
df = DataFrame()
self.assertEqual(len(df.index), 0)
df = DataFrame(data={})
self.assertEqual(len(df.index), 0)
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
self.assertEqual(self.mixed_frame['foo'].dtype, np.object_)
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
self.assertEqual(foo['a'].dtype, object)
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
self.assertRaises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
self.assertEqual(orig_df['col1'][0], 1.)
def test_constructor_dtype_nocast_view(self):
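        # when the requested dtype matches the data, construction reuses the
        # underlying values (a view), so writes propagate back to the source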
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
self.assertEqual(df.values[0, 0], 99)
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
self.assertEqual(df.values[0, 0], 97)
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
self.assertIsNone(df.ix[1, 0])
self.assertEqual(df.ix[0, 1], '2')
def test_constructor_list_frames(self):
# GH 3243
result = DataFrame([DataFrame([])])
self.assertEqual(result.shape, (1, 0))
result = DataFrame([DataFrame(dict(A=lrange(5)))])
tm.assertIsInstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update(dict([(d, a) for d, a in zipper]))
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer dtypes coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
self.assertEqual(a.dtype, df.a.dtype)
self.assertEqual(b.dtype, df.b.dtype)
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
# Assigning causes segfault in NumPy < 1.5.1
# rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
self.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
self.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
self.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
self.assertEqual(df.values.dtype, np.bool_)
def test_constructor_overflow_int64(self):
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
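        # these uint64 values exceed the int64 range, so the column falls back to object dtype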
result = DataFrame({'a': values})
self.assertEqual(result['a'].dtype, object)
# #2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
self.assertEqual(df_crawls['uid'].dtype, object)
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
self.assertEqual(expected, list(df.columns))
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
self.assertEqual(len(self.ts1), 30)
self.assertEqual(len(self.ts2), 25)
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
self.assertEqual(len(frame), len(self.ts2))
self.assertNotIn('col1', frame)
self.assertTrue(isnull(frame['col3']).all())
# Corner cases
self.assertEqual(len(DataFrame({})), 0)
# mix dict and array, wrong size - no spec for which error should raise
# first
with tm.assertRaises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
self.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
self.assertIs(frame.index, idx)
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
self.assertIs(frame.index, idx)
self.assertIs(frame.columns, idx)
self.assertEqual(len(frame._series), 3)
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
self.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
tm.assert_equal(frame_none.get_value(0, 'a'), None)
tm.assert_equal(frame_none_list.get_value(0, 'a'), None)
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
with tm.assertRaises(ValueError):
DataFrame({'a': 0.7})
with tm.assertRaises(ValueError):
DataFrame({'a': 0.7}, columns=['a'])
with tm.assertRaises(ValueError):
DataFrame({'a': 0.7}, columns=['b'])
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
self.assertTrue(pd.isnull(df).values.ravel().all())
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
self.assertTrue(pd.isnull(df).values.ravel().all())
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with tm.assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
with tm.assertRaisesRegexp(ValueError, "Shape of passed values is "
r"\(3, 2\), indices imply \(3, 1\)"):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
with tm.assertRaisesRegexp(ValueError, "Shape of passed values is "
r"\(3, 2\), indices imply \(2, 2\)"):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
with tm.assertRaisesRegexp(ValueError, 'If using all scalar values, '
'you must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame(dict((col, dict(compat.iteritems(val)))
for col, val in compat.iteritems(data)))
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
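        # all-float columns consolidate into a single block; the explicit
        # columns argument fixes their order, not the dict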
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
self.assertEqual(len(frame), 3)
self.assertEqual(frame['B'].dtype, np.float64)
self.assertEqual(frame['A'].dtype, np.float64)
frame = DataFrame(test_data)
self.assertEqual(len(frame), 3)
self.assertEqual(frame['B'].dtype, np.object_)
self.assertEqual(frame['A'].dtype, np.float64)
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
self.assertEqual(len(frame), 20)
self.assertEqual(frame['A'].dtype, np.object_)
self.assertEqual(frame['B'].dtype, np.float64)
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
tm.assertIsInstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
tm.assertIsInstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame(dict((k, list(v))
for k, v in compat.iteritems(data)))
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
check = lambda result, expected: tm.assert_frame_equal(
result, expected, check_dtype=True, check_index_type=True,
check_column_type=True, check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
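        # keys at both levels are 2-tuples, so the columns and the row index
        # each become a MultiIndex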
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return dict((i, {constructor(s): 2 * i})
for i, s in enumerate(dates_as_str))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return dict((i, {constructor(s): 2 * i})
for i, s in enumerate(td_as_int))
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
self.assertEqual(df['a'].dtype, 'object')
self.assertEqual(df['b'].dtype, 'object')
# list of periods
df = pd.DataFrame({'a': a.asobject.tolist(),
'b': b.asobject.tolist()})
self.assertEqual(df['a'].dtype, 'object')
self.assertEqual(df['b'].dtype, 'object')
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-d matrix with shape (2, 3) as input; `empty` is a factory
        # that makes sized objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
self.assertEqual(len(frame.index), 3)
self.assertEqual(len(frame.columns), 1)
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
self.assertEqual(frame.values.dtype, np.int64)
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with tm.assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
self.assert_index_equal(frame.index, pd.Index(lrange(2)))
self.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
self.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
self.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
self.assertEqual(len(frame.index), 0)
frame = DataFrame(empty((3, 0)))
self.assertEqual(len(frame.columns), 0)
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
self.assertEqual(len(frame), 2)
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1.0, frame['A'][1])
self.assertEqual(2.0, frame['C'][2])
        # an all-masked frame is NaN everywhere, so frame == frame is False everywhere
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertTrue(np.all(~np.asarray(frame == frame)))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(np.all(~np.asarray(frame == frame)))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
self.assertEqual(frame.values.dtype, np.float64)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1, frame['A'][1])
self.assertEqual(2, frame['C'][2])
# masked np.datetime64 stays (use lib.NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(isnull(frame).values.all())
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
self.assertEqual(frame.values.dtype, np.int64)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1, frame['A'].view('i8')[1])
self.assertEqual(2, frame['C'].view('i8')[2])
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(np.all(~np.asarray(frame == frame)))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
self.assertEqual(frame.values.dtype, object)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(True, frame['A'][1])
self.assertEqual(False, frame['C'][2])
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
            # fill in the masked values so the dict matches what the mrecarray yields
comb = dict([(k, v.filled()) if hasattr(
v, 'filled') else (k, v) for k, v in comb])
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner(self):
df = DataFrame(index=[])
self.assertEqual(df.values.shape, (0, 0))
# empty but with specified dtype
df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=object)
self.assertEqual(df.values.dtype, np.object_)
# does not error but ends up float
df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)
self.assertEqual(df.values.dtype, np.object_)
# #1783 empty dtype object
df = DataFrame({}, columns=['foo', 'bar'])
self.assertEqual(df.values.dtype, np.object_)
df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'),
dtype=int)
self.assertEqual(df.values.dtype, np.object_)
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
self.assertEqual(df['int'].dtype, np.int64)
self.assertEqual(df['bool'].dtype, np.bool_)
self.assertEqual(df['float'].dtype, np.float64)
self.assertEqual(df['complex'].dtype, np.complex128)
self.assertEqual(df['object'].dtype, np.object_)
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with tm.assertRaisesRegexp(ValueError, 'must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
self.assertEqual(df_casted.values.dtype, np.int64)
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
self.assertEqual(dm.values.ndim, 2)
arr = randn(0)
dm = DataFrame(arr)
self.assertEqual(dm.values.ndim, 2)
self.assertEqual(dm.values.ndim, 2)
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 2))
dm = DataFrame(columns=['A', 'B'])
self.assertEqual(dm.values.shape, (0, 2))
dm = DataFrame(index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 0))
# corner, silly
# TODO: Fix this Exception to be better...
with tm.assertRaisesRegexp(PandasError, 'constructor not '
'properly called'):
DataFrame((1, 2, 3))
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with tm.assertRaisesRegexp(ValueError, 'cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
self.assertEqual(len(dm.columns), 2)
self.assertEqual(dm.values.dtype, np.float64)
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
l = [[1, 'a'], [2, 'b']]
df = DataFrame(data=l, columns=["num", "str"])
self.assertTrue(is_integer_dtype(df['num']))
self.assertEqual(df['str'].dtype, np.object_)
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: range(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
import collections
class DummyContainer(collections.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
l = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(l, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame.from_items([('A', array.array('i', range(10)))])
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with tm.assertRaisesRegexp(ValueError,
'arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
self.assertTrue(result.index.is_monotonic)
# ordering ambiguous, raise exception
with tm.assertRaisesRegexp(ValueError, 'ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
self.assertEqual(df.columns[0], 'x')
self.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
self.assertRaises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
self.assertEqual(df.columns[0], 'x')
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
self.assertEqual(df1.columns[0], 'x')
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
self.assertEqual(df2.columns[0], 0)
self.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
self.assert_index_equal(result.index, Index(index))
self.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.ix[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
self.assertEqual(recons['A'].dtype, np.float64)
with tm.assertRaisesRegexp(TypeError,
"Must pass columns with orient='index'"):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = lib.list_to_object_array(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
tm.assertIsInstance(recons['foo'][0], tuple)
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.ix[:, ['A', 'B']])
with tm.assertRaisesRegexp(ValueError, 'does not match index length'):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
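        # NaN cannot be represented in int64, so the requested dtype is
        # ignored and the frame stays float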
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_iterator_failure(self):
with tm.assertRaisesRegexp(TypeError, 'iterator'):
df = DataFrame(iter([1, 2, 3])) # noqa
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_items(
[('a', [8]), ('a', [5])], columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
self.assertRaises(ValueError, DataFrame.from_items,
[('a', [8]), ('a', [5]), ('b', [6])],
columns=['b', 'a', 'a'])
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
self.assertRaises(com.PandasError, DataFrame, 'a', [1, 2])
self.assertRaises(com.PandasError, DataFrame, 'a', columns=['a', 'c'])
with tm.assertRaisesRegexp(TypeError, 'incompatible data and dtype'):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result.sort_index()
expected = Series(expected)
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
self.assertEqual(datetime_s.dtype, 'M8[ns]')
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
self.assertEqual(df.iat[0, 0], dt)
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
self.assertEqual(df.iat[0, 0], dt)
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
self.assertTrue(df.iat[0, 0].tz is None)
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
self.assertTrue(str(df.iat[0, 0].tz) == 'UTC')
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
self.assertTrue(str(df.iat[0, 0].tz) == 'US/Eastern')
# GH 7822
        # preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
        # overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
self.assertTrue((cop['A'] == 5).all())
self.assertFalse((self.frame['A'] == 5).all())
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
self.assertTrue((df.values[5] == 5).all())
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
self.assertFalse((df.values[6] == 6).all())
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
self.assertFalse((series['A'] == 5).all())
def test_constructor_with_nas(self):
# GH 5016
        # NaNs in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
# allow single nans to succeed
indexer = np.arange(len(df.columns))[isnull(df.columns)]
if len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should fail
else:
def f():
df.loc[:, np.nan]
self.assertRaises(TypeError, f)
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
self.assertEqual(d['a'].dtype, np.object_)
self.assertFalse(d['a'][1])
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
self.assert_index_equal(indexed_frame.index, index)
        # without field names, columns fall back to the default integer labels
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
self.assertEqual(len(records.dtype.names), 3)
records = indexed_frame.to_records(index=False)
self.assertEqual(len(records.dtype.names), 2)
self.assertNotIn('index', records.dtype.names)
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
self.assertTrue(np.isnan(df['c'][0]))
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
self.assertEqual(columns, original_columns)
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
self.assertEqual(df['a'].dtype, object)
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
self.assertEqual(df['a'].dtype, np.float64)
self.assertTrue(np.isnan(df['a'].values[-1]))
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
self.assertEqual(result.index.name, 'order_id')
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
self.assertEqual(result.index.names, ('order_id', 'quantity'))
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'object': 1})
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'int64': 1})
tm.assert_series_equal(results, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a', 'b', 'c'])
expected = DataFrame(columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a', 'b', 'b'])
expected = DataFrame(columns=['a', 'b', 'b'])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
tm.assert_index_equal(df.index, Index([1], name='id'))
self.assertEqual(df.index.name, 'id')
tm.assert_index_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
tm.assert_index_equal(df.index, Index([], name='id'))
self.assertEqual(df.index.name, 'id')
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH6140
if not is_platform_little_endian():
raise nose.SkipTest("known failure of test on non-little endian")
# construction with a null in a recarray
# GH 6140
expected = DataFrame({'EXPIRY': [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[ns]')]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
raise nose.SkipTest("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[m]')]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
        # it is actually tricky to create record-like arrays and keep
        # the dtypes intact
blocks = df.blocks
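        # df.blocks maps each dtype name to the sub-frame of columns stored with that dtype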
tuples = []
columns = []
dtypes = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1])
for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in compat.iteritems(blocks):
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = (DataFrame.from_records(tuples, columns=columns)
.reindex(columns=df.columns))
        # hand-created recarray and to_records() recarray (both keep dtype info)
result2 = (DataFrame.from_records(recarray, columns=columns)
.reindex(columns=df.columns))
result3 = (DataFrame.from_records(recarray2, columns=columns)
.reindex(columns=df.columns))
        # list of tuples (no dtype info)
result4 = (DataFrame.from_records(lists, columns=columns)
.reindex(columns=df.columns))
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
        # tuples are in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, pd.Index(lrange(8)))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index('C'), columns.index('E1')]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result['C'], df['C'])
tm.assert_series_equal(result['E1'], df['E1'].astype('float64'))
# empty case
result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 0)
self.assert_index_equal(result.columns,
pd.Index(['foo', 'bar', 'baz']))
result = DataFrame.from_records([])
self.assertEqual(len(result), 0)
self.assertEqual(len(result.columns), 0)
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# columns is in a different order here than the actual items iterated
# from the dict
columns = []
for dtype, b in compat.iteritems(df.blocks):
columns.extend(b.columns)
asdict = dict((x, y) for x, y in compat.iteritems(df))
asdict2 = dict((x, y.values) for x, y in compat.iteritems(df))
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(
asdict).reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict, columns=columns)
.reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict2, columns=columns)
.reindex(columns=df.columns))
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
# should pass
df1 = DataFrame.from_records(df, index=['C'])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index='C')
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
self.assertRaises(ValueError, DataFrame.from_records, df, index=[2])
self.assertRaises(KeyError, DataFrame.from_records, df, index=2)
def test_from_records_non_tuple(self):
class Record(object):
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = lmap(tuple, recs)
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# #2633
result = DataFrame.from_records([], index='foo',
columns=['foo', 'bar'])
self.assertTrue(np.array_equal(result.columns, ['bar']))
self.assertEqual(len(result), 0)
self.assertEqual(result.index.name, 'foo')
class TestDataFrameConstructorWithDatetimeTZ(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_from_dict(self):
# 8260
# support datetime64 with tz
idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
name='foo')
dr = date_range('20130110', periods=3)
# construction
df = DataFrame({'A': idx, 'B': dr})
        self.assertEqual(str(df['A'].dtype), 'datetime64[ns, US/Eastern]')
self.assertTrue(df['A'].name == 'A')
tm.assert_series_equal(df['A'], Series(idx, name='A'))
tm.assert_series_equal(df['B'], Series(dr, name='B'))
def test_from_index(self):
# from index
idx2 = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
df2 = DataFrame(idx2)
tm.assert_series_equal(df2['foo'], Series(idx2, name='foo'))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2['foo'], Series(idx2, name='foo'))
idx2 = date_range('20130101', periods=3, tz='US/Eastern')
df2 = DataFrame(idx2)
tm.assert_series_equal(df2[0], Series(idx2, name=0))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2[0], Series(idx2, name=0))
if __name__ == '__main__':
import nose # noqa
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
vitay/ANNarchy | examples/pyNN/IF_cond_exp.py | 2 | 1427 | # ANNarchy - IF_cond_exp
#
# A single IF neuron with exponential, conductance-based synapses, fed by two spike sources.
#
# This is a reimplementation of the PyNN example:
#
# http://www.neuralensemble.org/trac/PyNN/wiki/Examples/IF_cond_exp
#
# authors: Helge Uelo Dinkelbach, Julien Vitay
from ANNarchy import *
# Parameters
dt = 0.1
tstop = 200.0
# Setup
setup(dt=dt)
# Input populations with predetermined spike times
spike_sourceE = SpikeSourceArray(spike_times= [float(i) for i in range(5,105,10)] )
spike_sourceI = SpikeSourceArray(spike_times= [float(i) for i in range(155,255,10)])
# Population with one IF_cond_exp neuron
ifcell = Population(1, IF_cond_exp)
ifcell.set(
{ 'i_offset' : 0.1, 'tau_refrac' : 3.0,
'v_thresh' : -51.0, 'tau_syn_E' : 2.0,
'tau_syn_I': 5.0, 'v_reset' : -70.0,
'e_rev_E' : 0., 'e_rev_I' : -80.0 } )
# Projections
connE = Projection(spike_sourceE, ifcell, 'exc').connect_all_to_all(weights=0.006, delays=2.0)
connI = Projection(spike_sourceI, ifcell, 'inh').connect_all_to_all(weights=0.02, delays=4.0)
# Compile the network
compile()
# Simulate
m = Monitor(ifcell, ['spike', 'v'])
simulate(tstop)
data = m.get()
# Show the result
import matplotlib.pyplot as plt
plt.plot(dt*np.arange(tstop/dt), data['v'][:, 0])
plt.xlabel('Time (ms)')
plt.ylabel('Vm (mV)')
plt.ylim([-66.0, -61.0])
plt.title('IF_cond_exp')
plt.show()
| gpl-2.0 |
bjsmith/motivation-simulation | get_data.py | 1 | 1907 | import pandas as pd
data_folder = '/Users/benjaminsmith/Documents/computational-modeling/data/'
exp_design = pd.read_csv(data_folder + 'daquila2012/exp-design.csv')
data_exp_1_raw = pd.read_csv(data_folder + 'daquila2012/exp-1.csv')
data_exp_2_raw = pd.read_csv(data_folder + 'daquila2012/exp-2.csv')
def reshape_experimental_data(exp_df,DoseAmountDict,DoseSubstance):
exp_df_long=pd.melt(exp_df, id_vars=['Subject','Measure'],var_name='ExposureEvent')
exp_df_wide=exp_df_long.groupby(['Subject']).apply(lambda d: d.pivot(index='ExposureEvent',columns='Measure',values='value'))
exp_df_tabular = exp_df_wide.reset_index()
exp_df_tabular.ExposureEvent = [int(r.replace("Event ", "")) for r in exp_df_tabular.ExposureEvent]
exp_df_tabular_ordered = exp_df_tabular.sort_values(['Subject','ExposureEvent']).reset_index(drop=True)
exp_df_tabular_ordered_alldata = pd.merge(exp_df_tabular_ordered, exp_design, how='left', on='ExposureEvent')
concentration_dict = {1: (0.9 / 100), 2: (2.70 / 100)}
exp_df_tabular_ordered_alldata['ConcentrationAmount']= [concentration_dict[c] for c in exp_df_tabular_ordered_alldata.Concentration]
exp_df_tabular_ordered_alldata['DoseAmount']= [DoseAmountDict[c] for c in
exp_df_tabular_ordered_alldata.DoseType]
depletion_dict = {1:'Na replete',2:'Na depleted'}
exp_df_tabular_ordered_alldata['DepletionDescription']= [depletion_dict[c] for c in exp_df_tabular_ordered_alldata.loc[:,"Depletion status"]]
exp_df_tabular_ordered_alldata['DoseSubstance']=DoseSubstance
return exp_df_tabular_ordered_alldata
data_exp_1 = reshape_experimental_data(data_exp_1_raw,{1:0,2:10,3:20,4:40},DoseSubstance='SCH 23390')
data_exp_2 = reshape_experimental_data(data_exp_2_raw,{1:0,2:25,3:125,4:250},DoseSubstance='Raclopride')
#data_exp_2.to_csv(data_folder + "myout.csv")
| gpl-3.0 |
lekshmideepu/nest-simulator | pynest/examples/plot_weight_matrices.py | 8 | 6989 | # -*- coding: utf-8 -*-
#
# plot_weight_matrices.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Plot weight matrices example
----------------------------
This example demonstrates how to extract the connection strength
for all the synapses among two populations of neurons and gather
these values in weight matrices for further analysis and visualization.
All connection types between these populations are considered, i.e.,
four weight matrices are created and plotted.
"""
###############################################################################
# First, we import all necessary modules to extract, handle and plot
# the connectivity matrices
import numpy as np
import matplotlib.pyplot as plt
import nest
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
###############################################################################
# We now specify a function to extract and plot weight matrices for all
# connections among `E_neurons` and `I_neurons`.
#
# We initialize all the matrices, whose dimensionality is determined by the
# number of elements in each population.
# Since in this example, we have 2 populations (E/I), :math:`2^2` possible
# synaptic connections exist (EE, EI, IE, II).
#
# Note the use of "post-pre" notation when referring to synaptic connections.
# As a matter of convention in computational neuroscience, we refer to the
# connection from inhibitory to excitatory neurons (I->E) as EI (post-pre) and
# connections from excitatory to inhibitory neurons (E->I) as IE (post-pre).
def plot_weight_matrices(E_neurons, I_neurons):
W_EE = np.zeros([len(E_neurons), len(E_neurons)])
W_EI = np.zeros([len(I_neurons), len(E_neurons)])
W_IE = np.zeros([len(E_neurons), len(I_neurons)])
W_II = np.zeros([len(I_neurons), len(I_neurons)])
a_EE = nest.GetConnections(E_neurons, E_neurons)
# Using `get`, we can extract the value of the connection weight,
# for all the connections between these populations
c_EE = a_EE.weight
# Repeat the two previous steps for all other connection types
a_EI = nest.GetConnections(I_neurons, E_neurons)
c_EI = a_EI.weight
a_IE = nest.GetConnections(E_neurons, I_neurons)
c_IE = a_IE.weight
a_II = nest.GetConnections(I_neurons, I_neurons)
c_II = a_II.weight
# We now iterate through the range of all connections of each type.
# To populate the corresponding weight matrix, we begin by identifying
# the source-node_id (by using .source) and the target-node_id.
# For each node_id, we subtract the minimum node_id within the corresponding
# population, to assure the matrix indices range from 0 to the size of
# the population.
# After determining the matrix indices [i, j], for each connection
# object, the corresponding weight is added to the entry W[i,j].
# The procedure is then repeated for all the different connection types.
a_EE_src = a_EE.source
a_EE_trg = a_EE.target
a_EI_src = a_EI.source
a_EI_trg = a_EI.target
a_IE_src = a_IE.source
a_IE_trg = a_IE.target
a_II_src = a_II.source
a_II_trg = a_II.target
for idx in range(len(a_EE)):
W_EE[a_EE_src[idx] - min(E_neurons),
a_EE_trg[idx] - min(E_neurons)] += c_EE[idx]
for idx in range(len(a_EI)):
W_EI[a_EI_src[idx] - min(I_neurons),
a_EI_trg[idx] - min(E_neurons)] += c_EI[idx]
for idx in range(len(a_IE)):
W_IE[a_IE_src[idx] - min(E_neurons),
a_IE_trg[idx] - min(I_neurons)] += c_IE[idx]
for idx in range(len(a_II)):
W_II[a_II_src[idx] - min(I_neurons),
a_II_trg[idx] - min(I_neurons)] += c_II[idx]
fig = plt.figure()
    fig.suptitle('Weight matrices', fontsize=14)
gs = gridspec.GridSpec(4, 4)
ax1 = plt.subplot(gs[:-1, :-1])
ax2 = plt.subplot(gs[:-1, -1])
ax3 = plt.subplot(gs[-1, :-1])
ax4 = plt.subplot(gs[-1, -1])
plt1 = ax1.imshow(W_EE, cmap='jet')
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(plt1, cax=cax)
ax1.set_title('W_{EE}')
plt.tight_layout()
plt2 = ax2.imshow(W_IE)
plt2.set_cmap('jet')
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(plt2, cax=cax)
ax2.set_title('W_{EI}')
plt.tight_layout()
plt3 = ax3.imshow(W_EI)
plt3.set_cmap('jet')
divider = make_axes_locatable(ax3)
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(plt3, cax=cax)
ax3.set_title('W_{IE}')
plt.tight_layout()
plt4 = ax4.imshow(W_II)
plt4.set_cmap('jet')
divider = make_axes_locatable(ax4)
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(plt4, cax=cax)
ax4.set_title('W_{II}')
plt.tight_layout()
#################################################################################
# The script iterates through the list of all connections of each type.
# To populate the corresponding weight matrix, we identify the source-node_id
# (via each connection's ``.source`` attribute) and the target-node_id (via its
# ``.target`` attribute).
# For each `node_id`, we subtract the minimum `node_id` within the corresponding
# population, to assure the matrix indices range from 0 to the size of the
# population.
#
# After determining the matrix indices `[i, j]`, for each connection object, the
# corresponding weight is added to the entry `W[i,j]`. The procedure is then
# repeated for all the different connection types.
#
# We then plot the figure, specifying the properties we want. For example, we
# can display all the weight matrices in a single figure, which requires us to
# use ``GridSpec`` to specify the spatial arrangement of the axes.
# A subplot is subsequently created for each connection type. Using ``imshow``,
# we can visualize the weight matrix in the corresponding axis. We can also
# specify the colormap for this image.
# Using the ``axis_divider`` module from ``mpl_toolkits``, we can allocate a small
# extra space on the right of the current axis, which we reserve for a
# colorbar.
# We can set the title of each axis and adjust the axis subplot parameters.
# Finally, the last three steps are repeated for each synapse type.
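###############################################################################
# The short driver below is an illustrative sketch, not part of the original
# example: it assumes NEST 3.x with the built-in ``iaf_psc_alpha`` model, and
# the population sizes and static synaptic weights are placeholders chosen
# only so that the four weight matrices are non-trivial.

if __name__ == '__main__':
    nest.ResetKernel()
    E_neurons = nest.Create('iaf_psc_alpha', 20)   # small excitatory population
    I_neurons = nest.Create('iaf_psc_alpha', 5)    # small inhibitory population
    # all-to-all connections with illustrative excitatory/inhibitory weights
    nest.Connect(E_neurons, E_neurons, syn_spec={'weight': 1.2})
    nest.Connect(E_neurons, I_neurons, syn_spec={'weight': 1.2})
    nest.Connect(I_neurons, E_neurons, syn_spec={'weight': -4.5})
    nest.Connect(I_neurons, I_neurons, syn_spec={'weight': -4.5})
    plot_weight_matrices(E_neurons, I_neurons)
    plt.show()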
| gpl-2.0 |
gditzler/py-npfs | src/npfs.py | 1 | 6070 | #!/usr/bin/env python
import numpy as np
import feast
from scipy.stats import binom
from multiprocessing import Pool
import matplotlib.pylab as plt
__author__ = "Gregory Ditzler"
__copyright__ = "Copyright 2014, EESI Laboratory (Drexel University)"
__credits__ = ["Gregory Ditzler"]
__license__ = "GPL"
__version__ = "0.1.0"
__maintainer__ = "Gregory Ditzler"
__email__ = "[email protected]"
__status__ = "development"
class npfs:
def __init__(self, fs_method="JMI", n_select=5, n_bootstraps=100, \
verbose=False, alpha=.01, beta=0.0, parallel=None, min_improv=0.):
"""
        @self - self-explanatory
@fs_method - feature selection algorithm to use. Available methods are:
CIFE, CMIM, CONDMI, CONDRED, DISR, ICAP, JMI, MIM, MIFS, mRMR
DEFAULT: JMI
@n_select - number of features to select. this is the number of
features that the base feature selection uses. NPFS may
select a different number of features [DEFAULT = 5]
@n_bootstraps - number of bootstraps [DEFAULT = 100]
@alpha - size of the hypothesis test [DEFAULT = 0.01]
@beta - bias parameter for the test [DEFAULT = 0.0]
@parallel - number of parallel workers to use [DEFAULT = None]
@min_improv - critera for early stopping [DEFAULT = 0.0]
"""
self.fs_method = fs_method
self.n_select = n_select
self.n_bootstraps = n_bootstraps
self.alpha = alpha
self.beta = beta
self.selected_features = []
self.parallel = parallel
self.min_improv = min_improv
if min_improv != 0.:
self.early_stopping = True
else:
self.early_stopping = False
def fit(self, data, labels):
"""
        @self - self-explanatory
@data - data in a numpy array. here are some suggestions for formatting
the data.
len(data) = n_observations
len(data.transpose()) = n_features
@labels - numerical class labels in a numpy array.
len(labels) = n_observations
"""
data, labels = self.__check_data(data, labels)
try:
fs_method = getattr(feast, self.fs_method)
        except AttributeError:
            raise ValueError("Method does not exist in FEAST")
self.n_observations = len(data)
self.n_features = len(data.transpose())
self.method = fs_method
# @Z - contains the observations of the Bernoulli random variables
# that are whether the feature were or were not selected
Z = np.zeros( (self.n_features, self.n_bootstraps) )
self.data = data
self.labels = labels
if self.parallel == None:
if self.early_stopping == False:
for b in range(self.n_bootstraps):
sf = self.boot_iteration()
Z[sf, b] = 1 # mark the features selected with a '1'.
else:
p1_old = np.zeros((self.n_features,))
for b in range(self.n_bootstraps):
sf = self.boot_iteration()
Z[sf, b] = 1.
                    p1 = Z.sum(axis=1) / (b + 1.)  # b + 1 bootstraps completed so far
d = np.abs(p1 - p1_old).mean()
if d < self.min_improv:
self.run_time = b
break
p1_old = p1
else:
pool = Pool(processes = self.parallel)
sfs = pool.map(__call__, (self for x in range(self.n_bootstraps)))
for x in range(len(sfs)):
Z[sfs[x], x] = 1
z = np.sum(Z, axis=1) # z is a binomial random variable
# compute the neyman-pearson threshold (include the bias term)
p = (1.0*self.n_select)/self.n_features + self.beta
        if p > 1.0: # user chose beta too large -- p + beta cannot exceed 1
raise ValueError("p+beta > 1 -> Invalid probability")
delta = binom.ppf(1 - self.alpha, self.n_bootstraps, p)
# based on the threshold, determine which features are relevant and return
# them in a numpy array
selected_features = []
for k in range(self.n_features):
if z[k] > delta:
selected_features.append(k)
self.Bernoulli_matrix = Z
self.selected_features = np.array(selected_features)
return self.selected_features
def __check_data(self, data, labels):
"""
The data and label arrays must be of the same length. Furthermore,
the data are expected to be in numpy arrays. Return an error if this
        is not the case. Otherwise, if everything else checks out, cast the
        arrays as floats. That is how the data are expected by PyFeast.
"""
if isinstance(data, np.ndarray) is False:
raise Exception("Data must be an numpy ndarray.")
if isinstance(labels, np.ndarray) is False:
raise Exception("Labels must be an numpy ndarray.")
if len(data) != len(labels):
raise Exception("Data and labels must be the same length")
return 1.0*data, 1.0*labels
def boot_iteration(self, null=None):
"""
@self
@null - leave alone
"""
# generate some random integers that are the boostrap indices. the size
# of the bootstrap is the size of the data sample. hence all samples are
# sampled with replacement
idx = np.random.randint(0, self.n_observations, self.n_observations)
data_sub = self.data[idx] # bootstrap features
labels_sub = self.labels[idx] # bootstrap labels
sf = self.method(data_sub, labels_sub, self.n_select) # run feature selection
return sf
def plot_bernoulli_matrix(self, show_npfs=False):
"""
Plot the heatmap of the Bernoulli matrix
@self
@show_npfs - Highlight NPFS detections [Boolean]
"""
matrix = self.Bernoulli_matrix
if show_npfs == False:
plot = plt.imshow(matrix)
plot.set_cmap('hot')
plt.colorbar()
plt.xlabel("Bootstraps")
plt.ylabel("Feature")
plt.show()
else:
for i in self.selected_features:
for k in range(len(matrix[i])):
matrix[i,k] = .5
plot = plt.imshow(matrix)
plot.set_cmap('hot')
plt.xlabel("Bootstraps")
plt.ylabel("Feature")
plt.colorbar()
plt.show()
return None
def __call__(obj):
"""
This is a weird little hack to get around using multiprocessing with
the package being called inside of the NPFS object
"""
return obj.boot_iteration(None)
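# A minimal usage sketch (an assumption-laden illustration, not part of the
# library): it requires PyFeast to be importable, and the discrete random data
# below are placeholders, so the "selected" features carry no real meaning.
if __name__ == "__main__":
    X = np.random.randint(0, 5, (250, 50)).astype(float)  # 250 observations, 50 features
    y = np.random.randint(0, 2, 250).astype(float)        # binary class labels
    model = npfs(fs_method="JMI", n_select=5, n_bootstraps=25, alpha=0.01)
    relevant = model.fit(X, y)
    print(relevant)
    model.plot_bernoulli_matrix()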
| gpl-3.0 |
Nyker510/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
Lowingbn/iccpy | simulations/aquarius.py | 1 | 4810 | import iccpy.gadget
import matplotlib.pyplot as pl
import numpy as np
import iccpy.utils
sim_label = { 'aqa' : 'A', 'aqb' : 'B', 'aqc':'C', 'aqd':'D', 'aqe':'E' }
last_snapnum = { 'aqa2' : 1023, 'aqa3' : 511, 'aqa4' : 1023, 'aqb2' : 127, 'aqc2' : 127, 'aqd2' : 127, 'aqe2' : 127 }
r_200 = { 'aqa1' : 245.67, 'aqa2' : 245.88, 'aqa3' : 245.64, 'aqa4' : 245.70, 'aqa5' : 246.37, 'aqb2' : 187.70, 'aqb4' : 188.85,
'aqc2' : 242.82, 'aqc4' : 243.68, 'aqd2' : 242.85, 'aqd4' : 243.60, 'aqe2' : 212.28, 'aqe4' : 213.63, 'aqf2' : 209.21,
'aqf4' : 207.15 }
M_200 = { 'aqa1' : 183.9, 'aqa2' : 184.2, 'aqa3' : 183.6, 'aqa4' : 183.8, 'aqa5' : 185.3, 'aqb2' : 81.94, 'aqb4' : 83.45,
'aqc2' : 177.4, 'aqc4' : 179.3, 'aqd2' : 177.4, 'aqd4' : 179.1, 'aqe2' : 118.5, 'aqe4' : 120.8, 'aqf2' : 113.5,
'aqf4' : 110.1 }
merger_tree_filename = { 'aqa2' : '/gpfs/data/jch/Aquarius/Trees/Aq-A/2/trees/treedir_127/tree_127.0.hdf5',
'aqb2' : '/gpfs/data/d50wse/WMAP7_Trees/trees_Aq-B2/treedir_127/tree_127.0.hdf5',
'aqc2' : '/gpfs/data/d50wse/WMAP7_Trees/trees_Aq-C2/treedir_127/tree_127.0.hdf5',
'aqd2' : '/gpfs/data/d50wse/WMAP7_Trees/trees_Aq-D2/treedir_127/tree_127.0.hdf5',
'aqe2' : '/gpfs/data/d50wse/WMAP7_Trees/trees_Aq-E2/treedir_127/tree_127.0.hdf5' }
def get_dir(sim_name):
return "/gpfs/data/aquarius/halo_data/Aq-%s/%c/" % (sim_label[sim_name[0:3]], sim_name[3])
def load_last_snapshot(sim_name):
return iccpy.gadget.load_snapshot(directory=get_dir(sim_name), snapnum=last_snapnum[sim_name])
def get_subhaloes(sim_name, snapnum=None):
if snapnum==None:
snapnum=last_snapnum[sim_name]
catalogue = iccpy.gadget.SubfindCatalogue(get_dir(sim_name), snapnum)
return catalogue.subhalo
def get_halo_centre(sim_name):
return get_subhaloes(sim_name)[0].pot_min
def get_merger_tree(sim_name):
return MergerTree(merger_tree_filename[sim_name])
def plot(plot_func, haloes=['A', 'B', 'C', 'D', 'E', 'F'], legend=None, tick_length=8, minor_tick_x_space=None, minor_tick_y_space=None):
from matplotlib.ticker import MultipleLocator
haloes = np.array(haloes)
# no space between the panels
pl.rcParams.update({'figure.subplot.wspace':0,'figure.subplot.hspace':0})
all_haloes = np.array(['A', 'B', 'C', 'D', 'E', 'F'])
plotIdxs = np.sort(iccpy.utils.match(haloes, all_haloes))
numRows = 3
numCols = 2
for i in plotIdxs:
ax = pl.subplot(numRows,numCols,i+1)
plot_func(all_haloes[i], ax)
#Tidy up plot
if minor_tick_y_space is not None:
ax.yaxis.set_minor_locator(MultipleLocator(minor_tick_y_space))
if minor_tick_x_space is not None:
ax.xaxis.set_minor_locator(MultipleLocator(minor_tick_x_space))
left_tick = i%numCols==0 or i-1 not in plotIdxs
ax.yaxis.get_label().set_visible(left_tick)
for tick in ax.yaxis.get_major_ticks():
tick.label1On=left_tick
tick.tick1line.set_markersize(tick_length)
tick.tick2line.set_markersize(tick_length)
if left_tick and i-numCols in plotIdxs:
lims = ax.get_ylim()
ax.set_ylim(lims[0], 0.9999999999*lims[1])
lower_tick = i>=(numRows-1)*numCols or i+numCols not in plotIdxs
ax.xaxis.get_label().set_visible(lower_tick)
for tick in ax.xaxis.get_major_ticks():
tick.label1On=lower_tick
tick.tick1line.set_markersize(tick_length)
tick.tick2line.set_markersize(tick_length)
for tick in ax.yaxis.get_minor_ticks() + ax.xaxis.get_minor_ticks():
tick.tick1line.set_markersize(tick_length/2)
tick.tick2line.set_markersize(tick_length/2)
if lower_tick and i+1 in plotIdxs and (i+1)%numCols!=0:
lims = ax.get_xlim()
ax.set_xlim(lims[0], 0.9999999999*lims[1])
if lower_tick and not left_tick and i<(numRows-1)*numCols:
lims = ax.get_xlim()
ax.set_xlim(lims[0]*1.0000000001, lims[1])
if ax.get_legend() is not None:
if legend is None:
ax.get_legend().set_visible(False)
            elif legend == 'All' or legend == 'all' or all_haloes[i] in legend:
ax.get_legend().draw_frame(False)
else:
ax.get_legend().set_visible(False)
def plot_test(halo, ax):
ax.plot([1,2], [3,4])
pl.legend(['FISH'])
pl.ylabel('x')
if __name__=="__main__":
#print load_last_snapshot("aqa4")
#print get_halo_centre("aqa4")
plot(plot_test, haloes=['A', 'B', 'C', 'D', 'E'], legend='A', minor_tick_x_space=0.025)
#plot(plot_test, minor_tick_x_space=0.025)
pl.show()
| mit |
clolsonus/madesigner | sandbox/ft_wing2.py | 1 | 7423 | #!/usr/bin/env python3
# this is a quick test to compute dimensions for a flight test style
# folded foam board wing ... losely based on the clarky airfoil sorta
# kinda
import argparse
import math
import matplotlib.pyplot as plt
import numpy as np
from ft_profile import FtProfile, my_dist
ap = argparse.ArgumentParser(description="Compute the dimensions of FT-style folded wing. All dimensions are mm unless otherwise noted.")
ap.add_argument('root_chord_mm', type=float, nargs='?', help='root chord')
ap.add_argument('tip_chord_mm', type=float, nargs='?', help='tip chord')
ap.add_argument('span_mm', type=float, nargs='?', help='1/2 span')
ap.add_argument('sweep_mm', type=float, nargs='?', help='sweep offset from straight at tip')
ap.add_argument('inner_dihedral_deg', type=float, nargs='?', help='inner dihedral angle')
ap.add_argument('outer_dihedral_deg', type=float, nargs='?', help='outer dihedral angle')
ap.add_argument('--material_mm', type=float, default=4.9,
help='material thickness')
#ap.add_argument('--span_mm', type=float, help=
args = ap.parse_args()
#print(args)
def parse_val(val):
if not len(val):
print("No response, assuming 0.0")
return 0
else:
try:
return float(val)
except:
print("Entry is not a valid number, aborting, sorry...")
quit()
# do prompts if values aren't passed on the command line
if not args.root_chord_mm:
val = input("Enter root chord (mm): ")
args.root_chord_mm = parse_val(val)
if not args.root_chord_mm:
print("Cannot continue without a root chord size, sorry...")
quit()
if not args.tip_chord_mm:
val = input("Enter tip chord (mm): ")
args.tip_chord_mm = parse_val(val)
if args.tip_chord_mm:
if not args.span_mm:
val = input("Enter wing 1/2 span (mm): ")
args.span_mm = parse_val(val)
if args.sweep_mm is None:
val = input("Enter leading edge sweep at tip (mm): ")
args.sweep_mm = parse_val(val)
if args.inner_dihedral_deg is None:
val = input("Enter dihedral angle at root (deg): ")
args.inner_dihedral_deg = parse_val(val)
if args.outer_dihedral_deg is None:
val = input("Enter dihedral angle at tip (deg): ")
args.outer_dihedral_deg = parse_val(val)
# units: let's do mm
r2d = 180 / math.pi
d2r = math.pi / 180
root = FtProfile(args.root_chord_mm, args.material_mm)
root.compute()
root.plot()
if not args.tip_chord_mm:
print("simple root profile finished, thank you!")
quit()
# proceeding with a full wing
if not args.span_mm:
print("Cannot generate a whole wing plan without a valid span, sorry...")
quit()
tip = FtProfile(args.tip_chord_mm, args.material_mm)
tip.compute()
tip.plot()
if args.sweep_mm:
tip.outer += np.array([args.sweep_mm, 0])
tip.spar += np.array([args.sweep_mm, 0])
# https://stackoverflow.com/questions/55816902/finding-the-intersection-of-two-circles
def get_intersections(p0, r0, p1, r1):
# circle 1: (p0[0], p0[1]), radius r0
# circle 2: (p1[0], p1[1]), radius r1
print(p0, r0, p1, r1)
d=math.sqrt((p1[0]-p0[0])**2 + (p1[1]-p0[1])**2)
# non intersecting
if d > r0 + r1 :
print("non interesecting circles")
return None
# One circle within other
if d < abs(r0-r1):
print("one circle inside the other")
return None
# coincident circles
if d == 0 and r0 == r1:
print("coincident circles")
return None
else:
a=(r0**2-r1**2+d**2)/(2*d)
h=math.sqrt(r0**2-a**2)
x2=p0[0]+a*(p1[0]-p0[0])/d
y2=p0[1]+a*(p1[1]-p0[1])/d
x3=x2+h*(p1[1]-p0[1])/d
y3=y2-h*(p1[0]-p0[0])/d
x4=x2-h*(p1[1]-p0[1])/d
y4=y2+h*(p1[0]-p0[0])/d
return (x3, y3, x4, y4)
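# Worked example for the helper above (illustrative values, not used by the
# script): two unit circles centred at (0, 0) and (1, 0) intersect at
# x = 0.5, y = +/- sqrt(3)/2, so
#   get_intersections([0, 0], 1.0, [1, 0], 1.0) -> (0.5, -0.866..., 0.5, 0.866...)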
margin = 5 # mm
dih_in = args.inner_dihedral_deg
dih_out = args.outer_dihedral_deg
def do_dihedral(orig, angle, side):
pt = orig.copy()
a = 0.5 * angle * d2r
d = pt[1]
pt[1] = math.cos(a)*d
if side == "inner":
pt[2] = math.sin(a)*d
else:
pt[2] -= math.sin(a)*d
return pt
# unfold the vertical 2d coordinates (with implied 3rd dimension due
# to span) into a new 2d top down space. This is intended to create
# cut files that will fold back together into the correct desired
# shape without weird nonsense over/under lap due to taper.
def unfold(root, tip):
cuts = []
scores = []
r_last = do_dihedral(np.hstack([root[0], 0]), dih_in, "inner")
t_last = do_dihedral(np.hstack([tip[0], args.span_mm]), dih_out, "outer")
dist = my_dist(r_last, t_last)
p1_last = [margin, margin]
p2_last = [margin+dist, margin]
cuts.append( [p1_last, p2_last] )
print(r_last, t_last, p1_last, p2_last)
sections = len(root)-1
for i in range(sections):
r = do_dihedral(np.hstack([root[i+1], 0]), dih_in, "inner")
t = do_dihedral(np.hstack([tip[i+1], args.span_mm]), dih_out, "outer")
print(r, t)
a = my_dist(r_last, t_last)
b = my_dist(r_last, r)
c = my_dist(t_last, r)
d = my_dist(t_last, t)
e = my_dist(r_last, t)
print(a, b, c, d, e)
x3, y3, x4, y4 = get_intersections(p1_last, b, p2_last, c)
if y3 > y4:
p1 = [ x3, y3 ]
else:
p1 = [ x4, y4 ]
x3, y3, x4, y4 = get_intersections(p1_last, e, p2_last, d)
if y3 > y4:
p2 = [ x3, y3 ]
else:
p2 = [ x4, y4 ]
if i == sections - 1:
cuts.append( [p1, p2] )
else:
scores.append( [p1, p2] )
cuts.append( [p1_last, p1] )
cuts.append( [p2_last, p2] )
r_last = r
t_last = t
p1_last = p1
p2_last = p2
return cuts, scores
def do_plot(cuts, scores):
# draw a plot of the unfolded layout
fig = plt.figure()
ax = fig.add_subplot()
ax.grid()
ax.set_aspect("equal")
for seg in cuts:
x, y = np.array([seg[0], seg[1]]).T
ax.plot( x, y, color="r")
for seg in scores:
x, y = np.array([seg[0], seg[1]]).T
ax.plot( x, y, color="b")
plt.show()
from svgwrite import Drawing, mm
def do_svg(file, cuts, scores):
# attempt to generate an svg "true scale" drawing
width = 762 # 762mm = 30"
height = 508 # 508mm = 20"
units = "mm"
dpi = 96 / 25.4 # for mm
dwg = Drawing( file, size = ("%d%s" % (width, units),
"%d%s" % (height, units)) )
dwg.viewbox(0, 0, width*dpi, height*dpi)
g = dwg.g() # group
dwg.add(g)
for seg in cuts:
line = dwg.line([seg[0][0]*mm, seg[0][1]*mm],
[seg[1][0]*mm, seg[1][1]*mm],
stroke='red', fill='none', stroke_width="1px")
g.add( line )
for seg in scores:
line = dwg.line([seg[0][0]*mm, seg[0][1]*mm],
[seg[1][0]*mm, seg[1][1]*mm],
stroke='blue', fill='none', stroke_width="1px")
g.add( line )
dwg.save()
cuts, scores = unfold(root.outer, tip.outer)
do_plot(cuts, scores)
do_svg("unfolded-wing.svg", cuts, scores)
cuts, scores = unfold(root.spar, tip.spar)
do_plot(cuts, scores)
do_svg("unfolded-spar.svg", cuts, scores)
| gpl-3.0 |
CCS-Lab/hBayesDM | Python/hbayesdm/models/_igt_orl.py | 1 | 10493 | from typing import Sequence, Union, Any
from collections import OrderedDict
from numpy import Inf, exp
import pandas as pd
from hbayesdm.base import TaskModel
from hbayesdm.preprocess_funcs import igt_preprocess_func
__all__ = ['igt_orl']
class IgtOrl(TaskModel):
def __init__(self, **kwargs):
super().__init__(
task_name='igt',
model_name='orl',
model_type='',
data_columns=(
'subjID',
'choice',
'gain',
'loss',
),
parameters=OrderedDict([
('Arew', (0, 0.1, 1)),
('Apun', (0, 0.1, 1)),
('K', (0, 0.1, 5)),
('betaF', (-Inf, 0.1, Inf)),
('betaP', (-Inf, 1, Inf)),
]),
regressors=OrderedDict([
]),
postpreds=['y_pred'],
parameters_desc=OrderedDict([
('Arew', 'reward learning rate'),
('Apun', 'punishment learning rate'),
('K', 'perseverance decay'),
('betaF', 'outcome frequency weight'),
('betaP', 'perseverance weight'),
]),
additional_args_desc=OrderedDict([
('payscale', 100),
]),
**kwargs,
)
_preprocess_func = igt_preprocess_func
def igt_orl(
data: Union[pd.DataFrame, str, None] = None,
niter: int = 4000,
nwarmup: int = 1000,
nchain: int = 4,
ncore: int = 1,
nthin: int = 1,
inits: Union[str, Sequence[float]] = 'vb',
ind_pars: str = 'mean',
model_regressor: bool = False,
vb: bool = False,
inc_postpred: bool = False,
adapt_delta: float = 0.95,
stepsize: float = 1,
max_treedepth: int = 10,
**additional_args: Any) -> TaskModel:
"""Iowa Gambling Task - Outcome-Representation Learning Model
Hierarchical Bayesian Modeling of the Iowa Gambling Task [Ahn2008]_
using Outcome-Representation Learning Model [Haines2018]_ with the following parameters:
"Arew" (reward learning rate), "Apun" (punishment learning rate), "K" (perseverance decay), "betaF" (outcome frequency weight), "betaP" (perseverance weight).
.. [Ahn2008] Ahn, W. Y., Busemeyer, J. R., & Wagenmakers, E. J. (2008). Comparison of decision learning models using the generalization criterion method. Cognitive Science, 32(8), 1376-1402. https://doi.org/10.1080/03640210802352992
.. [Haines2018] Haines, N., Vassileva, J., & Ahn, W.-Y. (2018). The Outcome-Representation Learning Model: A Novel Reinforcement Learning Model of the Iowa Gambling Task. Cognitive Science. https://doi.org/10.1111/cogs.12688
.. codeauthor:: Nate Haines <[email protected]>
User data should contain the behavioral data-set of all subjects of interest for
the current analysis. When loading from a file, the datafile should be a
**tab-delimited** text file, whose rows represent trial-by-trial observations
and columns represent variables.
For the Iowa Gambling Task, there should be 4 columns of data
with the labels "subjID", "choice", "gain", "loss". It is not necessary for the columns to be
in this particular order; however, it is necessary that they be labeled
correctly and contain the information below:
- "subjID": A unique identifier for each subject in the data-set.
- "choice": Integer indicating which deck was chosen on that trial (where A==1, B==2, C==3, and D==4).
- "gain": Floating point value representing the amount of currency won on that trial (e.g. 50, 100).
- "loss": Floating point value representing the amount of currency lost on that trial (e.g. 0, -50).
.. note::
User data may contain other columns of data (e.g. ``ReactionTime``,
``trial_number``, etc.), but only the data within the column names listed
above will be used during the modeling. As long as the necessary columns
mentioned above are present and labeled correctly, there is no need to
remove other miscellaneous data columns.
.. note::
``adapt_delta``, ``stepsize``, and ``max_treedepth`` are advanced options that
give the user more control over Stan's MCMC sampler. It is recommended that
only advanced users change the default values, as alterations can profoundly
change the sampler's behavior. See [Hoffman2014]_ for more information on the
sampler control parameters. One can also refer to 'Section 34.2. HMC Algorithm
Parameters' of the `Stan User's Guide and Reference Manual`__.
.. [Hoffman2014]
Hoffman, M. D., & Gelman, A. (2014).
The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo.
Journal of Machine Learning Research, 15(1), 1593-1623.
__ https://mc-stan.org/users/documentation/
Parameters
----------
data
Data to be modeled. It should be given as a Pandas DataFrame object,
a filepath for a data file, or ``"example"`` for example data.
Data columns should be labeled as: "subjID", "choice", "gain", "loss".
niter
Number of iterations, including warm-up. Defaults to 4000.
nwarmup
Number of iterations used for warm-up only. Defaults to 1000.
``nwarmup`` is a numerical value that specifies how many MCMC samples
should not be stored upon the beginning of each chain. For those
familiar with Bayesian methods, this is equivalent to burn-in samples.
Due to the nature of the MCMC algorithm, initial values (i.e., where the
sampling chains begin) can have a heavy influence on the generated
posterior distributions. The ``nwarmup`` argument can be set to a
higher number in order to curb the effects that initial values have on
the resulting posteriors.
nchain
Number of Markov chains to run. Defaults to 4.
``nchain`` is a numerical value that specifies how many chains (i.e.,
independent sampling sequences) should be used to draw samples from
the posterior distribution. Since the posteriors are generated from a
sampling process, it is good practice to run multiple chains to ensure
that a reasonably representative posterior is attained. When the
sampling is complete, it is possible to check the multiple chains for
convergence by running the following line of code:
.. code:: python
output.plot(type='trace')
ncore
Number of CPUs to be used for running. Defaults to 1.
nthin
Every ``nthin``-th sample will be used to generate the posterior
distribution. Defaults to 1. A higher number can be used when
auto-correlation within the MCMC sampling is high.
``nthin`` is a numerical value that specifies the "skipping" behavior
of the MCMC sampler. That is, only every ``nthin``-th sample is used to
generate posterior distributions. By default, ``nthin`` is equal to 1,
meaning that every sample is used to generate the posterior.
inits
String or list specifying how the initial values should be generated.
Options are ``'fixed'`` or ``'random'``, or your own initial values.
ind_pars
String specifying how to summarize the individual parameters.
Current options are: ``'mean'``, ``'median'``, or ``'mode'``.
model_regressor
Whether to export model-based regressors. Currently not available for this model.
vb
Whether to use variational inference to approximately draw from a
posterior distribution. Defaults to ``False``.
inc_postpred
Include trial-level posterior predictive simulations in
model output (may greatly increase file size). Defaults to ``False``.
adapt_delta
Floating point value representing the target acceptance probability of a new
sample in the MCMC chain. Must be between 0 and 1. See note below.
stepsize
Integer value specifying the size of each leapfrog step that the MCMC sampler
can take on each new iteration. See note below.
max_treedepth
Integer value specifying how many leapfrog steps the MCMC sampler can take
on each new iteration. See note below.
**additional_args
For this model, it's possible to set the following model-specific argument to a value that you may prefer.
- ``payscale``: Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100.
Returns
-------
model_data
An ``hbayesdm.TaskModel`` instance with the following components:
- ``model``: String value that is the name of the model ('igt_orl').
- ``all_ind_pars``: Pandas DataFrame containing the summarized parameter values
(as specified by ``ind_pars``) for each subject.
- ``par_vals``: OrderedDict holding the posterior samples over different parameters.
- ``fit``: A PyStan StanFit object that contains the fitted Stan model.
- ``raw_data``: Pandas DataFrame containing the raw data used to fit the model,
as specified by the user.
Examples
--------
.. code:: python
from hbayesdm import rhat, print_fit
from hbayesdm.models import igt_orl
# Run the model and store results in "output"
output = igt_orl(data='example', niter=2000, nwarmup=1000, nchain=4, ncore=4)
# Visually check convergence of the sampling chains (should look like "hairy caterpillars")
output.plot(type='trace')
# Plot posterior distributions of the hyper-parameters (distributions should be unimodal)
output.plot()
# Check Rhat values (all Rhat values should be less than or equal to 1.1)
rhat(output, less=1.1)
# Show the LOOIC and WAIC model fit estimates
print_fit(output)
"""
return IgtOrl(
data=data,
niter=niter,
nwarmup=nwarmup,
nchain=nchain,
ncore=ncore,
nthin=nthin,
inits=inits,
ind_pars=ind_pars,
model_regressor=model_regressor,
vb=vb,
inc_postpred=inc_postpred,
adapt_delta=adapt_delta,
stepsize=stepsize,
max_treedepth=max_treedepth,
**additional_args)
| gpl-3.0 |
dalejung/trtools | trtools/core/tests/test_wrangling.py | 1 | 5358 | from unittest import TestCase
import pandas as pd
import numpy as np
import trtools.core.wrangling as wrangling
import imp
imp.reload(wrangling)
import trtools.util.testing as tm
from trtools.tools.profiler import Profiler
pairwise = wrangling.pairwise
ind = pd.date_range(start="2000-01-01", freq="D", periods=300)
columns = ['col'+str(i) for i in range(50)]
df = pd.DataFrame(np.random.randn(len(ind), len(columns)), index=ind, columns=columns)
class TestWrangling(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_pairwise(self):
df = pd.DataFrame(index=list(range(10)))
for x in range(3):
df[x] = list(range(x, x+10))
nandf = df.copy().astype(float)
nandf.ix[9:,1] = np.nan
# test with order=True
# test with permutations
pairs = pairwise(df, lambda x, y: x.sum() - y.sum())
expected = pd.DataFrame([[0, -10, -20],
[10, 0, -10],
[20, 10, 0]], index=list(range(3)), dtype=float)
tm.assert_frame_equal(pairs, expected)
# test with combinations
pairs = pairwise(df, lambda x, y: x.sum() - y.sum(), order=False)
expected = pd.DataFrame([[0, -10, -20],
[-10, 0, -10],
[-20, -10, 0]], index=list(range(3)), dtype=float)
tm.assert_frame_equal(pairs, expected)
# test with combinations and values
# use nandf to test. np.ndarray.sum() returns NaN if it contains nan
pairs = pairwise(nandf, lambda x, y: x.sum() - y.sum(), order=True,
force_values=True)
expected = pd.DataFrame([[0, np.nan, -20],
[np.nan, np.nan, np.nan],
[20, np.nan, 0]], index=list(range(3)), dtype=float)
tm.assert_frame_equal(pairs, expected)
# test with np.nansum.
pairs = pairwise(nandf, lambda x, y: np.nansum(x) - np.nansum(y),
order=True, force_values=True)
expected = pd.DataFrame([[0, 0, -20],
[0, 0, -20],
[20, 20, 0]], index=list(range(3)), dtype=float)
tm.assert_frame_equal(pairs, expected)
# the np.nansum version should be same as Series.sum version
pairs_series = pairwise(nandf, lambda x, y: x.sum() - y.sum(),
order=True, force_values=False)
tm.assert_frame_equal(pairs, pairs_series)
def test_dshift_float(self):
"""
Since float is nan-able. The simple call should give the
same output
"""
test = df.dshift(1)
correct = df.shift(1)
tm.assert_almost_equal(test.values, correct.values)
test = df.dshift(-2)
correct = df.shift(-2)
tm.assert_almost_equal(test.values, correct.values)
def test_dshift_bool(self):
"""
bool has no nan.
"""
bf = df > 0
test = bf.dshift(1)
correct = bf.shift(1).fillna(False).astype(bool)
assert test.dtypes.unique()[0] == bool
assert test.dtypes.nunique() == 1
tm.assert_almost_equal(test.values, correct.values)
test = bf.dshift(-2)
correct = bf.shift(-2).fillna(False).astype(bool)
assert test.dtypes.unique()[0] == bool
assert test.dtypes.nunique() == 1
tm.assert_almost_equal(test.values, correct.values)
def test_dshift_int(self):
"""
int has no nan.
"""
intdf = (df * 100).astype(int)
test = intdf.dshift(1)
correct = intdf.shift(1).fillna(-1).astype(int)
assert test.dtypes.unique()[0] == int
assert test.dtypes.nunique() == 1
tm.assert_almost_equal(test.values, correct.values)
test = intdf.dshift(-2)
correct = intdf.shift(-2).fillna(-1).astype(int)
assert test.dtypes.unique()[0] == int
assert test.dtypes.nunique() == 1
tm.assert_almost_equal(test.values, correct.values)
def test_dshift_raw(self):
# bool
bf = df > 0
test = bf.dshift(1, raw=True)
correct = bf.shift(1).fillna(False).astype(float)
assert type(test) is np.ndarray
tm.assert_almost_equal(test, correct.values)
# float
test = df.dshift(1, raw=True)
correct = df.shift(1)
assert type(test) is np.ndarray
tm.assert_almost_equal(test, correct.values)
def test_dshift_fill_value(self):
# float
test = df.dshift(1, fill_value=-100)
correct = df.shift(1).fillna(-100)
tm.assert_almost_equal(test.values, correct.values)
# int
intdf = (df * 100).astype(int)
test = intdf.dshift(1, fill_value=-100)
correct = intdf.shift(1).fillna(-100)
tm.assert_almost_equal(test.values, correct.values)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],exit=False)
| mit |
r0k3/arctic | tests/integration/test_arctic.py | 4 | 6898 | from datetime import datetime as dt, timedelta as dtd
from mock import patch
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import pytest
import time
import numpy as np
from arctic.arctic import Arctic, VERSION_STORE
from arctic.exceptions import LibraryNotFoundException, QuotaExceededException
from ..util import get_large_ts
def test_connect_to_Arctic_string(mongo_host):
arctic = Arctic(mongo_host=mongo_host)
assert arctic.list_libraries() == []
assert arctic.mongo_host == mongo_host
def test_connect_to_Arctic_connection(mongodb, mongo_host):
arctic = Arctic(mongodb)
assert arctic.list_libraries() == []
assert arctic.mongo_host == mongo_host
def test_simple(library):
sym = 'symbol'
data = get_large_ts(100)
library.write(sym, data)
orig = dt.now()
    time.sleep(1)  # move the timestamp on by 1 second
data2 = get_large_ts(100)
library.write(sym, data2, prune_previous_version=False)
# Get the timeseries, it should be the same
read2 = library.read(sym).data
assert_frame_equal(read2, data2)
# Ensure we can get the previous version
read = library.read(sym, as_of=orig).data
assert_frame_equal(read, data)
def test_indexes(arctic):
c = arctic._conn
arctic.initialize_library("library", VERSION_STORE, segment='month')
chunk = c.arctic.library.index_information()
assert chunk == {u'_id_': {u'key': [(u'_id', 1)], u'ns': u'arctic.library', u'v': 1},
u'symbol_1_parent_1_segment_1': {u'background': True,
u'key': [(u'symbol', 1),
(u'parent', 1),
(u'segment', 1)],
u'ns': u'arctic.library',
u'unique': True,
u'v': 1},
u'symbol_1_sha_1': {u'background': True,
u'key': [(u'symbol', 1), (u'sha', 1)],
u'ns': u'arctic.library',
u'unique': True,
u'v': 1},
u'symbol_hashed': {u'background': True,
u'key': [(u'symbol', u'hashed')],
u'ns': u'arctic.library',
u'v': 1}}
snapshots = c.arctic.library.snapshots.index_information()
assert snapshots == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.snapshots',
u'v': 1},
u'name_1': {u'background': True,
u'key': [(u'name', 1)],
u'ns': u'arctic.library.snapshots',
u'unique': True,
u'v': 1}}
versions = c.arctic.library.versions.index_information()
assert versions == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.versions',
u'v': 1},
u'symbol_1__id_-1': {u'background': True,
u'key': [(u'symbol', 1), (u'_id', -1)],
u'ns': u'arctic.library.versions',
u'v': 1},
u'symbol_1_version_-1': {u'background': True,
u'key': [(u'symbol', 1), (u'version', -1)],
u'ns': u'arctic.library.versions',
u'unique': True,
u'v': 1}}
version_nums = c.arctic.library.version_nums.index_information()
assert version_nums == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.version_nums',
u'v': 1},
u'symbol_1': {u'background': True,
u'key': [(u'symbol', 1)],
u'ns': u'arctic.library.version_nums',
u'unique': True,
u'v': 1}}
def test_delete_library(arctic, library, library_name):
mongo = arctic._conn
# create a library2 library too - ensure that this isn't deleted
arctic.initialize_library('user.library2', VERSION_STORE, segment='month')
library.write('asdf', get_large_ts(1))
assert 'TEST' in mongo.arctic_test.collection_names()
assert 'TEST.versions' in mongo.arctic_test.collection_names()
assert 'library2' in mongo.arctic_user.collection_names()
assert 'library2.versions' in mongo.arctic_user.collection_names()
arctic.delete_library(library_name)
assert 'TEST' not in mongo.arctic_user.collection_names()
assert 'TEST.versions' not in mongo.arctic_user.collection_names()
with pytest.raises(LibraryNotFoundException):
arctic[library_name]
with pytest.raises(LibraryNotFoundException):
arctic['arctic_{}'.format(library_name)]
assert 'library2' in mongo.arctic_user.collection_names()
assert 'library2.versions' in mongo.arctic_user.collection_names()
def test_quota(arctic, library, library_name):
thing = list(range(100))
library._arctic_lib.set_quota(10)
assert arctic.get_quota(library_name) == 10
assert library._arctic_lib.get_quota() == 10
library.write('thing', thing)
with pytest.raises(QuotaExceededException):
library.write('ts', thing)
library.write('ts', thing)
library.write('ts', thing)
library.write('ts', thing)
with pytest.raises(QuotaExceededException):
arctic.check_quota(library_name)
def test_check_quota(arctic, library, library_name):
with patch('arctic.arctic.logger.info') as info:
arctic.check_quota(library_name)
assert info.call_count == 1
def test_default_mongo_retry_timeout():
now = time.time()
with pytest.raises(LibraryNotFoundException):
Arctic('unresolved-host', serverSelectionTimeoutMS=0)['some.lib']
assert time.time() - now < 1.
| lgpl-2.1 |
Gregor-Mendel-Institute/SNPmatch | snpmatch/core/simulate.py | 1 | 3889 | import logging
import numpy as np
import pandas as pd
from . import snpmatch
from . import parsers
from . import snp_genotype
log = logging.getLogger(__name__)
def simulateSNPs(g, AccID, numSNPs, outFile=None, err_rate=0.001):
assert type(AccID) is str, "provide Accession ID as a string"
assert AccID in g.g.accessions, "accession is not present in the matrix!"
AccToCheck = np.where(g.g.accessions == AccID)[0][0]
log.info("loading input files")
acc_snp = g.g_acc.snps[:,AccToCheck]
informative_snps = np.where(acc_snp >= 0)[0] ## Removing NAs for accession
input_df = pd.DataFrame(np.column_stack((np.array(g.g.chromosomes)[informative_snps], g.g.positions[informative_snps], acc_snp[informative_snps] )), columns = ["chr", 'pos', 'snp'])
## Input -- pandas dataframe with chr, position and genotype
#assert type(input_df) == pd.core.frame.DataFrame, "please provide a pandas dataframe"
#assert input_df.shape[1] >= 3, "first three columns are needed in dataframe: chr, pos, snp"
## default error rates = 0.001
log.info("sampling %s positions" % numSNPs)
sampleSNPs = np.sort(np.random.choice(np.arange(input_df.shape[0]), numSNPs, replace=False))
input_df = input_df.iloc[sampleSNPs,:]
log.info("adding in error rate: %s" % err_rate)
num_to_change = int(err_rate * input_df.shape[0])
input_df.iloc[np.sort(np.random.choice(np.arange(input_df.shape[0]), num_to_change, replace=False)), 2] = np.random.choice(3, num_to_change)
input_df.iloc[:, 2] = parsers.snp_binary_to_gt( np.array(input_df.iloc[:,2]) )
if outFile is not None:
input_df.to_csv( outFile, sep = "\t", index = None, header = False )
return(input_df)
def simulateSNPs_F1(g, parents, numSNPs, outFile, err_rate, rm_hets = 1):
indP1 = np.where(g.g_acc.accessions == parents.split("x")[0])[0][0]
indP2 = np.where(g.g_acc.accessions == parents.split("x")[1])[0][0]
log.info("loading files!")
snpsP1 = g.g_acc.snps[:,indP1]
snpsP2 = g.g_acc.snps[:,indP2]
common_ix = np.where((snpsP1 >= 0) & (snpsP2 >= 0) & (snpsP1 < 2) & (snpsP2 < 2))[0]
segregating_ix = np.where(snpsP1[common_ix] != snpsP2[common_ix] )[0]
diff_ix = np.setdiff1d( np.arange(len(common_ix)), segregating_ix )
common_snps = np.zeros(len(common_ix), dtype="int8")
common_snps[segregating_ix] = 2
common_snps[diff_ix] = snpsP1[common_ix[diff_ix]]
input_df = pd.DataFrame( np.column_stack((np.array(g.g_acc.chromosomes)[common_ix], np.array(g.g_acc.positions)[common_ix], common_snps )), columns = ["chr", 'pos', 'snp'] )
log.info("sampling %s positions" % numSNPs)
sampleSNPs = np.sort(np.random.choice(np.arange(input_df.shape[0]), numSNPs, replace=False))
input_df = input_df.iloc[sampleSNPs,:]
input_df['snp'] = input_df['snp'].astype(int)
log.info("adding in error rate: %s" % err_rate)
num_to_change = int(err_rate * input_df.shape[0])
input_df.iloc[np.sort(np.random.choice(np.where(input_df['snp'] != 2)[0], num_to_change, replace=False)), 2] = np.random.choice(2, num_to_change)
## Also change hets randomly to homozygous
het_ix = np.where(input_df['snp'] == 2)[0]
input_df.iloc[het_ix, 2] = np.random.choice(3, het_ix.shape[0], p=[(1-rm_hets)/2,(1-rm_hets)/2,rm_hets])
## Save the file to a bed file
input_df.iloc[:, 2] = parsers.snp_binary_to_gt( np.array(input_df.iloc[:,2]) )
if outFile is not None:
input_df.to_csv( outFile, sep = "\t", index = None, header = False )
return(input_df)
def potatoSimulate(args):
g = snp_genotype.Genotype(args['hdf5File'], args['hdf5accFile'] )
if args['simF1']:
simulateSNPs_F1(g, args['AccID'], args['numSNPs'], args['outFile'], args['err_rate'], args['rm_het'])
else:
simulateSNPs(g, args['AccID'], args['numSNPs'], args['outFile'], args['err_rate'])
log.info("finished!")
| mit |
tjmassin/gwdetchar | gwdetchar/scattering/__main__.py | 1 | 7365 | # coding=utf-8
# Copyright (C) Alex Urban (2019)
#
# This file is part of the GW DetChar python package.
#
# GW DetChar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GW DetChar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GW DetChar. If not, see <http://www.gnu.org/licenses/>.
"""Simple command-line interface to gwdetchar.scattering
This module scans through records of optic motion and projects scattering
fringe frequencies. For those channels with fringes above a user-specified
threshold, a plot is created comparing the fringes to a high-resolution Q-scan
spectrogram.
To identify time segments where scattering is likely, please use the
command-line script: `gwdetchar-scattering --help`
"""
import os
import sys
from matplotlib import use
use('agg') # noqa: E402
from gwpy.time import to_gps
from .. import (cli, const)
from ..omega import highpass
from ..io.datafind import get_data
from . import (
OPTIC_MOTION_CHANNELS,
get_fringe_frequency,
plot,
)
__author__ = 'Alex Urban <[email protected]>'
__credits__ = 'Joshua Smith <[email protected]>, ' \
              'Andrew Lundgren <[email protected]>, ' \
              'Duncan Macleod <[email protected]>'
# global variables
ASD_KW = {
'method': 'median',
'fftlength': 8,
'overlap': 4,
}
MOTION_CHANNELS = [channel for key in OPTIC_MOTION_CHANNELS.keys()
for channel in OPTIC_MOTION_CHANNELS[key]]
logger = cli.logger('gwdetchar.scattering')
# -- main function ------------------------------------------------------------
def main(args=None):
"""Parse command-line arguments, process optics, and write plots
"""
# define command-line arguments
parser = cli.create_parser(description=__doc__)
parser.add_argument('gpstime', type=to_gps,
help='GPS time or datestring to analyze')
cli.add_ifo_option(parser, ifo=const.IFO)
parser.add_argument('-d', '--duration', type=float, default=60,
help='Duration (seconds) of analysis, default: 60')
parser.add_argument('-t', '--frequency-threshold', type=float, default=15,
help='critical fringe frequency threshold (Hz), '
'default: %(default)s')
parser.add_argument('-m', '--multipliers', default='1,2,4,8',
help='harmonic numbers to plot projected motion for, '
'should be given as a comma-separated list of '
'numbers, default: %(default)s')
parser.add_argument('-x', '--multiplier-for-threshold', type=int,
default=4,
help='frequency multiplier to use when applying '
'--frequency-threshold, default: %(default)s')
parser.add_argument('-w', '--primary-channel',
default='GDS-CALIB_STRAIN',
help='name of primary channel (without IFO prefix), '
'default: %(default)s')
parser.add_argument('-W', '--primary-frametype', default='{IFO}_HOFT_C00',
help='frametype from which to read primary channel, '
'default: %(default)s')
parser.add_argument('-a', '--aux-frametype', default='{IFO}_R',
help='frametype from which to read aux channels, '
'default: %(default)s')
parser.add_argument('-o', '--output-dir', type=os.path.abspath,
default=os.curdir,
help='Output directory for analysis, '
'default: %(default)s')
parser.add_argument('-c', '--colormap', default='viridis',
help='name of colormap to use, default: %(default)s')
# parse arguments
args = parser.parse_args(args)
ifo = args.ifo
gps = float(args.gpstime)
gpsstart = gps - 0.5 * args.duration
gpsend = gps + 0.5 * args.duration
primary = ':'.join([ifo, args.primary_channel])
thresh = args.frequency_threshold
multipliers = [int(x) for x in args.multipliers.split(',')]
harmonic = args.multiplier_for_threshold
if '{IFO}' in args.primary_frametype:
args.primary_frametype = args.primary_frametype.format(IFO=ifo)
if '{IFO}' in args.aux_frametype:
args.aux_frametype = args.aux_frametype.format(IFO=ifo)
logger.info('{0} Scattering: {1}'.format(ifo, gps))
# set up spectrogram
logger.debug('Setting up a Q-scan spectrogram of {}'.format(primary))
hoft = get_data(primary, start=gps-34, end=gps+34,
frametype=args.primary_frametype,
verbose='Reading primary channel:'.rjust(30))
hoft = highpass(hoft, f_low=thresh).resample(256)
qspecgram = hoft.q_transform(qrange=(4, 150), frange=(0, 60), gps=gps,
fres=0.1, outseg=(gpsstart, gpsend), **ASD_KW)
qspecgram.name = primary
# process channels
channels = [':'.join([ifo, c]) for c in MOTION_CHANNELS]
data = get_data(
channels, start=gpsstart, end=gpsend, frametype=args.aux_frametype,
verbose='Reading auxiliary sensors:'.rjust(30))
count = 0 # running count of plots written
for channel in channels:
logger.info(' -- Processing {} -- '.format(channel))
        try:
            motion = data[channel].detrend().resample(128)
        except KeyError:
            # skip channels missing from the returned data, otherwise `motion`
            # would be undefined in the code below
            logger.warning('Skipping {}'.format(channel))
            continue
# project scattering frequencies
fringe = get_fringe_frequency(motion, multiplier=1)
ind = fringe.argmax()
fmax = fringe.value[ind]
tmax = fringe.times.value[ind]
logger.debug('Maximum scatter frequency {0:.2f} Hz at GPS second '
'{1:.2f}'.format(fmax, tmax))
if harmonic * fmax < thresh:
logger.warning('No significant evidence of scattering '
'found in {}'.format(channel))
continue
# plot spectrogram and fringe frequency
output = os.path.join(
args.output_dir,
'%s-%s-%s-{}.png' % (
channel.replace('-', '_').replace(':', '-', 1),
gps, args.duration)
)
logger.debug('Plotting spectra and projected fringe frequencies')
plot.spectral_comparison(
gps, qspecgram, fringe, output.format('comparison'), thresh=thresh,
multipliers=multipliers, colormap=args.colormap)
plot.spectral_overlay(
gps, qspecgram, fringe, output.format('overlay'),
multipliers=multipliers)
logger.info(' -- Channel complete -- ')
count += 1 # increment counter
    logger.info('{0:g} channels plotted in {1}'.format(count, args.output_dir))
if __name__ == "__main__": # pragma: no-cover
sys.exit(main())
| gpl-3.0 |
CTU-IIG/FlexRaySSScheduler | BenchmarkGenerator/Scripts/make_Incremental_plot.py | 1 | 3512 | from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib
matplotlib.use("Qt4Agg")
import matplotlib.pyplot as plt
from matplotlib import cm
import argparse
import csv
import os
# Example usage (options must match the argparse definition below):
# python3 make_Incremental_plot.py --iterations 10 --instances 30 Results/results_MV_vs_Incremental.csv
number_of_evaluations = 4
class Plotter:
def __init__(self, iterations: int):
self.dataset = [[0] * number_of_evaluations for _ in range(iterations)]
def read_data(self, results_file: str, iterations: int, instances: int):
if not os.path.exists(results_file):
raise ValueError("The file with results {} does not exists!".format(results_file))
with open(results_file, 'r') as f:
csv_file = csv.reader(f, delimiter=';')
for row in csv_file:
label = row[0]
label = label.split(".", 1)[0]
iteration = int(label.split("_")[4])
self.dataset[iteration][0] += int(row[1])
self.dataset[iteration][1] += int(row[3])
self.dataset[iteration][2] += int(row[5])
self.dataset[iteration][3] += int(row[7])
for i in range(iterations):
for j in range(number_of_evaluations):
self.dataset[i][j] /= instances
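    # Expected input format, inferred from the parsing above and shown only as an
    # illustration: each row of the ';'-separated CSV starts with a label such as
    # "bench_x_y_z_<iteration>.txt" (the iteration index is the fifth '_'-separated
    # token), and columns 1, 3, 5 and 7 hold the four slot counts that are averaged
    # over the instances.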
def plot(self, iterations: int):
if len(self.dataset) <= 0:
raise ValueError("Dataset should not be empty")
plt.rc("text", usetex=True)
plt.rcParams["xtick.major.pad"]='10'
fig = plt.figure(1)
fig.patch.set_facecolor('white')
axe_x = np.asarray([float(x) for x in range(iterations)])
datasetzip = list(zip(*self.dataset))
styles = ["s-", "D-", "o-", "^-", "*-"]
labels = ["\\Large Lower bound", " \\Large Non-incremental scheduling", "\\Large Incremental scheduling with extensibility optimisation", "\\Large Incremental scheduling without extensibility optimisation"]
cmap = cm.winter
for i in range(number_of_evaluations):
axe_y = np.asarray(datasetzip[i])
plt.plot(axe_x, axe_y, styles[i], markersize=7, color=cmap((i+1)/float(number_of_evaluations + 1)), label="\\rmfamily {}".format(labels[i]))
plt.axis([0, 9.5, 100, 150])
plt.xticks(range(0, 10), ["\\rmfamily \\LARGE 1", "\\rmfamily \\LARGE 2", "\\rmfamily \\LARGE 3", "\\rmfamily \\LARGE 4", "\\rmfamily \\LARGE 5", "\\rmfamily \\LARGE 6", "\\rmfamily \\LARGE 7", "\\rmfamily \\LARGE 8", "\\rmfamily \\LARGE 9", "\\rmfamily \\LARGE 10"])
plt.yticks(range(100, 150, 10), ["\\rmfamily \\LARGE 100", "\\rmfamily \\LARGE 110", "\\rmfamily \\LARGE 120", "\\rmfamily \\LARGE 130", "\\rmfamily \\LARGE 140"])
plt.legend(loc="upper left")
plt.xlabel("{\\rmfamily \\LARGE Iteration}")
plt.ylabel("{\\rmfamily \\LARGE Number of allocated slots}")
# plt.xticks(range(self.folders_count), self.folder_names, rotation=45)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("file", help="Original file with the results")
parser.add_argument("--instances", help="Number of instances", type=int, default=10)
parser.add_argument("--iterations", help="Number of iterations", type=int, default=10)
args = parser.parse_args()
plotter = Plotter(args.iterations)
plotter.read_data(args.file, args.iterations, args.instances)
plotter.plot(args.iterations)
| gpl-2.0 |
wwjiang007/flink | flink-python/pyflink/table/serializers.py | 9 | 3095 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import io
from pyflink.serializers import IterableSerializer
from pyflink.table.utils import arrow_to_pandas, pandas_to_arrow
class ArrowSerializer(IterableSerializer):
"""
Serializes pandas.Series into Arrow streaming format data.
"""
def __init__(self, schema, row_type, timezone):
super(ArrowSerializer, self).__init__()
self._schema = schema
self._field_types = row_type.field_types()
self._timezone = timezone
def __repr__(self):
return "ArrowSerializer"
def serialize(self, iterable, stream):
writer = None
try:
for cols in iterable:
batch = pandas_to_arrow(self._schema, self._timezone, self._field_types, cols)
if writer is None:
import pyarrow as pa
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def deserialize(self, stream):
import pyarrow as pa
reader = pa.ipc.open_stream(stream)
for batch in reader:
yield arrow_to_pandas(self._timezone, self._field_types, [batch])
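    # Illustrative round trip (a sketch, not part of the original module; `schema`,
    # `row_type` and `timezone` must be supplied by the caller and the file path is
    # a placeholder):
    #   serializer = ArrowSerializer(schema, row_type, timezone)
    #   with open('/tmp/batches.arrow', 'wb') as out:
    #       serializer.serialize(iterable_of_column_lists, out)
    #   with open('/tmp/batches.arrow', 'rb') as src:
    #       for pdf in serializer.deserialize(src):
    #           pass  # each item is the pandas form of one Arrow record batch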
def load_from_iterator(self, iter):
class IteratorIO(io.RawIOBase):
def __init__(self, iter):
super(IteratorIO, self).__init__()
self.iter = iter
self.leftover = None
def readable(self):
return True
def readinto(self, b):
output_buffer_len = len(b)
input = self.leftover or (self.iter.next() if self.iter.hasNext() else None)
if input is None:
return 0
output, self.leftover = input[:output_buffer_len], input[output_buffer_len:]
b[:len(output)] = output
return len(output)
import pyarrow as pa
reader = pa.ipc.open_stream(
io.BufferedReader(IteratorIO(iter), buffer_size=io.DEFAULT_BUFFER_SIZE))
for batch in reader:
yield batch
| apache-2.0 |
musketeer191/job_analytics | ja_helpers.py | 1 | 35949 | import random
import numpy as np
import numpy.linalg as la
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.feature_extraction.text as text_manip
import matplotlib.ticker as mtick
from sklearn.decomposition import NMF, LatentDirichletAllocation
from scipy.sparse import *
from scipy.io import *
from collections import Counter
from time import time
# my own modules
import my_util as my_util
from my_util import *
from stat_helpers import *
## Seed for reproducibility
# random.seed(123)
## Helpers for filtering
max_n_word = 3
def filterJDs(post_df, skills, min_n_skill=2):
print('Extracting JDs with at least %d unique skills...' %min_n_skill)
n_post, n_skill = post_df.shape[0], len(skills)
# Count no. of unique skills in each JD
binary_vectorizer = text_manip.CountVectorizer(vocabulary=skills, ngram_range=(1, max_n_word), binary=True)
t0 = time()
print('\tMarking occurrence of %d skills in %d JDs...' %(n_skill, n_post))
doc_skill_occurrence = binary_vectorizer.fit_transform(post_df['clean_text'])
print('Done after %.1fs' %(time() - t0))
post_df['n_uniq_skill'] = doc_skill_occurrence.sum(axis=1).A1
## Remove JDs with <= 1 skills
cond = 'n_uniq_skill >= {}'.format(min_n_skill)
sub_posts = post_df.query(cond)
return sub_posts
def filterSkills(skills, posts, min_n_jd):
    print('Extracting skills occurring in at least %d JDs...' %min_n_jd)
# (Re-)count no. of JDs containing each skill
n_posts = posts.shape[0]
t0 = time()
print('\tMarking occurrence of skills in %d JDs ...' %n_posts)
binary_vectorizer = text_manip.CountVectorizer(vocabulary=skills, ngram_range=(1, max_n_word), binary=True)
doc_skill_occurrence = binary_vectorizer.fit_transform(posts['clean_text'])
print('Done after %.1fs' %(time() - t0))
n_jd_by_skill = doc_skill_occurrence.sum(axis=0).A1
# print quantile(n_jd_by_skill)
# Remove skills occuring in <=1 JDs
df = pd.DataFrame({'skill': skills, 'n_jd_with_skill': n_jd_by_skill})
cond = 'n_jd_with_skill >= {}'.format(min_n_jd)
sub_skill_df = df.query(cond)
return sub_skill_df
def cal_test_err(mf_models):
test_error = []
print('No. of topics, Test error, Running time')
for k in ks:
t0 = time()
H = mf_models[k].components_
W_test = mf_models[k].fit_transform(X_test, H=H)
err = la.norm(X_test - np.matmul(W_test, H))
test_error.append(err)
print('%d, %0.1f, %0.1fs' %(k, err, time() - t0))
# end for
return test_error
def findOccurSkills(init_skills, jd_docs):
count_vectorizer = text_manip.CountVectorizer(vocabulary=init_skills, ngram_range=(1, max_n_word))
t0 = time()
print('Counting occurrence of skills with length <= %d ...' %max_n_word)
doc_skill_freq = count_vectorizer.fit_transform(jd_docs)
print('Done after %.1fs' %(time() - t0))
skill_freq = doc_skill_freq.sum(axis=0).A1
skill_df = pd.DataFrame({'skill': init_skills, 'total_freq': skill_freq})
occur_skills_df = skill_df.query('total_freq > 0')
occur_skills = occur_skills_df['skill']
print('No. of skills actually occurring in JDs: %d' %len(occur_skills))
return occur_skills_df
# def findSkills(occur_skills, jd_docs):
# count_vectorizer = text_manip.CountVectorizer(vocabulary=occur_skills, ngram_range=(1, max_n_word))
# t0 = time()
# print('Counting occurrence of skills with length <= %d ...' %max_n_word)
# doc_skill_freq = count_vectorizer.fit_transform(jd_docs)
# print('Doing inverse transform to get skills in each JD...')
# skills_by_jd = count_vectorizer.inverse_transform(doc_skill_freq)
# print('Done after %.1fs' %(time() - t0))
# return skills_by_jd
def filtering(init_posts, init_skills):
n_iter, posts, skills = 0, init_posts, init_skills
n_post = posts.shape[0]
stop_cond, thres = False, .98
while not stop_cond:
n_iter = n_iter + 1
print('Iteration %d' %n_iter)
        new_posts = filterJDs(posts, skills, min_n_skill=2)
        n_new_post = new_posts.shape[0]
        print('No. of posts after filtering: %d' %n_new_post)
        skill_df = filterSkills(skills, new_posts, min_n_jd=2)
new_skills = skill_df['skill']
print('No. of skills after filtering: %d' %len(new_skills) )
stop_cond = (n_new_post >= thres*n_post) and (len(new_skills) >= thres*len(skills))
posts = new_posts
n_post = posts.shape[0]
skills = new_skills
# end while
return posts, skills
def countOccur_ngram(n=1):
t0 = time()
print('Marking occurrence of {}-gram skills...'.format(n))
# use option binary to indicate that we only care whether a given skill occurs or not, not the freq of the skill
vectorizer = text_manip.CountVectorizer(vocabulary=skills, binary=True, ngram_range=(n,n))
doc_ngram_occurrence = vectorizer.fit_transform(jd_docs)
print('Done after %.1fs' %(time() - t0))
n_ngram_by_jd = doc_ngram_occurrence.sum(axis=1).A1
return pd.DataFrame({'job_id': posts['job_id'], 'n_{}gram'.format(n): n_ngram_by_jd})
def buildDocNgramMat(n, jd_docs, skills):
t0 = time()
print('Counting occurrence of {}-gram skills...'.format(n))
vectorizer = text_manip.CountVectorizer(vocabulary=skills, ngram_range=(n,n))
doc_ngram_mat = vectorizer.fit_transform(jd_docs)
print('Done after %.1fs' %(time() - t0))
return doc_ngram_mat
n_proc_doc = 0  # module-level counter of documents processed by rmSkills
def rmSkills(d, skills):
    ## track progress with the module-level counter above (reset by buildDocSkillMat)
global n_proc_doc
n_proc_doc += 1;
if (n_proc_doc % 10000)==0:
print('Removal for {} docs and counting...'.format(n_proc_doc))
res = d
for sk in skills:
res = res.replace(sk, '')
return res
def buildDocSkillMat(jd_docs, skill_df, folder):
"""
    @brief      {Build a document-skill matrix where each entry $e(d, s)$
                is the frequency of skill $s$ in job description $d$. Handles
                the overlap problem between n-grams (e.g. 'business' inside the
                2-gram 'business management' is counted separately from the
                1-gram 'business')}
@param jd_docs The clean jd documents
@param skill_df The skill df
@param folder The folder to store intermediate matrices
{doc_unigram, doc_bigram, doc_trigram},
None if don't want to store them.
@return The sparse document-skill matrix.
"""
def save(sp_mat, mat_name):
fname = folder + mat_name + '.mtx'
with(open(fname, 'w')) as f:
mmwrite(f, sp_mat)
print('Saved {} matrix'.format(mat_name))
global n_proc_doc
if not folder:
print('No folder passed, will not save intermediate matrices.')
trigram_skills = np.unique(skill_df.query('n_word == 3')['skill'])
bigram_skills = np.unique(skill_df.query('n_word == 2')['skill'])
unigram_skills = np.unique(skill_df.query('n_word == 1')['skill'])
doc_trigram = buildDocNgramMat(n=3, jd_docs=jd_docs, skills=trigram_skills)
if folder:
save(doc_trigram, 'doc_trigram')
print('Removing tri-grams from JDs to avoid duplications...')
n_proc_doc = 0
jd_docs = jd_docs.apply(rmSkills, skills=trigram_skills)
print('Done')
doc_bigram = buildDocNgramMat(n=2, jd_docs=jd_docs, skills=bigram_skills)
if folder:
save(doc_bigram, 'doc_bigram')
print('Removing bi-grams from JDs...')
n_proc_doc = 0
jd_docs = jd_docs.apply(rmSkills, skills = bigram_skills)
print('Done')
doc_unigram = buildDocNgramMat(n=1, jd_docs=jd_docs, skills=unigram_skills)
if folder:
save(doc_unigram, 'doc_unigram')
doc_skill = hstack([doc_unigram, doc_bigram, doc_trigram])
return doc_skill
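# Illustrative use of buildDocSkillMat (a sketch; the file names and column names
# below are placeholders, not data shipped with this project):
#   posts = pd.read_csv('posts.csv')        # needs a 'clean_text' column
#   skill_df = pd.read_csv('skills.csv')    # needs 'skill' and 'n_word' columns
#   doc_skill = buildDocSkillMat(posts['clean_text'], skill_df, folder=None)
#   # doc_skill is a sparse (n_posts x n_skills) count matrix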
def getSkills(doc_idx, doc_term, skills):
row = doc_term.getrow(doc_idx)
indices = row.nonzero()[1]
occur_skills = skills[indices]
return pd.DataFrame({'occur_skills': ','.join(occur_skills), 'n_skill': len(occur_skills)}, index=[doc_idx])
# including original document is meant for sanity checking
# return pd.DataFrame({'doc': docs[doc_idx], 'occur_skills': ','.join(occur_skills), 'n_skill': len(occur_skills)}, index=[doc_idx])
def getSkills4Docs(docs, doc_term, skills): # doc_term is the doc-term count matrix built from docs (so train/test_docs go with train/test_doc_term resp)
n_doc = len(docs)
frames = [getSkills(doc_idx=dd, doc_term=doc_term, skills=skills) for dd in range(n_doc)]
res = pd.concat(frames)
# res = res.drop_duplicates()
return res
def initLDA_model(k, beta):
alpha = 50.0/k
print("Init LDA with priors: alpha = %.1f, beta = %.1f" %(alpha, beta))
model = LatentDirichletAllocation(n_topics=k, max_iter=5, learning_method='online', learning_offset=50., random_state=0,
doc_topic_prior=alpha, topic_word_prior=beta) # verbose=1
return model
def trainLDA(beta, ks, trainning_set):
lda_scores = []
lda = {k: initLDA_model(k, beta) for k in ks}
print('Fitting LDA models...')
print('No. of topics, Log-likelihood, Running time')
for k in ks:
t0 = time()
lda[k].fit(trainning_set)
s = lda[k].score(trainning_set)
print('%d, %0.1f, %0.1fs' %(k, s, time() - t0))
lda_scores.append(s)
return lda
def testLDA(lda, ks, test_set):
perp = [lda[k].perplexity(test_set) for k in ks]
perp_df = pd.DataFrame({'No. of topics': ks, 'Perplexity': perp})
lda_best_k = ks[np.argmin(perp)]
print('Best no. of topics for LDA: %d' %lda_best_k)
return perp_df
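# Sketch of how trainLDA and testLDA are combined (beta, ks and the train/test
# matrices below are placeholders chosen for illustration):
#   ks = [10, 15, 20, 25]
#   lda_models = trainLDA(beta=0.1, ks=ks, trainning_set=X_train)
#   perp_df = testLDA(lda_models, ks, test_set=X_test)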
def toIDF(terms, doc_term_mat):
n_doc, n_term = doc_term_mat.shape[0], doc_term_mat.shape[1]
# no. of docs containing a term = no. of non zero entries in the col of the term
n_doc_with_term = [doc_term_mat.getcol(t).nnz for t in range(n_term)]
res = pd.DataFrame({'term': terms, 'n_doc_with_term': n_doc_with_term})
res = res.query('n_doc_with_term > 0')
res['idf'] = np.log10(np.divide(n_doc, n_doc_with_term))
return res
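# Worked example of the idf formula above: with n_doc = 1000 documents and a term
# occurring in 10 of them, idf = log10(1000 / 10) = 2.0, so rarer terms receive
# larger weights.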
def getClusterAtRow(i, df):
r = df.iloc[i]
cluster = r['cluster']
prob = str(round(r['cluster_prob'], 3))
s = ''.join([cluster, '(', prob, ')'])
return s
# LDA_DIR = 'd:/larc_projects/job_analytics/results/skill_cluster/new/lda/'
# clusters = pd.read_csv(LDA_DIR + 'clusters.csv')['cluster']
# print('Loaded cluster labels as follow:')
# print(clusters)
def getTopClusterProb(row, doc_topic_distr):
doc_idx = row.name
probs = doc_topic_distr[doc_idx, :]
return round(max(probs), 4)
def getTopClusters(k, doc_idx, doc_df, doc_topic_distr):
probs = doc_topic_distr[doc_idx, :]
df = pd.DataFrame({'cluster': clusters, 'cluster_prob': probs})
df.sort_values('cluster_prob', ascending=False, inplace=True)
top_k = [getClusterAtRow(i, df) for i in range(k)]
row = doc_df.iloc[doc_idx]
job_id, doc = row['job_id'], row['doc']
return pd.DataFrame({'job_id': job_id, 'doc': doc, 'top_{}_cluster'.format(k): ';'.join(top_k)}, index=[doc_idx])
# df.head(k)
# df['job_id'] = doc_df.iloc[doc_idx]['job_id']
# df['doc'] = doc_df.iloc[doc_idx]['doc']
def findIndex(arr, thres):
sub_sums = {k: sum(arr[0:k]) for k in range(len(arr))}
for k in range(len(arr)):
if sub_sums[k] > thres:
return k
def getTermsInDoc(row, doc_term_mat, vocab):
idx = doc_term_mat[row].nonzero()[1]
occur_terms = [vocab[i] for i in idx]
return occur_terms
def getTopClusters_GT(row, doc_topic_distr, thres):
doc_idx = row.name
probs = doc_topic_distr[doc_idx, :]
df = pd.DataFrame({'cluster': clusters, 'cluster_prob': probs})
df.sort_values('cluster_prob', ascending=False, inplace=True)
## get top k clusters such that sum(prob_of_top_k) > thres
k = findIndex(df['cluster_prob'], thres)
top_k = [getClusterAtRow(i, df) for i in range(k)]
return ';'.join(top_k)
# return k
## Topics learned by MALLET LDA ====================================
def getTopicRow(i, df):
row = df.iloc[i, :]
topic, prob = row['topic'], str(round(row['prob'], 3))
return ''.join([topic, '(', prob, ')'])
def getTopTopics(row, topics, thres=.5):
df = pd.DataFrame({'topic': topics, 'prob': row})
df = df.sort_values('prob', ascending=False)
k = findIndex(df['prob'], thres)
top_k = [getTopicRow(i, df) for i in range(k)]
return ';'.join(top_k)
## Skill clustering analysis
def plotSkillDist(res):
fig = plt.figure()
n, bins, patches = plt.hist(res['n_skill'], bins=np.unique(res['n_skill']))
plt.xlabel('# skills in JD'); plt.ylabel('# JDs')
plt.xticks(range(0, 120, 10))
plt.grid(True)
return fig
def getGroupMedian(g1, g2, g3, g4):
m1 = np.median(g1['n_top_cluster']); m2 = np.median(g2['n_top_cluster'])
m3 = np.median(g3['n_top_cluster']); m4 = np.median(g4['n_top_cluster'])
print('Medians of the groups:')
return pd.DataFrame({'range_of_n_skill': ['[2, 7)', '[7, 12)', '[12, 18)', '[18, 115]'],
'median_of_n_top_cluster': [m1, m2, m3, m4]})
# x = [1,2,3,4]; labels = ['[2, 7)', '[7, 12)', '[12, 18)', '[18, 115]']
def mixtureSizePlot(g1, g2, g3, g4):
groups = [g1['n_top_cluster'], g2['n_top_cluster'], g3['n_top_cluster'], g4['n_top_cluster']]
fig = plt.boxplot(groups)
plt.xlabel('# skills in job post'); plt.ylabel('Mixture size') # # most likely clusters
plt.xticks(x, labels); plt.ylim(0, 9)
return fig
def topClusterProbPlot(g1, g2, g3, g4):
groups = [g1['prob_top_cluster'], g2['prob_top_cluster'], g3['prob_top_cluster'], g4['prob_top_cluster']]
fig = plt.boxplot(groups)
plt.xlabel('# skills in job post'); plt.ylabel('Probability of top cluster')
plt.xticks(x, labels);
plt.grid(True)
return fig
def errorBarPlot(res, thres):
g1 = res.query('n_skill < 7'); g2 = res.query('n_skill >= 7 & n_skill < 12')
g3 = res.query('n_skill >= 12 & n_skill < 18'); g4 = res.query('n_skill >= 18')
print('# posts in 4 groups:'); print(','.join([str(g1.shape[0]), str(g2.shape[0]), str(g3.shape[0]), str(g4.shape[0])]))
## Cal avg, min, max of each group
col = 'n_top_cluster_{}'.format(int(thres*100))
groups = [g1[col], g2[col], g3[col], g4[col]]
min_groups = np.asarray(map(min, groups)); max_groups = np.asarray(map(max, groups));
avg_groups = np.asarray(map(np.mean, groups))
## Plot
lower_error = avg_groups - min_groups; upper_error = max_groups - avg_groups
asymmetric_error = [lower_error, upper_error]
y = avg_groups
fig = plt.errorbar(x, y, yerr=asymmetric_error, fmt='o')
plt.xlim(0, 5);
plt.ylim(0, 7)
plt.xticks(x, labels); plt.grid(True)
plt.xlabel('# skills in job post'); plt.ylabel('# skill clusters assigned to job post')
plt.title('Mixture size (threshold={})'.format(thres))
return fig
def getIndex(i, df):
return df.iloc[i].name
## Topic modelling ====================================
# get top words of a topic (i.e. a word dist)
def get_top_words(n_top_words, word_dist, feature_names):
norm_word_dist = np.divide(word_dist, sum(word_dist))
sorting_idx = word_dist.argsort()
top_words = [feature_names[i] for i in sorting_idx[:-n_top_words - 1:-1]]
probs = [norm_word_dist[i] for i in sorting_idx[:-n_top_words - 1:-1]]
return pd.DataFrame({'top_words': top_words, 'word_probs': probs})
def print_top_words(n_top_words, model, feature_names):
for topic_idx, topic in enumerate(model.components_):
norm_topic = np.divide(topic, sum(topic))
print("Topic #%d:" % topic_idx)
print(" ".join([(feature_names[i] + '(%0.3f' %norm_topic[i] + ')')
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# get top words learnt by a model
def top_words_df(n_top_words, model, feature_names):
res = pd.DataFrame({'topic':[], 'top_words':[], 'word_probs':[]})
for t_idx, topic in enumerate(model.components_):
top_words_of_topic = get_top_words(n_top_words, word_dist=topic, feature_names=feature_names)
topic_idx = np.ones(n_top_words)*(t_idx+1)
tmp = pd.concat([pd.DataFrame({'topic_idx': topic_idx }), top_words_of_topic], axis=1)
res = pd.concat([res, tmp])
return res[:][['topic_idx', 'top_words', 'word_probs']] # re-order columns as desired
## Similarity scores ====================================
def skillSim(p1, p2):
skills1, skills2 = set(p1['occur_skills'].split(',')), set(p2['occur_skills'].split(','))
intersection, union = skills1.intersection(skills2), skills1.union(skills2)
return len(intersection)/float(len(union))
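# Tiny worked example of the Jaccard score above (hypothetical posts):
#   p1 = {'occur_skills': 'python,sql,hadoop'}
#   p2 = {'occur_skills': 'python,sql,spark'}
#   intersection has 2 skills, union has 4, so skillSim(p1, p2) == 0.5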
# Main workhorse, return sims in either data frame or matrix format,
# output format is controlled by out_fmt
def pairwiseSim(posts, doc_topic_distr, out_fmt='data_frame', verbose=True):
def topicSim(i, j, df):
idx1, idx2 = getIndex(i, df), getIndex(j, df)
d1, d2 = doc_topic_distr[idx1, :], doc_topic_distr[idx2, :]
topic_sim = 1 - np.sqrt(JSD(d1, d2))
return topic_sim
def skillSimOfRows(i, j, df):
"""
Jaccard similarity between 2 posts at rows i and j of given df
"""
p1, p2 = df.iloc[i], df.iloc[j]
return skillSim(p1, p2)
# employer = p1['employer_name']
# job_id1, job_id2 = p1['job_id'], p2['job_id']
def simPair(i, j, df):
topic_sim = topicSim(i, j, df)
skill_sim = skillSimOfRows(i, j, df)
# meta-info
idoc, jdoc = df.iloc[i], df.iloc[j]
job_id1, job_id2 = idoc['job_id'], jdoc['job_id']
employer1, employer2 = idoc['employer_name'],jdoc['employer_name']
skills1, skills2 = idoc['occur_skills'], jdoc['occur_skills']
# doc1, doc2 = idoc['doc'], jdoc['doc']
res = pd.DataFrame({'job_id1': job_id1, 'job_id2': job_id2,
'topic_sim': round(topic_sim, 3), 'skill_sim': round(skill_sim, 2),
'skills1': skills1, 'skills2': skills2,
'employer1': employer1, 'employer2': employer2}, index=[1]) # 'doc1': doc1, 'doc2': doc2
return res
def sim2Subseq(i, df, out_fmt='lists'):
"""Similarity scores bw a post at row i with subseq posts in df"""
n_doc = df.shape[0]
# If there are actually subseq posts
if (i <= n_doc-2):
if (i % 50 == 0) and verbose:
print('\t {} posts and counting...'.format(i))
if (out_fmt == 'data_frame'):
frames = [simPair(i, j, df) for j in range(i, n_doc)] # i+1
res = pd.concat(frames).reset_index(); del res['index']
return res
if (out_fmt == 'lists'):
topic_sims = [topicSim(i, j, df) for j in range(i, n_doc)] # i+1
skill_sims = [skillSimOfRows(i, j, df) for j in range(i+1, n_doc)]
return pd.DataFrame({'topic_sim': topic_sims, 'skill_sim': skill_sims})
pass
def simMat(posts, level='topic'):
n_post = posts.shape[0]
sims = np.zeros(shape=(n_post, n_post))
for i in xrange(n_post):
sims[i, i] = 1
# j < i
for j in xrange(i):
sims[i, j] = sims[j, i]
# j > i (only exist if i is not the last post, ie n_post-1)
if (i < n_post-1) :
if (level == 'topic'):
sims[i, (i+1):n_post] = sim2Subseq(i, posts, out_fmt='lists')['topic_sim']
if (level == 'skill'):
sims[i, (i+1):n_post] = sim2Subseq(i, posts, out_fmt='lists')['skill_sim']
return sims
def simDF(posts):
n_post = posts.shape[0]
# t0 = time()
frames = [sim2Subseq(i, posts, out_fmt='data_frame') for i in range(n_post)]
# print('Done after %.1fs' %(time() - t0))
return pd.concat(frames).reset_index()
# n_post = posts.shape[0]
# print('Computing pairwise similarity scores among {} job posts...'.format(n_post))
# print('each post is compared with subseq posts...')
if (out_fmt == 'data_frame'):
return simDF(posts)
if (out_fmt == 'matrix_topic_sim'):
return simMat(posts, level='topic')
if (out_fmt == 'matrix_skill_sim') :
return simMat(posts, level='skill')
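# Illustrative call of pairwiseSim (a sketch; `posts` must carry job_id,
# employer_name and occur_skills columns, and its row labels must index rows of
# doc_topic_distr as elsewhere in this module):
#   sim_df = pairwiseSim(posts.head(50), doc_topic_distr, out_fmt='data_frame')
#   topic_mat = pairwiseSim(posts.head(50), doc_topic_distr, out_fmt='matrix_topic_sim')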
def rmBadPosts(posts, title):
thres = posts.n_skill.quantile(.25)
# print('Removed 1st quarter of posts of {}, each having <= {} skills'.format(title, int(thres)))
return posts[posts.n_skill > thres]
def sampleIfMoreThan(max_n_post, t, post_df):
posts = post_df[post_df.title == t]
return posts if len(posts) <= max_n_post else posts.sample(max_n_post)
def cachePosts(titles, post_df):
max_n_post = 100
res = {t: sampleIfMoreThan(max_n_post, t, post_df) for t in titles}
print('Done caching sampled posts for titles with more than {}'.format(max_n_post))
return res
def crossSimScores(posts1, posts2, doc_topic_distr, verbose=False):
"""
Return cross sims (topic_sim and skill_sim) bw posts in 2 sets posts1 and posts2
"""
def sims(p1, p2):
idx1, idx2 = p1.name, p2.name
d1, d2 = doc_topic_distr[idx1, :], doc_topic_distr[idx2, :]
topic_sim = 1 - np.sqrt(JSD(d1, d2))
skill_sim = skillSim(p1, p2)
res = pd.DataFrame({'job_id1': p1.job_id, 'job_title1': p1.title, 'employer1': p1.employer_name,
'job_id2': p2.job_id, 'job_title2': p2.title, 'employer2': p2.employer_name,
'topic_sim': topic_sim, 'skill_sim': skill_sim,
'skills1': p1.occur_skills, 'skills2': p2.occur_skills},
index=[1])
return res
# global count; count = 0
def sims2Set(p, posts):
n_post = posts.shape[0]
frames = [sims(p, posts.iloc[i]) for i in xrange(n_post)]
# global count; count += 1
# if (count % 10 == 0) and verbose:
# print('%d posts and counting...' %count)
return pd.concat(frames)
n1 = posts1.shape[0]; n2 = posts2.shape[0]
frames = [sims2Set(posts1.iloc[i], posts2) for i in xrange(n1)]
res = pd.concat(frames);
return res
def postSimScore(posts1, posts2, doc_topic_distr):
## Rm lousy posts with too few skills from both sets
# posts1 = rmBadPosts(posts1, t1)
# posts2 = rmBadPosts(posts2, t2)
n1, n2 = posts1.shape[0], posts2.shape[0]
if (n1 > 0) and (n2 > 0):
res = crossSimScores(posts1, posts2, doc_topic_distr, verbose=False)
topic_sim = round(res['topic_sim'].mean(), 3)
return topic_sim # return res
return np.nan
def titleSim(t1, t2, doc_topic_distr, df=None, cached_posts=None, verbose=False):
# posts1 = df[df.title == t1]
# posts2 = df[df.title == t2]
posts1 = cached_posts[t1]
posts2 = cached_posts[t2]
if verbose:
n1, n2 = posts1.shape[0], posts2.shape[0]
print('\t{} ({} posts) vs. {} ({} posts)'.format(t1, n1, t2, n2))
return postSimScore(posts1, posts2, doc_topic_distr)
def sims2SubseqTitle(i, titles, doc_topic_distr, cached_posts=None, verbose=False):
'''
@param i: index of the focus title
@param titles
@return topic sims of i-th title with its sub seq titles in the given titles
'''
n_title = len(titles); focus_title = titles[i]
sub_seq_titles = titles[i+1 : n_title]
res = pd.DataFrame({'t1': sub_seq_titles, 't2': focus_title})
res['topic_sim'] = res['t1'].apply(titleSim, t2=focus_title,
doc_topic_distr=doc_topic_distr,
cached_posts=cached_posts, verbose=verbose)
print('\t Calculated sims of {} to all subseq titles'.format(focus_title))
return res
def calSims4Batch(b, size, titles, doc_topic_distr, cached_posts):
start = b*size; end = start + size
t0 = time()
frames = [sims2SubseqTitle(i, titles, doc_topic_distr, cached_posts) for i in range(start, end)]
elapse = round(time() - t0, 1)
print('\tFinished sim cals for a batch of {} job titles in {}s'.format(size, elapse))
return frames
def saveBatch(b, res, tmp_dir):
res = res.reset_index(); del res['index']
res.to_csv(tmp_dir + 'b{}.csv'.format(b), index=False)
print('\t Saved results of batch {}'.format(b))
def calAndSaveSims4Batch(b, bsize, titles, doc_topic_distr, cached_posts, tmp_dir):
frames = calSims4Batch(b, bsize, titles, doc_topic_distr, cached_posts)
res = pd.concat(frames)
saveBatch(b, res, tmp_dir)
return res
def simsAmong(titles, doc_topic_distr, df, verbose=False, bsize=50, tmp_dir=''):
n_title = len(titles)
msg = '# job titles: {}. For job titles with > 100 posts, only sample 100 posts.'
print(msg.format(n_title))
if n_title > 1:
cached_posts = cachePosts(titles, df) # s.t. we do not have to repeat sampling
# if no. of title is large, it is better to
# cal for each batch and save defensively to avoid loss of results
n_batch = n_title/bsize; remains = n_title % bsize
frames = [calAndSaveSims4Batch(b, bsize, titles, doc_topic_distr, cached_posts, tmp_dir)
for b in xrange(0, n_batch)]
return pd.concat(frames)
else: # return an empty data frame instead of None
return pd.DataFrame({'t1': [], 't2': [], 'topic_sim': []})
def buildTopkFrom(rel_sims, k, job_title):
rel_sims.sort_values('topic_sim', ascending=False, inplace=True)
tmp = rel_sims.head(k).copy()
# if focus job title is the 2nd column, swap it to 1st column
part1 = tmp[tmp.t1 == job_title]
part2 = tmp[tmp.t2 == job_title]
topk = part1.append(swapCols('t1', 't2', part2))
topk['title_n_sim'] = pasteCols('t2', 'topic_sim', topk)
return topk
# return ', '.join(topk['title_n_sim'])
def topkByFunction(job_title, k, func_sims):
q = 't1 == "{}" or t2 == "{}"'.format(job_title, job_title)
rel_sims = func_sims.query(q)
return buildTopkFrom(rel_sims, k, job_title)
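# Illustrative call (a sketch; func_sims must have columns t1, t2 and topic_sim,
# e.g. as produced by simsAmong above, and the job title is a placeholder):
#   top5 = topkByFunction('software engineer', k=5, func_sims=func_sims)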
## Funcs for filtering ====================================
def titlesIn(domain, title_df):
return title_df.title[title_df.domain == domain].unique()
def titlesWithFunc(pri_func, title_df):
sub_df = title_df[title_df.pri_func == pri_func]
sub_df = sub_df.sort_values('n_post', ascending=False)
return sub_df.title.unique()
def titlesHavingAtLeast(records, min_post):
return list(records[records.n_post >= min_post]['title'])
## Visualization ====================================
def getPostsInPairs(pairs, df):
job_ids = set(pairs.job_id1.append(pairs.job_id2))
posts = df[df.job_id.isin(job_ids)]
print('# posts retrieved: %d' %posts.shape[0])
return posts
def pretty(employer_name='Millennium Capital Management (Singapore) Pte. Ltd.'):
name = employer_name.title()
name = name.replace('Pte.','').replace('Ltd.','').replace('(Singapore)','').replace('Job-', '')
return name.strip()
def plotDist(post, doc_topic_distr, labels):
"""
@param post
@param doc_topic_distr contains cluster distributions of all posts
@return Bar chart of the cluster distribution of given post (bars at x locs)
"""
n_topic = doc_topic_distr.shape[1]
topic_idx = np.arange(1, n_topic + 1)
if len(topic_idx) != len(labels):
print('# xticks ({}) != # labels ({})!'.format(len(topic_idx), len(labels)))
pass
if len(topic_idx) == len(labels):
job_id, employer, job_title = pretty(post['job_id']), pretty(post['employer_name']), post['title']
idx = post.name # post['index']
probs = doc_topic_distr[idx, :]
bars = plt.bar(topic_idx, probs) # width = 0.3
plt.xticks(topic_idx, labels, rotation=45)
plt.xlim(0, n_topic + 1)
plt.grid(True)
plt.title(job_id + '(' + job_title + ' at ' + employer + ')')
# print('x = {}'.format(x))
return bars
def topicDists(p1, p2, doc_topic_distr, labels):
fig, axes = plt.subplots(2, sharex=True, figsize=(6,6))
plt.subplot(211)
bars1 = plotDist(p1, doc_topic_distr, labels)
plt.subplots_adjust(hspace=.3)
plt.subplot(212)
bars2 = plotDist(p2, doc_topic_distr, labels)
plt.xlabel('Skill Clusters', fontsize=16)
# hide x-ticks of 1st subplot (NOTE: this kind of fine tune need to be done last, why?)
plt.setp(fig.axes[0].get_xticklabels(), visible=False)
return fig
def vizDists4Pair(row, df, doc_topic_distr, labels):
"""
@brief Plot cluster distributions of the pair of posts stored at given row (in a df of post sims)
@param row
@param df {storing posts and their indices in the matrix doc_topic_distr}
@return {2 bar charts of the cluster distributions, sharing x axis}
"""
p1 = df[df.job_id == row.job_id1].iloc[0]; p2 = df[df.job_id == row.job_id2].iloc[0]
fig = topicDists(p1, p2, doc_topic_distr, labels)
topic_sim, skill_sim = round(row.topic_sim, 3), round(row.skill_sim, 3)
title = 'Topic similarity: {}, skill similarity {}'.format(topic_sim, skill_sim)
fig.suptitle(title, fontsize=16)
return fig
topic_df = pd.read_csv('d:/larc_projects/job_analytics/results/lda/20_topics.csv')
labels = map(str.upper, topic_df['label'])
def vizPostPair(i, sim_df, post_df, doc_topic_distr, labels, abbv_title=''):
row = sim_df.iloc[i]
fig = vizDists4Pair(row, post_df, doc_topic_distr, labels)
fig.savefig(RES_DIR + 'fig/{}_p{}.pdf'.format(abbv_title, i+1))
plt.show(); plt.close()
# May not be needed anymore
def vizTopicDists(posts, doc_topic_distr, figsize):
"""
Plot cluster distributions of posts
"""
n_post = posts.shape[0]
fig, axarr = plt.subplots(n_post, sharex='col', sharey='row', figsize=figsize) # sharex=True
n_group = 2; group_size = n_post/n_group
lasts = range((group_size-1)*n_group, n_post)
    for p in range(n_post):
        plt.subplot(group_size, n_group, p+1)
        plotDist(posts.iloc[p], doc_topic_distr, labels)
# Show xtick labels and x-label only for the last subplot in each group
if p in lasts:
plt.xticks(x, labels, rotation=45)
plt.xlabel('Skill Clusters', fontsize=16)
# Show ylabel at the middle
# if p==(n_post/2 - 1):
# plt.ylabel('Probability', fontsize=16)
## Fine tune the fig
fig.subplots_adjust(hspace=.5)
# In each group, hide xticks on all subplots except the last one
hide_xticks(fig, lasts)
return fig
def vizSkillSim(sim_df, ax, sci_fmt=True, fontsize=16):
skill_sim = sim_df.skill_sim
hist = ax.hist(skill_sim)
ax.grid(True)
skill_sim_mean, skill_sim_std = round(skill_sim.mean(), 2), round(skill_sim.std(), 2)
xl = 'Skill Similarity\n' + r'$(\mu: {}, \sigma: {})$'.format(skill_sim_mean, skill_sim_std)
plt.xlabel(xl, fontsize=fontsize)
plt.ylabel('Count', fontsize=fontsize)
if sci_fmt:
ax.yaxis.set_major_formatter( mtick.FormatStrFormatter('%.1e') )
# setAxProps(ax, fontproperties)
return hist
def vizTopicSim(sim_df, ax=None, sci_fmt=True, fontsize=16):
topic_sim = sim_df.topic_sim
if ax:
ax.hist(topic_sim)
ax.grid(True)
else:
plt.hist(topic_sim)
plt.grid(True)
topic_sim_mean, topic_sim_std = round(topic_sim.mean(), 3), round(topic_sim.std(), 3)
xl = 'Topic Similarity\n' + r'$(\mu: {}, \sigma: {})$'.format(topic_sim_mean, topic_sim_std)
plt.xlabel(xl, fontsize=fontsize)
plt.ylabel('# pairs of job titles', fontsize=fontsize)
if sci_fmt:
ax.yaxis.set_major_formatter( mtick.FormatStrFormatter('%.1e') )
# setAxProps(ax, fontproperties)
def plotSimDists(sim_df, figsize=(10, 5), sci_fmt=True):
"""
@param sim_df
@return 2 hists of topic_sim and skill_sim (in sim_df) of job posts
"""
fig, axes = plt.subplots(1, 2, sharey=True, figsize=figsize)
fontsize = 16; fontweight = 'bold'
fontproperties = {'weight' : fontweight} # 'family':'sans-serif','sans-serif':['Helvetica'], 'size' : fontsize
ax = plt.subplot(1,2,1)
skill_sim_hist = vizSkillSim(sim_df, ax, sci_fmt)
plt.subplots_adjust(wspace=.5, bottom=.15) # top=.9
ax = plt.subplot(1,2,2)
topic_sim_hist = vizTopicSim(sim_df, ax, sci_fmt)
return fig
def viz(sims):
fig, ax = plt.subplots()
vizTopicSim(sims, ax)
fig.subplots_adjust(bottom=.2)
return fig
def vizJobPostDist(by_n_post):
"""
@param by_n_post: group job titles by their number of posts
@return The 2 distributions of job posts in job titles
before and after title standardization
"""
fig = plt.figure()
n_post_vals = by_n_post.n_post
plt.scatter(x=n_post_vals, y=by_n_post.n_title, marker='o', c='b')
plt.scatter(x=n_post_vals, y=by_n_post.n_title_after_std, marker='x', c='r')
plt.loglog()
plt.xlabel('# job posts'); plt.ylabel('# job titles')
plt.xlim(min(n_post_vals), max(n_post_vals)*10)
plt.grid(True)
plt.legend(['Before title standardization', 'After title standardization'])
return fig
## Eval against SkillFuture framework
def skillFreq(posts):
skills_in_posts = ','.join(posts.occur_skills); ls_skills = skills_in_posts.split(',')
c = Counter(ls_skills)
skill_df = pd.DataFrame({'skill': c.keys(), 'freq': c.values()})
return skill_df.sort_values('freq', ascending=False)
def getTitleStats(posts, titles=None):
"""
@param titles, None if we just want to get stats for all titles in
the posts instead of a specific set of titles
@param posts
@return The statistics for the titles in the given posts, not in whole ds
"""
by_title = posts.groupby('title')
tmp = by_title.agg({'n_skill': np.mean, 'job_id': len, 'employer_id': 'nunique'})
tmp = tmp.reset_index();
tmp = tmp.rename(columns={'job_id': 'n_post', 'n_skill': 'avg_n_skill',
'employer_id': 'n_employer'})
if not titles:
return tmp.sort_values('n_post', ascending=False).round(1)
if titles:
tmp = tmp[tmp.title.isin(titles)]
return tmp.sort_values('n_post', ascending=False).round(1)
# def freq(sk, skill_sets):
# count = 0;
# for ss in skill_sets:
# if sk in ss: count += 1
# return count
## Others
def findOutliers(res):
lif, lof = getLowerFences(res['topic_sim'])
sus_outliers = res.query('topic_sim < {}'.format(lif)).query('topic_sim > {}'.format(lof))
outliers = res.query('topic_sim < {}'.format(lof))
return [sus_outliers, outliers]
def analyzeOutliers(res):
outliers = pd.concat(findOutliers(res))
return outliers.sort_values('topic_sim')
def trainNMF(tfidf_mat):
pass
def plotMetrics(train_metric, test_metric, model_name):
fig = plt.figure(figsize=(6,6))
plt.subplot(2, 1, 1)
plt.plot(ks, train_metric)
plt.xlabel('No. of topics')
plt.ylabel(r'$\| X_{train} - W_{train} H \|_2$')
plt.title('Error of {} on train set'.format(model_name))
plt.grid(True)
plt.xticks(ks)
plt.subplots_adjust(wspace=.5, hspace=.5)
plt.subplot(2, 1, 2)
plt.plot(ks, test_metric)
plt.xlabel('No. of topics')
plt.ylabel(r'$\| X_{test} - W_{test} H \|_2$')
plt.title('Error of {} on test set'.format(model_name))
plt.grid(True)
plt.xticks(ks)
plt.show()
return fig | gpl-3.0 |
macks22/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
recarroll/opengm | src/interfaces/python/examples/python_visitor_gui.py | 14 | 1377 | """
Usage: python_visitor_gui.py
This script shows how one can implement visitors
in pure Python and inject them into an OpenGM solver.
( not all OpenGM solvers support this kind of
code injection )
"""
import opengm
import numpy
import matplotlib
from matplotlib import pyplot as plt
shape=[100,100]
numLabels=10
unaries=numpy.random.rand(shape[0], shape[1],numLabels)
potts=opengm.PottsFunction([numLabels,numLabels],0.0,0.4)
gm=opengm.grid2d2Order(unaries=unaries,regularizer=potts)
inf=opengm.inference.BeliefPropagation(gm,parameter=opengm.InfParam(damping=0.5))
class PyCallback(object):
def __init__(self,shape,numLabels):
self.shape=shape
self.numLabels=numLabels
self.cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( self.numLabels,3))
matplotlib.interactive(True)
def begin(self,inference):
print "begin of inference"
def end(self,inference):
print "end of inference"
def visit(self,inference):
gm=inference.gm()
labelVector=inference.arg()
print "energy ",gm.evaluate(labelVector)
labelVector=labelVector.reshape(self.shape)
plt.imshow(labelVector*255.0, cmap=self.cmap,interpolation="nearest")
plt.draw()
callback=PyCallback(shape,numLabels)
visitor=inf.pythonVisitor(callback,visitNth=1)
inf.infer(visitor)
argmin=inf.arg()
| mit |
moutai/scikit-learn | sklearn/utils/tests/test_class_weight.py | 50 | 13151 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
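# For reference, the "balanced" heuristic exercised above computes
# weight_c = n_samples / (n_classes * count_c); for y = [2, 2, 2, 3, 3, 4] this
# gives 6 / (3 * [3, 2, 1]) = [0.67, 1.0, 2.0], so np.dot(weights, counts) == 6.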
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
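# Illustrative sketch, not part of the original test suite: the "balanced"
# preset weights each sample by n_samples / (n_classes * np.bincount(y)),
# so that every class contributes the same total weight.
def _example_balanced_weight_formula():
    y = np.asarray([1, 1, 1, 2, 2, 2, 2, 2, 2])
    sample_weight = compute_sample_weight("balanced", y)
    # class 1: 9 / (2 * 3) = 1.5, class 2: 9 / (2 * 6) = 0.75
    assert_array_almost_equal(sample_weight,
                              [1.5, 1.5, 1.5, .75, .75, .75, .75, .75, .75])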
| bsd-3-clause |
LiaoPan/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, unlike
            # [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
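# Illustrative sketch, not part of the original module: predict_proba returns
# the per-class fraction of training samples in the leaf a query point falls
# into, as described in the docstring above.
def _example_leaf_class_fractions():
    X = np.array([[0.], [1.], [2.], [3.]])
    y = np.array([0, 0, 0, 1])
    clf = DecisionTreeClassifier(max_depth=1, random_state=0).fit(X, y)
    # the single split separates the three class-0 points from the class-1
    # point, so the two leaves yield probabilities [1, 0] and [0, 1]
    return clf.predict_proba(X)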
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
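# Illustrative sketch, not part of the original module: because extra-trees
# draw split thresholds at random, two trees fit on the same data with
# different seeds usually end up with different structures.
def _example_extra_tree_randomness():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 4)
    y = (X[:, 0] > 0.5).astype(int)
    t1 = ExtraTreeClassifier(random_state=1).fit(X, y)
    t2 = ExtraTreeClassifier(random_state=2).fit(X, y)
    # node counts typically differ between the two seeds
    return t1.tree_.node_count, t2.tree_.node_count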
| bsd-3-clause |
jesserobertson/daft | examples/thicklines.py | 7 | 1212 | """
T-shirt style
=============
Don't like dainty thin lines? Need to make graphical-model-themed
conference schwag? Then `line_width` is the parameter for you. Also
check out the `preamble` option in the `matplotlib.rc` command.
"""
from matplotlib import rc
rc("font", family="serif", size=14)
rc("text", usetex=True)
rc('text.latex',
preamble="\usepackage{amssymb}\usepackage{amsmath}\usepackage{mathrsfs}")
import daft
# Instantiate the PGM.
pgm = daft.PGM([2.3, 2.05], origin=[0.3, 0.3], line_width=2.5)
# Hierarchical parameters.
pgm.add_node(daft.Node("alpha", r"$\boldsymbol{\alpha}$", 0.5, 2, fixed=True))
pgm.add_node(daft.Node("beta", r"$\boldsymbol{\beta}$", 1.5, 2))
# Latent variable.
pgm.add_node(daft.Node("w", r"$\boldsymbol{w_n}$", 1, 1))
# Data.
pgm.add_node(daft.Node("x", r"$\boldsymbol{x_n}$", 2, 1, observed=True))
# Add in the edges.
pgm.add_edge("alpha", "beta")
pgm.add_edge("beta", "w")
pgm.add_edge("w", "x")
pgm.add_edge("beta", "x")
# And a plate.
pgm.add_plate(daft.Plate([0.5, 0.5, 2, 1], label=r"$\boldsymbol{n = 1, \cdots, N}$",
shift=-0.1))
# Render and save.
pgm.render()
pgm.figure.savefig("thicklines.pdf")
pgm.figure.savefig("thicklines.png", dpi=150)
| mit |
wanderknight/trading-with-python | lib/interactivebrokers.py | 77 | 18140 | """
Copyright: Jev Kuznetsov
Licence: BSD
Interface to interactive brokers together with gui widgets
"""
import sys
# import os
from time import sleep
from PyQt4.QtCore import (SIGNAL, SLOT)
from PyQt4.QtGui import (QApplication, QFileDialog, QDialog, QVBoxLayout, QHBoxLayout, QDialogButtonBox,
QTableView, QPushButton, QWidget, QLabel, QLineEdit, QGridLayout, QHeaderView)
import ib
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from ib.ext.Order import Order
import logger as logger
from qtpandas import DataFrameModel, TableView
from eventSystem import Sender
import numpy as np
import pandas
from pandas import DataFrame, Index
from datetime import datetime
import os
import datetime as dt
import time
priceTicks = {1: 'bid', 2: 'ask', 4: 'last', 6: 'high', 7: 'low', 9: 'close', 14: 'open'}
timeFormat = "%Y%m%d %H:%M:%S"
dateFormat = "%Y%m%d"
def createContract(symbol, secType='STK', exchange='SMART', currency='USD'):
""" contract factory function """
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = secType
contract.m_exchange = exchange
contract.m_currency = currency
return contract
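# Illustrative sketch, not part of the original module: the defaults cover a
# US stock routed through SMART; a forex pair only needs a different secType
# and exchange (the symbols used here are placeholders, not requirements).
def _exampleCreateContracts():
    spy = createContract('SPY')
    eurusd = createContract('EUR', secType='CASH', exchange='IDEALPRO', currency='USD')
    return spy, eurusd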
def _str2datetime(s):
""" convert string to datetime """
return datetime.strptime(s, '%Y%m%d')
def readActivityFlex(fName):
"""
parse trade log in a csv file produced by IB 'Activity Flex Query'
the file should contain these columns:
['Symbol','TradeDate','Quantity','TradePrice','IBCommission']
Returns:
A DataFrame with parsed trade data
"""
import csv
rows = []
with open(fName, 'rb') as f:
reader = csv.reader(f)
for row in reader:
rows.append(row)
header = ['TradeDate', 'Symbol', 'Quantity', 'TradePrice', 'IBCommission']
types = dict(zip(header, [_str2datetime, str, int, float, float]))
idx = dict(zip(header, [rows[0].index(h) for h in header]))
data = dict(zip(header, [[] for h in header]))
for row in rows[1:]:
print row
for col in header:
val = types[col](row[idx[col]])
data[col].append(val)
return DataFrame(data)[header].sort(column='TradeDate')
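# Illustrative sketch, not part of the original module: 'flexReport.csv' is a
# hypothetical file name for an Activity Flex export with the columns listed
# in the docstring above.
def _exampleReadActivityFlex():
    trades = readActivityFlex('flexReport.csv')
    # net traded quantity per symbol
    return trades.groupby('Symbol')['Quantity'].sum()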
class Subscriptions(DataFrameModel, Sender):
""" a data table containing price & subscription data """
def __init__(self, tws=None):
super(Subscriptions, self).__init__()
self.df = DataFrame() # this property holds the data in a table format
self._nextId = 1
self._id2symbol = {} # id-> symbol lookup dict
self._header = ['id', 'position', 'bid', 'ask', 'last'] # columns of the _data table
# register callbacks
if tws is not None:
tws.register(self.priceHandler, message.TickPrice)
tws.register(self.accountHandler, message.UpdatePortfolio)
def add(self, symbol, subId=None):
"""
Add a subscription to data table
return : subscription id
"""
if subId is None:
subId = self._nextId
data = dict(zip(self._header, [subId, 0, np.nan, np.nan, np.nan]))
row = DataFrame(data, index=Index([symbol]))
self.df = self.df.append(row[self._header]) # append data and set correct column order
self._nextId = subId + 1
self._rebuildIndex()
self.emit(SIGNAL("layoutChanged()"))
return subId
def priceHandler(self, msg):
""" handler function for price updates. register this with ibConnection class """
if priceTicks[msg.field] not in self._header: # do nothing for ticks that are not in _data table
return
self.df[priceTicks[msg.field]][self._id2symbol[msg.tickerId]] = msg.price
#notify viewer
col = self._header.index(priceTicks[msg.field])
row = self.df.index.tolist().index(self._id2symbol[msg.tickerId])
idx = self.createIndex(row, col)
self.emit(SIGNAL("dataChanged(QModelIndex,QModelIndex)"), idx, idx)
def accountHandler(self, msg):
if msg.contract.m_symbol in self.df.index.tolist():
self.df['position'][msg.contract.m_symbol] = msg.position
def _rebuildIndex(self):
""" udate lookup dictionary id-> symbol """
symbols = self.df.index.tolist()
ids = self.df['id'].values.tolist()
self._id2symbol = dict(zip(ids, symbols))
def __repr__(self):
return str(self.df)
class Broker(object):
"""
Broker class acts as a wrapper around ibConnection
from ibPy. It tracks current subscriptions and provides
    data models to viewers.
"""
def __init__(self, name='broker'):
""" initialize broker class
"""
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
self.contracts = {} # a dict to keep track of subscribed contracts
self.tws = ibConnection() # tws interface
self.nextValidOrderId = None
self.dataModel = Subscriptions(self.tws) # data container
self.tws.registerAll(self.defaultHandler)
#self.tws.register(self.debugHandler,message.TickPrice)
self.tws.register(self.nextValidIdHandler, 'NextValidId')
self.log.debug('Connecting to tws')
self.tws.connect()
self.tws.reqAccountUpdates(True, '')
def subscribeStk(self, symbol, secType='STK', exchange='SMART', currency='USD'):
""" subscribe to stock data """
self.log.debug('Subscribing to ' + symbol)
# if symbol in self.data.symbols:
# print 'Already subscribed to {0}'.format(symbol)
# return
c = Contract()
c.m_symbol = symbol
c.m_secType = secType
c.m_exchange = exchange
c.m_currency = currency
subId = self.dataModel.add(symbol)
self.tws.reqMktData(subId, c, '', False)
self.contracts[symbol] = c
return subId
@property
def data(self):
return self.dataModel.df
def placeOrder(self, symbol, shares, limit=None, exchange='SMART', transmit=0):
""" place an order on already subscribed contract """
if symbol not in self.contracts.keys():
self.log.error("Can't place order, not subscribed to %s" % symbol)
return
action = {-1: 'SELL', 1: 'BUY'}
o = Order()
o.m_orderId = self.getOrderId()
o.m_action = action[cmp(shares, 0)]
o.m_totalQuantity = abs(shares)
o.m_transmit = transmit
if limit is not None:
o.m_orderType = 'LMT'
o.m_lmtPrice = limit
self.log.debug('Placing %s order for %i %s (id=%i)' % (o.m_action, o.m_totalQuantity, symbol, o.m_orderId))
self.tws.placeOrder(o.m_orderId, self.contracts[symbol], o)
def getOrderId(self):
self.nextValidOrderId += 1
return self.nextValidOrderId - 1
def unsubscribeStk(self, symbol):
self.log.debug('Function not implemented')
def disconnect(self):
self.tws.disconnect()
def __del__(self):
"""destructor, clean up """
print 'Broker is cleaning up after itself.'
self.tws.disconnect()
def debugHandler(self, msg):
print msg
def defaultHandler(self, msg):
""" default message handler """
#print msg.typeName
if msg.typeName == 'Error':
self.log.error(msg)
def nextValidIdHandler(self, msg):
self.nextValidOrderId = msg.orderId
self.log.debug('Next valid order id:{0}'.format(self.nextValidOrderId))
def saveData(self, fname):
""" save current dataframe to csv """
self.log.debug("Saving data to {0}".format(fname))
self.dataModel.df.to_csv(fname)
# def __getattr__(self, name):
# """ x.__getattr__('name') <==> x.name
# an easy way to call ibConnection methods
# @return named attribute from instance tws
# """
# return getattr(self.tws, name)
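# Illustrative sketch, not part of the original module: subscribe to a symbol
# first, then place a limit order against the subscribed contract (symbol and
# price are placeholders; transmit=0 keeps the order unsent in TWS).
def _exampleBrokerLimitOrder():
    b = Broker()
    sleep(2)
    b.subscribeStk('SPY')
    b.placeOrder('SPY', 100, limit=200.0, transmit=0)
    return b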
class _HistDataHandler(object):
""" handles incoming messages """
def __init__(self, tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler, message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open': [], 'high': [], 'low': [], 'close': [], 'volume': [], 'count': [], 'WAP': []}
def msgHandler(self, msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
if len(msg.date) > 8:
self._timestamp.append(dt.datetime.strptime(msg.date, timeFormat))
else:
self._timestamp.append(dt.datetime.strptime(msg.date, dateFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
""" return downloaded data as a DataFrame """
df = DataFrame(data=self._data, index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self, debug=False):
self._log = logger.getLogger('DLD')
self._log.debug(
            'Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__, ib.version))
self.tws = ibConnection()
self._dataHandler = _HistDataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler, message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self, msg):
print '[debug]', msg
def requestData(self, contract, endDateTime, durationStr='1 D', barSizeSetting='30 secs', whatToShow='TRADES',
useRTH=1, formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol, endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(10)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH,
formatDate)
self._reqId += 1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time() - startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self, contract, dateTuple):
""" get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
"""
openTime = dt.datetime(*dateTuple) + dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple) + dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime, closeTime, freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract, t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
class TimeKeeper(object):
def __init__(self):
self._log = logger.getLogger('TK')
dataDir = os.path.expanduser('~') + '/twpData'
if not os.path.exists(dataDir):
os.mkdir(dataDir)
self._timeFormat = "%Y%m%d %H:%M:%S"
self.dataFile = os.path.normpath(os.path.join(dataDir, 'requests.txt'))
self._log.debug('Data file: {0}'.format(self.dataFile))
def addRequest(self):
""" adds a timestamp of current request"""
with open(self.dataFile, 'a') as f:
f.write(dt.datetime.now().strftime(self._timeFormat) + '\n')
def nrRequests(self, timeSpan=600):
""" return number of requests in past timespan (s) """
delta = dt.timedelta(seconds=timeSpan)
now = dt.datetime.now()
requests = 0
with open(self.dataFile, 'r') as f:
lines = f.readlines()
for line in lines:
if now - dt.datetime.strptime(line.strip(), self._timeFormat) < delta:
requests += 1
if requests == 0: # erase all contents if no requests are relevant
open(self.dataFile, 'w').close()
self._log.debug('past requests: {0}'.format(requests))
return requests
#---------------test functions-----------------
def dummyHandler(msg):
print msg
def testConnection():
""" a simple test to check working of streaming prices etc """
tws = ibConnection()
tws.registerAll(dummyHandler)
tws.connect()
c = createContract('SPY')
tws.reqMktData(1, c, '', False)
sleep(3)
print 'testConnection done.'
def testSubscriptions():
s = Subscriptions()
s.add('SPY')
#s.add('XLE')
print s
def testBroker():
b = Broker()
sleep(2)
b.subscribeStk('SPY')
b.subscribeStk('XLE')
b.subscribeStk('GOOG')
b.placeOrder('ABC', 125, 55.1)
sleep(3)
return b
#---------------------GUI stuff--------------------------------------------
class AddSubscriptionDlg(QDialog):
def __init__(self, parent=None):
super(AddSubscriptionDlg, self).__init__(parent)
symbolLabel = QLabel('Symbol')
self.symbolEdit = QLineEdit()
secTypeLabel = QLabel('secType')
self.secTypeEdit = QLineEdit('STK')
exchangeLabel = QLabel('exchange')
self.exchangeEdit = QLineEdit('SMART')
currencyLabel = QLabel('currency')
self.currencyEdit = QLineEdit('USD')
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
lay = QGridLayout()
lay.addWidget(symbolLabel, 0, 0)
lay.addWidget(self.symbolEdit, 0, 1)
lay.addWidget(secTypeLabel, 1, 0)
lay.addWidget(self.secTypeEdit, 1, 1)
lay.addWidget(exchangeLabel, 2, 0)
lay.addWidget(self.exchangeEdit, 2, 1)
lay.addWidget(currencyLabel, 3, 0)
lay.addWidget(self.currencyEdit, 3, 1)
lay.addWidget(buttonBox, 4, 0, 1, 2)
self.setLayout(lay)
self.connect(buttonBox, SIGNAL("accepted()"),
self, SLOT("accept()"))
self.connect(buttonBox, SIGNAL("rejected()"),
self, SLOT("reject()"))
self.setWindowTitle("Add subscription")
class BrokerWidget(QWidget):
def __init__(self, broker, parent=None):
super(BrokerWidget, self).__init__(parent)
self.broker = broker
self.dataTable = TableView()
self.dataTable.setModel(self.broker.dataModel)
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
#self.dataTable.resizeColumnsToContents()
dataLabel = QLabel('Price Data')
dataLabel.setBuddy(self.dataTable)
dataLayout = QVBoxLayout()
dataLayout.addWidget(dataLabel)
dataLayout.addWidget(self.dataTable)
addButton = QPushButton("&Add Symbol")
saveDataButton = QPushButton("&Save Data")
#deleteButton = QPushButton("&Delete")
buttonLayout = QVBoxLayout()
buttonLayout.addWidget(addButton)
buttonLayout.addWidget(saveDataButton)
buttonLayout.addStretch()
layout = QHBoxLayout()
layout.addLayout(dataLayout)
layout.addLayout(buttonLayout)
self.setLayout(layout)
self.connect(addButton, SIGNAL('clicked()'), self.addSubscription)
self.connect(saveDataButton, SIGNAL('clicked()'), self.saveData)
#self.connect(deleteButton,SIGNAL('clicked()'),self.deleteSubscription)
def addSubscription(self):
dialog = AddSubscriptionDlg(self)
if dialog.exec_():
self.broker.subscribeStk(str(dialog.symbolEdit.text()), str(dialog.secTypeEdit.text()),
str(dialog.exchangeEdit.text()), str(dialog.currencyEdit.text()))
def saveData(self):
""" save data to a .csv file """
fname = unicode(QFileDialog.getSaveFileName(self, caption="Save data to csv", filter='*.csv'))
if fname:
self.broker.saveData(fname)
# def deleteSubscription(self):
# pass
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.resize(640, 480)
self.setWindowTitle('Broker test')
self.broker = Broker()
self.broker.subscribeStk('SPY')
self.broker.subscribeStk('XLE')
self.broker.subscribeStk('GOOG')
brokerWidget = BrokerWidget(self.broker, self)
lay = QVBoxLayout()
lay.addWidget(brokerWidget)
self.setLayout(lay)
def startGui():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
import ib
    print 'ibpy version:', ib.version
#testConnection()
#testBroker()
#testSubscriptions()
print message.messageTypeNames()
startGui()
print 'All done'
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/random_projection.py | 6 | 23301 | # -*- coding: utf8
"""Random Projection transformers.
Random Projections are a simple and computationally efficient way to
reduce the dimensionality of the data by trading a controlled amount
of accuracy (as additional variance) for faster processing times and
smaller model sizes.
The dimensions and distribution of Random Projections matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset.
The main theoretical result behind the efficiency of random projection is the
`Johnson-Lindenstrauss lemma (quoting Wikipedia)
<https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:
In mathematics, the Johnson-Lindenstrauss lemma is a result
concerning low-distortion embeddings of points from high-dimensional
into low-dimensional Euclidean space. The lemma states that a small set
of points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points are
nearly preserved. The map used for the embedding is at least Lipschitz,
and can even be taken to be an orthogonal projection.
"""
# Authors: Olivier Grisel <[email protected]>,
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import BaseEstimator, TransformerMixin
from .utils import check_random_state
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
from .utils.validation import check_array, check_is_fitted
from .utils.validation import _deprecate_positional_args
from .exceptions import DataDimensionalityWarning
__all__ = ["SparseRandomProjection",
"GaussianRandomProjection",
"johnson_lindenstrauss_min_dim"]
@_deprecate_positional_args
def johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1):
"""Find a 'safe' number of components to randomly project to.
The distortion introduced by a random projection `p` only changes the
    distance between two points by a factor (1 +- eps) in a Euclidean space
with good probability. The projection `p` is an eps-embedding as defined
by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape (n_samples,
n_features), eps is in ]0, 1[ and p is a projection by a random Gaussian
N(0, 1) matrix of shape (n_components, n_features) (or a sparse
Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
Note that the number of dimensions is independent of the original
number of features but instead depends on the size of the dataset:
the larger the dataset, the higher is the minimal dimensionality of
an eps-embedding.
Read more in the :ref:`User Guide <johnson_lindenstrauss>`.
Parameters
----------
n_samples : int or array-like of int
Number of samples that should be a integer greater than 0. If an array
is given, it will compute a safe number of components array-wise.
eps : float or ndarray of shape (n_components,), dtype=float, \
default=0.1
        Maximum distortion rate in the range (0, 1) as defined by the
Johnson-Lindenstrauss lemma. If an array is given, it will compute a
safe number of components array-wise.
Returns
-------
n_components : int or ndarray of int
The minimal number of components to guarantee with good probability
an eps-embedding with n_samples.
Examples
--------
>>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
663
>>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])
array([ 663, 11841, 1112658])
>>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)
array([ 7894, 9868, 11841])
References
----------
.. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
.. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,
"An elementary proof of the Johnson-Lindenstrauss Lemma."
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654
"""
eps = np.asarray(eps)
n_samples = np.asarray(n_samples)
if np.any(eps <= 0.0) or np.any(eps >= 1):
raise ValueError(
"The JL bound is defined for eps in ]0, 1[, got %r" % eps)
    if np.any(n_samples <= 0):
raise ValueError(
"The JL bound is defined for n_samples greater than zero, got %r"
% n_samples)
denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
return (4 * np.log(n_samples) / denominator).astype(int)
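# Illustrative sketch, not part of the original module: spelling out the bound
# for one case, n_samples = 1e6 and eps = 0.5 gives
# 4 * log(1e6) / (0.5 ** 2 / 2 - 0.5 ** 3 / 3) ~= 55.26 / 0.0833 ~= 663,
# matching the first docstring example above.
def _example_jl_bound_by_hand():
    n_samples, eps = 1e6, 0.5
    denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
    return int(4 * np.log(n_samples) / denominator)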
def _check_density(density, n_features):
"""Factorize density check according to Li et al."""
if density == 'auto':
density = 1 / np.sqrt(n_features)
elif density <= 0 or density > 1:
raise ValueError("Expected density in range ]0, 1], got: %r"
% density)
return density
def _check_input_size(n_components, n_features):
"""Factorize argument checking for random matrix generation."""
if n_components <= 0:
raise ValueError("n_components must be strictly positive, got %d" %
n_components)
if n_features <= 0:
raise ValueError("n_features must be strictly positive, got %d" %
n_features)
def _gaussian_random_matrix(n_components, n_features, random_state=None):
"""Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the matrix
at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
components : ndarray of shape (n_components, n_features)
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(loc=0.0,
scale=1.0 / np.sqrt(n_components),
size=(n_components, n_features))
return components
def _sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection.
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float or 'auto', default='auto'
        Ratio of non-zero components in the random projection matrix in the
range `(0, 1]`
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the matrix
at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
components : {ndarray, sparse matrix} of shape (n_components, n_features)
The generated Gaussian random matrix. Sparse matrix will be of CSR
format.
See Also
--------
SparseRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for _ in range(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(n_features, n_nonzero_i,
random_state=rng)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix((data, indices, indptr),
shape=(n_components, n_features))
return np.sqrt(1 / density) / np.sqrt(n_components) * components
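# Illustrative sketch, not part of the original module: with density = 1 / 3
# (the Achlioptas matrix) the entries can only take the values 0 and
# +/- sqrt(3) / sqrt(n_components).
def _example_sparse_matrix_entries():
    components = _sparse_random_matrix(n_components=100, n_features=500,
                                       density=1 / 3., random_state=0)
    # expected unique values: [-sqrt(3) / 10, 0, sqrt(3) / 10]
    return np.unique(components.toarray())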
class BaseRandomProjection(TransformerMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for random projections.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, n_components='auto', *, eps=0.1, dense_output=False,
random_state=None):
self.n_components = n_components
self.eps = eps
self.dense_output = dense_output
self.random_state = random_state
@abstractmethod
def _make_random_matrix(self, n_components, n_features):
"""Generate the random projection matrix.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : {ndarray, sparse matrix} of shape \
(n_components, n_features)
The generated random matrix. Sparse matrix will be of CSR format.
"""
def fit(self, X, y=None):
"""Generate a sparse random projection matrix.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
            aforementioned papers.
y
Ignored
Returns
-------
self
"""
X = self._validate_data(X, accept_sparse=['csr', 'csc'])
n_samples, n_features = X.shape
if self.n_components == 'auto':
self.n_components_ = johnson_lindenstrauss_min_dim(
n_samples=n_samples, eps=self.eps)
if self.n_components_ <= 0:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is invalid' % (
self.eps, n_samples, self.n_components_))
elif self.n_components_ > n_features:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is larger than the original space with '
'n_features=%d' % (self.eps, n_samples, self.n_components_,
n_features))
else:
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s"
% self.n_components)
elif self.n_components > n_features:
warnings.warn(
"The number of components is higher than the number of"
" features: n_features < n_components (%s < %s)."
"The dimensionality of the problem will not be reduced."
% (n_features, self.n_components),
DataDimensionalityWarning)
self.n_components_ = self.n_components
# Generate a projection matrix of size [n_components, n_features]
self.components_ = self._make_random_matrix(self.n_components_,
n_features)
# Check contract
        assert self.components_.shape == (self.n_components_, n_features), (
            'An error has occurred: the self.components_ matrix does '
            'not have the proper shape.')
return self
def transform(self, X):
"""Project the data by using matrix product with the random matrix
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input data to project into a smaller dimensional space.
Returns
-------
X_new : {ndarray, sparse matrix} of shape (n_samples, n_components)
Projected array.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
check_is_fitted(self)
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
                'Impossible to perform projection: '
'X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
X_new = safe_sparse_dot(X, self.components_.T,
dense_output=self.dense_output)
return X_new
class GaussianRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through Gaussian random projection.
The components of the random matrix are drawn from N(0, 1 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
.. versionadded:: 0.13
Parameters
----------
n_components : int or 'auto', default='auto'
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
It should be noted that Johnson-Lindenstrauss lemma can yield
very conservative estimated of the required number of components
as it makes no assumption on the structure of the dataset.
eps : float, default=0.1
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when `n_components` is set to
'auto'. The value should be strictly positive.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the
projection matrix at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : ndarray of shape (n_components, n_features)
Random matrix used for the projection.
Examples
--------
>>> import numpy as np
>>> from sklearn.random_projection import GaussianRandomProjection
>>> rng = np.random.RandomState(42)
>>> X = rng.rand(100, 10000)
>>> transformer = GaussianRandomProjection(random_state=rng)
>>> X_new = transformer.fit_transform(X)
>>> X_new.shape
(100, 3947)
See Also
--------
SparseRandomProjection
"""
@_deprecate_positional_args
def __init__(self, n_components='auto', *, eps=0.1, random_state=None):
super().__init__(
n_components=n_components,
eps=eps,
dense_output=True,
random_state=random_state)
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : {ndarray, sparse matrix} of shape \
(n_components, n_features)
The generated random matrix. Sparse matrix will be of CSR format.
"""
random_state = check_random_state(self.random_state)
return _gaussian_random_matrix(n_components,
n_features,
random_state=random_state)
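# A minimal dense sketch of the Gaussian matrix described in the class
# docstring above, assuming i.i.d. N(0, 1 / n_components) entries; the actual
# matrix is produced by _gaussian_random_matrix with a validated random state.
def _gaussian_matrix_sketch(n_components, n_features, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    return rng.normal(loc=0.0, scale=1.0 / np.sqrt(n_components),
                      size=(n_components, n_features))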
class SparseRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through sparse random projection.
Sparse random matrix is an alternative to dense random
projection matrix that guarantees similar embedding quality while being
much more memory efficient and allowing faster computation of the
projected data.
If we note `s = 1 / density` the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
.. versionadded:: 0.13
Parameters
----------
n_components : int or 'auto', default='auto'
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        It should be noted that the Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
        as it makes no assumption on the structure of the dataset.
density : float or 'auto', default='auto'
        Ratio in the range (0, 1] of non-zero components in the random
projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
eps : float, default=0.1
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'. This value should be strictly positive.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
dense_output : bool, default=False
If True, ensure that the output of the random projection is a
dense numpy array even if the input and random projection matrix
are both sparse. In practice, if the number of components is
small the number of zero components in the projected data will
be very small and it will be more CPU and memory efficient to
use a dense representation.
If False, the projected data uses a sparse representation if
the input is sparse.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the
projection matrix at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : sparse matrix of shape (n_components, n_features)
Random matrix used for the projection. Sparse matrix will be of CSR
format.
density_ : float in range 0.0 - 1.0
        Concrete density computed when density = "auto".
Examples
--------
>>> import numpy as np
>>> from sklearn.random_projection import SparseRandomProjection
>>> rng = np.random.RandomState(42)
>>> X = rng.rand(100, 10000)
>>> transformer = SparseRandomProjection(random_state=rng)
>>> X_new = transformer.fit_transform(X)
>>> X_new.shape
(100, 3947)
>>> # very few components are non-zero
>>> np.mean(transformer.components_ != 0)
0.0100...
See Also
--------
GaussianRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
https://users.soe.ucsc.edu/~optas/papers/jl.pdf
"""
@_deprecate_positional_args
def __init__(self, n_components='auto', *, density='auto', eps=0.1,
dense_output=False, random_state=None):
super().__init__(
n_components=n_components,
eps=eps,
dense_output=dense_output,
random_state=random_state)
self.density = density
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int
Dimensionality of the target projection space.
n_features : int
Dimensionality of the original source space.
Returns
-------
components : {ndarray, sparse matrix} of shape \
(n_components, n_features)
The generated random matrix. Sparse matrix will be of CSR format.
"""
random_state = check_random_state(self.random_state)
self.density_ = _check_density(self.density, n_features)
return _sparse_random_matrix(n_components,
n_features,
density=self.density_,
random_state=random_state)
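# A dense, illustrative sketch of the +/- sqrt(s)/sqrt(n_components)
# distribution documented in the SparseRandomProjection docstring (s = 1 /
# density); the real _sparse_random_matrix builds a scipy.sparse matrix instead.
def _sparse_values_sketch(n_components, n_features, density, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    s = 1.0 / density
    signs = rng.choice([-1.0, 0.0, 1.0], size=(n_components, n_features),
                       p=[1.0 / (2 * s), 1.0 - 1.0 / s, 1.0 / (2 * s)])
    return signs * np.sqrt(s) / np.sqrt(n_components)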
| bsd-3-clause |
HappyPhysics/Designer-Motion | Many_Triangles.py | 1 | 17080 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 24 10:39:08 2017
@author: Salem
This script starts with a lattice and then divides it into a bunch of triangles.
Then for each triangle it calls Little-Triangle and implements the addition of gray matter to
each triangle separately. Before calling Little-Triangle a change of frame must be implemented.
Methods:
find_desired_lattice(disp_type=DispType.random):
Finds the lattice that has the desired motion as lowest energy mode
rotation_matrix(angle):
        rotates a vector by angle radians
angle_between(vec1, vec2):
angle from vec1 to vec2
"""
import numpy as np
import numpy.random as npr
import LatticeMaking
import Little_Triangle
from numpy import linalg as la
from matplotlib import pyplot as plt
import importlib
importlib.reload(LatticeMaking)
importlib.reload(Little_Triangle)
from LatticeMaking import * #custom
from Little_Triangle import * #custom
# take in a triangulated mesh.
# generate a list or an array of triangles with elements (indx1, indx2, indx3)
# take in the desired displacements of each triangle.
# transform the displacements to a canonical frame of reference.
# run little_triangle on them.
# store the results of all the little triangles.
# combine the triangles back to a single mesh adding the extra vertices and edges.
# minimize the cost function for the whole lattice as a function of the original edges (optional).
PI = np.pi
# take in a triangulated lattice.
LATTICE_WIDTH = 3 # same as height
TRI_LATTICE = triangular_lattice(LATTICE_WIDTH) #contains vertices and edges as tuple
#===========================================================================================================================================
# A test lattice for changing a transverse wave to a longitudinal one
#===========================================================================================================================================
def wave_changer():
verts = np.array([[0.0, 0.0], [0.0, 1.0], [np.sin(PI/3), 0.5], [np.sin(PI/3), -0.5]])
edges = np.array([[0, 1], [0, 2], [0, 3], [1,2], [2,3]], dtype=np.int64)
mesh = [verts, edges]
disp = np.array([[0.0, 1.0], [0.0, -1.0], [-1.0, 0.0], [1.0, 0.0]])
test = True;
while(test):
res = find_desired_lattice(mesh=mesh, desired_disp=disp, edgeType=EdgeTypes.all_to_gray)
test = check_answer(res)
return res
#===========================================================================================================================================
# Finds the lattice that has the desired motion as lowest energy mode
#===========================================================================================================================================
def find_desired_lattice(disp_type=DispType.random, edgeType = EdgeTypes.all_connected,
num_of_added_verts= 3, mesh = list(TRI_LATTICE), desired_disp = None):
print("\n")
num_of_verts = mesh[0].size//2
mesh1 = mesh.copy()
# define the desired displacement field of the entire lattice
if desired_disp is None:
desired_disp = make_desired_disp(mesh1[0], DeformType=disp_type, num_of_vertices=num_of_verts)
desired_disp = np.hstack(([0,0,0], desired_disp)).reshape((num_of_verts, 2))
# take in an edge list, generate a list or an array of triangles with elements (indx1, indx2, indx3)
triangles = get_mesh_faces(mesh1[1])
print("The mesh contrains the triangles: ", triangles)
results = []
if(edgeType == EdgeTypes.all_to_gray):
mesh1[1] = np.ones((0,2), dtype=np.int64)
springConstants = np.ones(0) # no springs started on the original mesh
else:
springConstants = np.ones(mesh1[1].size//2)
total_added = 0
# run over all the triangeles or faces of the mesh.
for tri_ID, triangle in enumerate(triangles):
triangle_verts = np.array(mesh1[0][triangle])
#get the indices of the triangle as a mask and use it to return the desired displacements of the triangle
triangle_disps = desired_disp[triangle].flatten()
        # change their frame of reference to the canonical one (from little triangle).
#new_tri_verts, canon_disps = lab_to_triangle_frame(triangle_disps, triangle_verts)
# call little triangle to add the gray matter
res = find_desired_face(num_of_added_verts=num_of_added_verts, face_Disps=triangle_disps,
face_verts=triangle_verts, face_ID=tri_ID + 1, edgeType=edgeType)
add_res_to_mesh(mesh1, res, triangle, np.arange(num_of_verts + total_added, num_of_verts + total_added + res[0].size//2))
total_added += res[0].size//2
#the spring constants they are not squared anymore, they will be squared when the energy is evaluated
springConstants = np.hstack((springConstants, res[2]))
results.append(res)
#handle edges
#add the grey matter on the end of the lattice.
#increment the corresponding edges in the edge array
#add in the correct spring constants
print("\n")
#springConstants /= np.max(springConstants)
mask = springConstants**2 < 0.01
#show vertices
plotPoints(mesh1[0], num_of_originals=num_of_verts)
#spring constants are normalized so that the maximum value is one
return results, mesh1, springConstants
#===========================================================================================================================================
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#this enumerates the possible outputs of get_triangle_indices
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class OutType(Enum):
mask = 1
indices = 2
#TRI_LATTICE = 3
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#===========================================================================================================================================
# returns the vertex indices of the lattice cell whose lower-left corner is indx
#===========================================================================================================================================
def get_triangle_indices(indx, num_of_verts, output = OutType.mask):
"""
description
"""
mask = np.zeros(num_of_verts, dtype=np.bool)
#ignore the indices at the boundaries that are not at the Lower-Left corner of a triangle
if np.mod(indx + 1, LATTICE_WIDTH) == 0 or indx >= LATTICE_WIDTH *(LATTICE_WIDTH - 1):
#return nothing because they do not represent a triangle
return None
if output is not OutType.mask:
return np.array([indx, indx + 1, indx + LATTICE_WIDTH, indx + LATTICE_WIDTH + 1])
# gets the indices of the triangle corresponding to indx (indx of lower left corner)
# start with indx, then find the indices of the other three
mask[indx] = mask[indx + 1] = mask[indx + LATTICE_WIDTH] = mask[indx + LATTICE_WIDTH + 1] = True
return mask
#===========================================================================================================================================
# return thier positions and displacements to change frame.
#===========================================================================================================================================
# puts the triangle displacements in canonical form
#===========================================================================================================================================
def lab_to_triangle_frame(triangle_disps, triangle_verts, direction = 1):
"""
Translates between the lab and triangle frames. triangle frame is the natural frame introduced in the script Little_triangle.
Input:
direction: gives the direction of the transformation. If direction is +1 the transformation goes from lab to triangle frame.
if direction is -1 then the transformation goes the other way
(note that the outputs will have different shapes because of flattening and reshaping)
"""
# find the triangle vertices in canonical for, [0,0], [0, y],...
new_verts = np.copy(triangle_verts)
new_verts -= new_verts[0]
new_verts = np.dot(new_verts, rotation_matrix(angle_between([0, 1],new_verts[1])))
#for disps, find the amount needed to be rotated.
#subtract the displacement of the first vertex from all the displacements.
new_disps = triangle_disps - triangle_disps[0]
#rotation vectors, these are vectors normal to the position vector (starting from the pivot point)
rotation_vecs = np.dot((new_verts), rotation_matrix(-PI/2)) # C.C. rotation
print(rotation_vecs)
# find (negative of ) the component of the first displacement perpendicular to the first edge.
# find the ratio of length of this component to the length of the first edge, this is the rotation parameter.
rotation_param = -np.dot(new_disps[1], rotation_vecs[1])/(la.norm(rotation_vecs[1])**2)
#rotate the right amount
#start with the triangle positions. (DONE ABOVE)
#subtract the first position from all of them. (DONE ABOVE)
# rotate the result by 90 degrees. (DONE ABOVE)
# multiply by the rotation parameter.
#add to displacements.
new_disps += rotation_param*rotation_vecs
# return the displacemetns in canonical form.
return new_verts, new_disps.flatten()[3:]
#===========================================================================================================================================
#===========================================================================================================================================
# changes from the canonical form of the triangle back to the "lab" frame (acts on vertex positions)
#===========================================================================================================================================
def back_to_lab_frame(output, triangle_verts):
"""
translates the gray matter back to lab frame
"""
#subtract the displacement of the first vertex from all the displacements.
output += triangle_verts[0]
output = np.dot(output, rotation_matrix(-angle_between([0, 1], triangle_verts[1])))
return output
#===========================================================================================================================================
#===========================================================================================================================================
# rotates a vector by angle (given in radians)
#===========================================================================================================================================
def rotation_matrix(angle):
"""
rotates a vector by angle degrees in the counter clockwise direction. (2 by 2)
"""
return np.array([[np.cos(angle), -np.sin(angle)],[np.sin(angle), np.cos(angle)]])
#===========================================================================================================================================
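# Quick illustrative check of the convention above: acting on a column vector,
# rotation_matrix(PI/2) maps x-hat onto y-hat. When used on row vectors as
# np.dot(points, rotation_matrix(a)), e.g. in lab_to_triangle_frame, the
# effective rotation is by -a instead.
assert np.allclose(rotation_matrix(PI / 2).dot([1.0, 0.0]), [0.0, 1.0])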
import warnings
#===========================================================================================================================================
# angle from vec1 to vec2
#===========================================================================================================================================
def angle_between (vec1, vec2):
"""
Angle from vec1 to vec2.
This gives the angle you have to rotate vec1 to make it parallel to vec2, could be negative.
"""
norm1 = la.norm(vec1)
norm2 = la.norm(vec2)
if (norm1 == 0 or norm2 == 0):
warnings.warn("Warning, angle between zero vector assumed to be pi/2 ")
return np.pi/2
    #find the angles with respect to the x axis then take the difference. Using x-hat helps get the sign right
return np.arccos(np.dot(vec2, [1, 0])/(norm2))*np.sign(vec2[1]) - np.arccos(np.dot(vec1, [1, 0])/(norm1))*np.sign(vec1[1])
#===========================================================================================================================================
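# Illustrative checks of the sign convention documented above: rotating x-hat
# onto y-hat is +pi/2, and the reverse is -pi/2.
assert np.isclose(angle_between([1, 0], [0, 1]), PI / 2)
assert np.isclose(angle_between([0, 1], [1, 0]), -PI / 2)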
#===========================================================================================================================================
# adds the new results to the original mesh
#===========================================================================================================================================
def add_res_to_mesh(mesh, res, triangle, new_verts):
'''
    Adds the extra vertices from res to the mesh, and adds the edges after remapping
    their local indices to the order in which the vertices appear in the new mesh.
'''
mesh[0] = np.vstack((mesh[0], res[0]))
a = res[1]
#print("edges: ", a)
palette = np.arange(0, 3 + new_verts.size)
#print("palette: ", palette)
key = np.hstack((triangle, new_verts))
#print("key: ", key)
#print("a.ravel:", a.ravel())
index = np.digitize(a.ravel(), palette, right=True)
#print("index: ", index)
#print(key[index].reshape(a.shape))
mesh[1] = np.vstack((mesh[1], key[index].reshape(a.shape)))
#===========================================================================================================================================
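# Illustrative worked example of the palette/key remapping used above, with
# hypothetical values: for triangle = [7, 2, 5] and new_verts = [10, 11] the
# key is [7, 2, 5, 10, 11], so a local edge [0, 3] becomes the global [7, 10].
_demo_key = np.array([7, 2, 5, 10, 11])
_demo_idx = np.digitize(np.array([[0, 3], [1, 4]]).ravel(), np.arange(5), right=True)
assert (_demo_key[_demo_idx].reshape(2, 2) == np.array([[7, 10], [2, 11]])).all()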
#===========================================================================================================================================
# check the final answer
#===========================================================================================================================================
def check_answer(result):
'''
    checks whether the results returned match the desired displacement. The desired displacements are given explicitly for now.
'''
face_verts = np.array([[0.0, 0.0], [0.0, 1.0], [np.sin(PI/3), 0.5], [np.sin(PI/3), -0.5]])
desired_disp = np.array([ 0. , 1. , 0. , -1. , -1. ,
0. , 1. , 0. , 0.83382341, 0.56617335,
0.53577003, 0.39596225, 0.55284973, 0.15255589, 0.85216995,
0.05556033, 0.10565699, 0.17175687, 0.42789355, 0.83339344])
mesh = result[1].copy()
k = result[2].copy()
#print("spring constants:" , k**2/max(k**2))
mask = k**2/max(k**2) < 0.01
dyn_mat = makeDynamicalMat(verts=mesh[0], edgeArray=mesh[1], springK=k)
lowestEigVector = normalizeVec(la.eigh(dyn_mat)[1][:,3])
# project out the euclidean transforms
euclid_transforms = get_rigid_transformations(mesh[0])
euclid_projector = get_complement_space(euclid_transforms)
projected_disp = normalizeVec(np.dot(euclid_projector, desired_disp.flatten()))
disp, disp_energy= check_energy(desired_disp[:face_verts.size], mesh, k)
projected_disp = normalizeVec(np.dot(euclid_projector, disp))
print("eigenvalues: ", lowestEigenVals(dyn_mat, 2, 4))
print("result energy: " , disp_energy)
dot_product = np.dot(projected_disp[:8], lowestEigVector[:8])
print("desired dot product: ", dot_product, "\n\n")
projected_disp[:8] *= np.sign(dot_product)
print("differece:", (projected_disp[:8] - lowestEigVector[:8]) )
if dot_product*np.sign(dot_product) < 0.995: return True
return False;
def check_energy(original_disps, mesh, k):
num_of_verts = mesh[0].size//2
disps = npr.rand(num_of_verts*2)
disps[:original_disps.size] = original_disps
euclid_transforms = get_rigid_transformations(mesh[0])
euclid_projector = get_complement_space(euclid_transforms)
projected_disp = normalizeVec(np.dot(euclid_projector, disps))
# project out the euclidean transforms
dyn_mat = makeDynamicalMat(verts=mesh[0], edgeArray=mesh[1], springK=k)
res = op.minimize(energy, projected_disp, method='Newton-CG', args=(dyn_mat, euclid_projector, original_disps), jac=energy_Der,
hess=energy_Hess, options={'xtol': 1e-8, 'disp': False})
return res.x, res.fun
| mit |
ndingwall/scikit-learn | sklearn/utils/tests/test_random.py | 10 | 7360 | import numpy as np
import pytest
import scipy.sparse as sp
from scipy.special import comb
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import _random_choice_csc, sample_without_replacement
from sklearn.utils._random import _our_rand_r_py
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
with pytest.raises(ValueError):
sample_without_replacement(5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_sample
with pytest.raises(ValueError):
sample_without_replacement(0, 1)
with pytest.raises(ValueError):
sample_without_replacement(1, 2)
# n_population == n_samples
assert sample_without_replacement(0, 0).shape == (0, )
assert sample_without_replacement(1, 1).shape == (1, )
# n_population >= n_samples
assert sample_without_replacement(5, 0).shape == (0, )
assert sample_without_replacement(5, 1).shape == (1, )
# n_population < 0 or n_samples < 0
with pytest.raises(ValueError):
sample_without_replacement(-1, 5)
with pytest.raises(ValueError):
sample_without_replacement(5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert len(s) == n_samples
unique = np.unique(s)
assert np.size(unique) == n_samples
assert np.all(unique < n_population)
# test edge case n_population == n_samples == 0
assert np.size(sample_without_replacement(0, 0)) == 0
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = comb(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = _random_choice_csc(n_samples, classes, class_probabilities,
random_state)
assert sp.issparse(got)
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilities[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilities = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = _random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert sp.issparse(got)
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilities[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilities = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = _random_choice_csc(n_samples, classes, class_probabilities,
random_state)
assert sp.issparse(got)
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilities[k])) / n_samples
assert_array_almost_equal(class_probabilities[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilities = [np.array([0.0, 1.0]), np.array([1.0])]
got = _random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert sp.issparse(got)
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilities[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilities is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
with pytest.raises(ValueError):
_random_choice_csc(4, classes, class_probabilities, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
with pytest.raises(ValueError):
_random_choice_csc(4, classes, class_probabilities, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
with pytest.raises(ValueError):
_random_choice_csc(4, classes, class_probabilities, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilities = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
with pytest.raises(ValueError):
_random_choice_csc(4, classes, class_probabilities, 1)
def test_our_rand_r():
assert 131541053 == _our_rand_r_py(1273642419)
assert 270369 == _our_rand_r_py(0)
| bsd-3-clause |
robintw/Py6S | doc/source/conf.py | 1 | 8291 | # -*- coding: utf-8 -*-
#
# Py6S documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 16 12:07:44 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import mock
MOCK_MODULES = [
"numpy",
"scipy",
"matplotlib",
"matplotlib.pyplot",
"scipy.interpolate",
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
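# Mocking these modules in sys.modules lets Sphinx autodoc import Py6S on a
# build machine that lacks numpy/scipy/matplotlib: importing Py6S (which pulls
# in these packages) succeeds, and any attribute looked up on a mocked module
# simply returns another Mock object.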
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, "C:\_Work\Py6S\py6s")
sys.path.insert(0, "/Users/robin/Documents/University/Py6S/py6s")
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Py6S"
copyright = "2012, Robin Wilson"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.9.0"
# The full version, including alpha/beta/rc tags.
release = "1.9.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Py6Sdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
"papersize": "a4paper",
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "Py6S.tex", "Py6S Documentation", "Robin Wilson", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "py6s", "Py6S Documentation", ["Robin Wilson"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Py6S",
"Py6S Documentation",
"Robin Wilson",
"Py6S",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"http://docs.python.org/": None}
autodoc_member_order = "bysource"
| lgpl-3.0 |
ResByte/SLAM | mit_data.py | 1 | 3600 | import random
from math import *
import sys
import matplotlib.pyplot as plt
import numpy as np
"""
Define a robot class.
"""
class robot:
def __init__(self):
self.x = random.random()*world_size
self.y = random.random()*world_size
self.orientation = random.random()*1.0*pi
self.forward_noise = 0.0
self.turn_noise = 0.0
self.sense_noise= 0.0
def set(self,new_X,new_Y,new_PHI):
if new_X>= world_size:
print new_X
raise ValueError, 'X coordinate is out of range'
if new_Y >= world_size:
raise ValueError, 'Y coordinate out of bound'
if new_PHI < -pi or new_PHI >= pi:
raise ValueError, 'orientation is out of bound'
self.x = float(new_X) + (world_size/2.0)
self.y = float(new_Y) + (world_size/2.0)
self.orientation= float(new_PHI)
def set_noise(self,new_f_noise,new_t_noise, new_s_noise):
self.turn_noise = float(new_t_noise)
self.sense_noise = float(new_s_noise)
self.forward_noise = float(new_f_noise)
def sense(self):
Z = []
for i in range(len(landmarks)):
dist = landmarks[i]
dist += random.gauss(0.0,self.sense_noise)
Z.append(dist)
return Z
def move(self, turn, forward):
if forward < 0 :
raise ValueError, 'robot cannot move backwards'
orientation = self.orientation+float(turn) + random.gauss(0.0, self.turn_noise)
orientation %= 2*pi
dist = float(forward) + random.gauss(0.0, self.forward_noise)
x = self.x + (cos(orientation)*dist)
y = self.y + (sin(orientation)*dist)
x%= world_size
y%= world_size
res = robot()
res.set(x,y,orientation)
res.set_noise(self.forward_noise, self.turn_noise, self.sense_noise)
return res
def Gaussian(self, mu, sigma, x):
return exp(-((mu-x)**2)/(sigma**2)/2.0)/sqrt(2.0*pi*(sigma**2))
def measurement_prob(self, measurement):
prob = 1.0
for i in range(len(landmarks)):
dist = sqrt((self.x - landmarks[i][0])**2 + (self.y- landmarks[i][1])**2)
			prob *= self.Gaussian(dist, self.sense_noise, measurement[i])
return prob
def __repr__(self):
return '[x = %.6s y = %.6s orient=%.6s]' %(str(self.x),str(self.y),str(self.orientation))
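# Rough usage sketch for the class above (illustrative values only; world_size
# and landmarks must already be defined before sense()/measurement_prob() run):
#   r = robot()
#   r.set_noise(0.05, 0.05, 5.0)
#   r = r.move(0.1, 5.0)   # turn by 0.1 rad, then drive 5.0 forward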
"""
Read Odometry data from MIT dataset of Infinite Corridor
"""
def readMitData():
odom_data=[]
sonar_data=[]
flaser_data=[]
with open(sys.argv[1],'r') as data_file:
print"-> reading data"
for line in data_file:
a = line.strip().split()
if a[0]=='ODOM':
odom_data.append(a)
if a[0]=='SONAR':
sonar_data.append(a)
if a[0]=='FLASER':
flaser_data.append(a)
return odom_data,sonar_data,flaser_data
# create world
world_size = 1300
createWorld = np.zeros([world_size,world_size])
# initialize robot
myrobot = robot()
#print odo_data
x_co=[]
# get odometry data from file
odom, sonar, flaser = readMitData()
# get laser data from file
for i in flaser:
angle = float(i[184])
newx = float(i[182])
newy=float(i[183])
#print i[184]
myrobot.set(newx,newy,angle)
laser_array=[]
for n in range(180):
x_lm = float(i[n+2])*cos(float(n) + angle)
y_lm = float(i[n+2])*sin(float(n)+ angle)
			x_world = round(myrobot.x + x_lm)
			y_world = round(myrobot.y + y_lm)
# laser_array.append(float(i[n+2]))
if x_world >= world_size:
x_world = world_size-1
if y_world >= world_size:
y_world = world_size-1
#print x_world, y_world
			# update map
			ISM = 0.0
			prior = 0.5
			if float(i[n+2]) <= 20.0:
				ISM = 1.0
			if 20.0 < float(i[n+2]) <= 50.0:
				ISM = 0.5
			if float(i[n+2]) > 50.0:
				ISM = 0.0
createWorld[x_world][y_world]=createWorld[x_world][y_world] + ISM + prior
#odometry.append(float(i[184]))
#print laser_array
plt.imshow(createWorld,origin='lower')
plt.show()
| gpl-2.0 |
Aidan-Bharath/code_and_stuffs | v_save_vel.py | 1 | 8839 | from __future__ import division
import numpy as np
import sys
sys.path.insert(0,'/home/aidan/thesis/probe_code/fvcomprobe/const_panels/')
from structFunc import conv_time
import netCDF4 as net
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
def time_index(a):
a = a.reindex(index=a.index.to_datetime())
return a
def magnitude(a):
mag = np.sqrt(a['u']**2+a['v']**2+a['w']**2)
return mag
def theta(a):
a = np.arctan2(a['v'],a['u'])
return a
if __name__ == "__main__":
fileDir = '/home/aidan/thesis/ncdata/gp/2014/'
filename = 'dngrid_0001.nc'
siglay = np.array([0.98999,0.94999,0.86999,0.74999,0.58999,0.41000,0.25000,0.13000,0.05000,0.01000])
### BPb -TA
#elements = [17521, 18093, 18092, 18643, 19188, 19702, 20191, 20685, 21180, 21675, 22185, 22705, 23228, 23758, 24283, 24782, 25275, 25774, 26276, 26759, 27219, 27653, 28084, 28527]
## north gap
#elements = [ 9204, 9203, 8761, 8760, 8764, 8763, 8360, 7944, 7943, 7538, 7537, 7135, 7134, 7515, 7517, 7518, 7113, 7119, 7526, 7530, 7529, 7532, 7549, 7547, 7858, 7857, 7860, 7859, 7455, 7445, 7844, 8258, 8261, 8263, 8682, 8678, 8677, 9115, 9113, 9118, 9112, 9111, 9571, 10044, 10045, 10041, 10507, 10506, 10508, 10517, 11035, 11034, 11576, 11602, 12161, 12160, 12736, 12735, 13325, 13323, 13321, 13934, 14560, 15185, 15806, 16386, 16385, 16962, 17537, 18108, 18107, 18656, 18655, 19199, 19197, 19712, 20199, 20693, 21187, 21188, 21682, 21681, 22190, 22710, 23232, 23231, 23761, 23760, 24285, 24783, 25275, 25774, 26275, 26756, 27216, 27650, 28080, 28521, 28979, 29452, 29929, 30408, 30899, 31402, 31910, 32421, 32423, 32913, 32912, 33374, 33373, 33803, 33802, 34215, 34214, 34615, 35029, 35462, 35914, 36372, 36813, 37232, 37634, 37640, 38046, 38045, 38444, 38831, 39202, 39553, 39896, 40244, 40594, 40593, 40955, 40954, 41319, 41679, 42053, 42435, 42827, 43226, 43225, 43625, 43624, 44029, 44443, 44845, 45223, 45227, 45593, 45972, 46361, 46364, 46751, 46752, 47140, 47542, 47957, 48361, 48360, 48764, 49170, 49585, 49584, 50007, 50006, 50435, 50875, 51317, 51761, 52212, 52670, 53143, 53615, 53614, 54084, 54574, 54573]
### south passage
# top
#elements = [41420, 41421, 41422, 41787, 41425, 41426, 41427, 41428, 41429, 41430, 41431, 41063, 41064, 40698, 40699, 40700, 40701, 40702, 40703, 40348, 40349, 40350, 40351, 40352, 40007, 40008, 39664, 39665, 39666, 39311, 39312, 39670, 39671, 39672, 39673, 39675, 39676, 39677, 39678, 39679, 39326, 39327, 39328, 39329, 39330, 39332, 39333, 39334, 38959, 38960, 38961, 38962, 38567, 38568, 38569, 38570, 38571, 38166, 38167, 38168, 38169, 38170, 38171, 38172, 38173, 38581, 38176, 38177, 38178, 38179, 38180, 38181, 38182, 37775, 37776, 37777, 37778, 37779, 38189, 38192, 38193, 38194, 38603, 38604, 38999, 39001, 39004, 39005, 38611, 38612, 38613, 38614, 38615, 38616, 39013, 39014, 39016, 39017, 39386, 39390, 39391, 39392, 39748, 39749, 39750, 39751, 39752, 39753, 39754, 39402, 39403, 39404, 39759, 39760, 39762, 39763]
# bottom
#elements = [48484, 48485, 48885, 48886, 49295, 49296, 49717, 49718, 49299, 49300, 49301, 49302, 49303, 49725, 49726, 49727, 49728, 50156, 49732, 49733, 49734, 49735, 49736, 49737, 49317, 49318, 49319, 49742, 49743, 50169, 49746, 49747, 49748, 49749, 49750, 49751, 49752, 50177, 50178, 50613, 50181, 50182, 50183, 50616, 50620, 50621, 50188, 50189, 50190, 50191, 50192, 50193, 50194, 50195, 50196, 50197, 49773, 49774, 50201, 50202, 50203, 50204, 50205, 50206, 50641, 50642, 50643, 50644, 50645, 50214, 50215, 50216, 50217, 50218, 50653, 50654, 50655, 50656, 51097, 51098, 51099, 51100, 51101, 51102, 51103, 51104, 51105, 51106, 51107, 51108, 51555, 51556, 51557, 51558, 51559, 51560, 52009, 52010, 52011, 52012, 52469, 52016, 52017, 52018, 52019, 52020, 52022, 52023, 52024, 52479, 52482, 52483, 52484, 52485, 52486, 52951, 52952, 52953, 52492, 52493, 52494, 52958, 52961, 52962, 52963, 52964, 53435, 53436, 53438, 53439, 53441]
### north pass
# far top
#elements = [44099, 43692, 43691, 43690, 43684, 43285, 43277, 43276, 42874, 42873, 42872, 42868, 42867, 42472, 42471, 42091, 42090, 42089, 42088, 42087, 42086, 42085, 42084, 42082, 42081, 42080, 42079, 42078, 42077, 42076, 42075, 42454, 42453, 42452, 42451, 42450, 42449, 42448, 42447, 42446, 42445, 42443, 42442, 42059, 42058, 42056, 42055, 42054, 42053, 42052, 42051, 42050, 42049, 42048, 42047, 42046, 42045, 42044, 42043, 42042, 42041, 42422, 42421, 42420, 42419, 42418, 42033, 42032, 42030, 42029, 42028, 42410, 42409, 42799, 42798, 42797, 42404, 42403, 42402, 42790, 42794, 43186, 43185, 43184, 43183, 43182, 43181, 43180, 43579, 43578, 43580, 43981, 43980, 43979, 44392, 44391, 44791, 44790, 44789, 44787, 44786, 44785, 44784, 44783, 44782, 44781, 44780, 44779, 45159, 44776, 44775, 44773, 44772, 44774, 45150, 45151, 45521, 45520, 45519, 45518, 45517, 45515, 45514, 45516, 45890, 45889, 45888, 45887, 45886, 45885, 45884, 45883, 45882, 45881, 45880, 45879, 45878, 45877, 45876, 45875, 45874, 46261, 46259, 46258, 46260, 46254, 46255, 45865, 45864]
# next to top
elements = [31433, 31432, 31430, 31429, 31428, 30922, 30921, 30920, 30427, 30426, 30421, 29941, 29942, 29463, 29462, 29460, 29459, 29458, 29457, 28983, 28982, 28980, 28979, 28978, 28977, 28976, 28975, 28974, 28973, 28972, 28969, 28510, 28509, 28066, 28065, 27634, 27633, 28062, 27630, 27629, 27194, 27193, 27192, 27191, 27189, 27188, 27187, 27186, 27185, 27183, 27182, 27181, 27614, 27613, 27612, 27611, 27610, 27609, 27608, 27607, 27606, 27605, 27604, 27603, 27602, 27166, 27165, 27598, 27597, 27596, 27595, 27594, 27593, 27592, 27591, 27590, 27589, 27588]
### top pass
#elements = [27241, 27240, 27239, 27238, 27237, 26777, 26776, 26292, 26291, 26287, 26286, 25783, 25782, 25284, 25283, 25279, 25278, 25277, 25276, 25275, 25274, 25273, 25272, 25271, 25270, 25269, 25268, 25267, 25266, 25265, 25264, 25263, 25261, 25260, 25259, 24768, 24767, 24764, 24763, 24762, 24761, 24760, 24259, 24258, 24257, 24256, 24254, 24253, 24252, 24251, 24250, 24249, 23723, 23722, 23721, 24243, 24242, 24241, 24240, 24239, 24237, 24236, 24235, 24234, 23708, 23707, 23706, 24229, 24228, 24227, 24226]
### middle pass
#elements = [21706, 21218, 21217, 21216, 20720, 20719, 20718, 20715, 20714, 20713, 20712, 20710, 20709, 20708, 20707, 20706, 20705, 20210, 20209, 19719, 19718, 19715, 19200, 19199, 19198, 18651, 18650, 18649, 18648, 18647, 18646, 18645, 18644, 18092, 18091, 18090, 18089, 18088, 18087, 18086, 18085, 18084, 18083, 17509, 17508, 17507, 17506, 17504, 17503, 16927, 16926, 16350, 16349, 16348, 16347, 16346, 16345, 16344, 16343, 16342, 16340, 16339, 16341, 16910, 16911, 17478, 17477, 17476, 17475, 16903, 16902, 16901, 17468, 17467, 18041, 17463, 17462, 17461, 17459, 17458]
### bottom pass
#elements = [14587, 13965, 13964, 13963, 13352, 13351, 12762, 12761, 12758, 12753, 12183, 12178, 11621, 11618, 11617, 11070, 11069, 10544, 10543, 10048, 10047, 9572, 9571, 9570, 9569, 9568, 9105, 9104, 8664, 8663, 8239, 8238, 8237, 7822, 7821, 7820, 7807, 7802, 7396, 7395, 6988, 6987, 6605, 6604, 6603, 6598, 6218, 6217, 6216, 5836, 5835, 5834, 5833, 5824, 5823, 5822, 5821, 5817, 5816, 5454, 5453, 5452, 5451, 5450, 5449, 5447, 5446, 5445, 5444, 5443, 5442, 5441, 5801, 5800, 5799, 5798, 5797, 5434, 5433, 5432, 5431, 5430, 5429, 5074, 5073, 5072, 5071, 5070, 5069, 5068, 5419, 5418, 5417, 5416]
nc = net.Dataset(fileDir+filename).variables
time = nc['time'][:]+678942
time = np.array([time]).transpose()
time = conv_time(time)
time = time.flatten()
_,a = np.where([time == '2014-02-02 03:39:59'])
_,b = np.where([time == '2014-02-02 13:49:59'])
array = np.linspace(a[:],b[:],b[:][:]-a[:][:]+1)
el = elements
for i,j in enumerate(array):
print i
j = int(j)
vel = np.sqrt(nc['u'][j,:,el]**2+nc['v'][j,:,el]**2+nc['ww'][j,:,el]**2)
lat = nc['latc'][el]
lon = nc['lonc'][el]
line = lon
fig,ax = plt.subplots()
plt.rc('font',size='22')
levels = np.linspace(0,3,31)
cs = ax.contourf(line,siglay,vel,levels=levels)
ax.contour(line,siglay,vel,cs.levels,colors='k',hold='on')
cbar = fig.colorbar(cs,ax=ax)
cbar.set_label(r'Velocity $(m/s)$', rotation=-90,labelpad=30)
plt.title(str(time[j]))
ax.set_xlabel('Longitude')
scale = 1
ticks = ticker.FuncFormatter(lambda lon, pos: '{0:g}'.format(lon/scale))
ax.xaxis.set_major_formatter(ticks)
ax.yaxis.set_major_formatter(ticks)
ax.get_xaxis().set_visible(False)
#ax.get_yaxis().set_visible(False)
plt.savefig("/home/aidan/thesis/defence_presentation/v_spat/spat-"+str(i)+".jpg",format='jpeg')
if i == 0:
plt.show()
plt.close()
| mit |
googleapis/python-monitoring | noxfile.py | 1 | 6975 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import pathlib
import shutil
import nox
BLACK_VERSION = "black==19.10b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
"unit",
"system",
"cover",
"lint",
"lint_setup_py",
"blacken",
"docs",
]
# Error if a python version is missing
nox.options.error_on_missing_interpreters = True
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION)
session.run(
"black", "--check", *BLACK_PATHS,
)
session.run("flake8", "google", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
"""Run black. Format code to uniform standard."""
session.install(BLACK_VERSION)
session.run(
"black", *BLACK_PATHS,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
constraints_path = str(
CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
session.install("asyncmock", "pytest-asyncio", "-c", constraints_path)
session.install("mock", "pytest", "pytest-cov", "-c", constraints_path)
session.install("-e", ".[pandas]", "-c", constraints_path)
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
f"--junitxml=unit_{session.python}_sponge_log.xml",
"--cov=google/cloud",
"--cov=tests/unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
"""Run the system test suite."""
constraints_path = str(
CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
# Install pyopenssl for mTLS testing.
if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
session.install("pyopenssl")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
session.install("-e", ".[pandas]", "-c", constraints_path)
# Run py.test against the system tests.
if system_test_exists:
session.run(
"py.test",
"--quiet",
f"--junitxml=system_{session.python}_sponge_log.xml",
system_test_path,
*session.posargs,
)
if system_test_folder_exists:
session.run(
"py.test",
"--quiet",
f"--junitxml=system_{session.python}_sponge_log.xml",
system_test_folder_path,
*session.posargs,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=99")
session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx==4.0.1", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
# "-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
session.install(
"sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml"
)
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-T", # show full traceback on exception
"-N", # no colors
"-D",
(
"extensions=sphinx.ext.autodoc,"
"sphinx.ext.autosummary,"
"docfx_yaml.extension,"
"sphinx.ext.intersphinx,"
"sphinx.ext.coverage,"
"sphinx.ext.napoleon,"
"sphinx.ext.todo,"
"sphinx.ext.viewcode,"
"recommonmark"
),
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| apache-2.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/tests/test_patches.py | 3 | 9654 | """
Tests specific to the patches module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_almost_equal
from matplotlib.patches import Polygon
from matplotlib.patches import Rectangle
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.collections as mcollections
from matplotlib import path as mpath
from matplotlib import transforms as mtrans
def test_Polygon_close():
#: Github issue #1018 identified a bug in the Polygon handling
#: of the closed attribute; the path was not getting closed
#: when set_xy was used to set the vertices.
# open set of vertices:
xy = [[0, 0], [0, 1], [1, 1]]
# closed set:
xyclosed = xy + [[0, 0]]
# start with open path and close it:
p = Polygon(xy, closed=True)
assert_array_equal(p.get_xy(), xyclosed)
p.set_xy(xy)
assert_array_equal(p.get_xy(), xyclosed)
# start with closed path and open it:
p = Polygon(xyclosed, closed=False)
assert_array_equal(p.get_xy(), xy)
p.set_xy(xyclosed)
assert_array_equal(p.get_xy(), xy)
# start with open path and leave it open:
p = Polygon(xy, closed=False)
assert_array_equal(p.get_xy(), xy)
p.set_xy(xy)
assert_array_equal(p.get_xy(), xy)
# start with closed path and leave it closed:
p = Polygon(xyclosed, closed=True)
assert_array_equal(p.get_xy(), xyclosed)
p.set_xy(xyclosed)
assert_array_equal(p.get_xy(), xyclosed)
def test_rotate_rect():
loc = np.asarray([1.0, 2.0])
width = 2
height = 3
angle = 30.0
# A rotated rectangle
rect1 = Rectangle(loc, width, height, angle=angle)
# A non-rotated rectangle
rect2 = Rectangle(loc, width, height)
# Set up an explicit rotation matrix (in radians)
angle_rad = np.pi * angle / 180.0
rotation_matrix = np.array([[np.cos(angle_rad), -np.sin(angle_rad)],
[np.sin(angle_rad), np.cos(angle_rad)]])
# Translate to origin, rotate each vertex, and then translate back
new_verts = np.inner(rotation_matrix, rect2.get_verts() - loc).T + loc
# They should be the same
assert_almost_equal(rect1.get_verts(), new_verts)
@image_comparison(baseline_images=['clip_to_bbox'])
def test_clip_to_bbox():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([-18, 20])
ax.set_ylim([-150, 100])
path = mpath.Path.unit_regular_star(8).deepcopy()
path.vertices *= [10, 100]
path.vertices -= [5, 25]
path2 = mpath.Path.unit_circle().deepcopy()
path2.vertices *= [10, 100]
path2.vertices += [10, -25]
combined = mpath.Path.make_compound_path(path, path2)
patch = mpatches.PathPatch(
combined, alpha=0.5, facecolor='coral', edgecolor='none')
ax.add_patch(patch)
bbox = mtrans.Bbox([[-12, -77.5], [50, -110]])
result_path = combined.clip_to_bbox(bbox)
result_patch = mpatches.PathPatch(
result_path, alpha=0.5, facecolor='green', lw=4, edgecolor='black')
ax.add_patch(result_patch)
@image_comparison(baseline_images=['patch_alpha_coloring'], remove_text=True)
def test_patch_alpha_coloring():
"""
Test checks that the patch and collection are rendered with the specified
alpha values in their facecolor and edgecolor.
"""
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle='dashdot',
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles='dashdot',
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
@image_comparison(baseline_images=['patch_alpha_override'], remove_text=True)
def test_patch_alpha_override():
#: Test checks that specifying an alpha attribute for a patch or
#: collection will override any alpha component of the facecolor
#: or edgecolor.
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle='dashdot',
alpha=0.25,
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles='dashdot',
alpha=0.25,
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
@cleanup(style='default')
def test_patch_color_none():
# Make sure the alpha kwarg does not override 'none' facecolor.
# Addresses issue #7478.
c = plt.Circle((0, 0), 1, facecolor='none', alpha=1)
assert c.get_facecolor()[0] == 0
@image_comparison(baseline_images=['patch_custom_linestyle'],
remove_text=True)
def test_patch_custom_linestyle():
#: A test to check that patches and collections accept custom dash
#: patterns as linestyle and that they display correctly.
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle=(0.0, (5.0, 7.0, 10.0, 7.0)),
facecolor=(1, 0, 0),
edgecolor=(0, 0, 1))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles=[(0.0, (5.0, 7.0, 10.0, 7.0))],
facecolor=(1, 0, 0),
edgecolor=(0, 0, 1))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
@cleanup
def test_patch_linestyle_accents():
#: Test if linestyle can also be specified with short mnemonics
#: like "--"
#: cf. GitHub issue #2136
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
linestyles = ["-", "--", "-.", ":",
"solid", "dashed", "dashdot", "dotted"]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for i, ls in enumerate(linestyles):
star = mpath.Path(verts + i, codes)
patch = mpatches.PathPatch(star,
linewidth=3, linestyle=ls,
facecolor=(1, 0, 0),
edgecolor=(0, 0, 1))
ax.add_patch(patch)
ax.set_xlim([-1, i + 1])
ax.set_ylim([-1, i + 1])
fig.canvas.draw()
assert True
def test_wedge_movement():
param_dict = {'center': ((0, 0), (1, 1), 'set_center'),
'r': (5, 8, 'set_radius'),
'width': (2, 3, 'set_width'),
'theta1': (0, 30, 'set_theta1'),
'theta2': (45, 50, 'set_theta2')}
init_args = dict((k, v[0]) for (k, v) in six.iteritems(param_dict))
w = mpatches.Wedge(**init_args)
for attr, (old_v, new_v, func) in six.iteritems(param_dict):
assert_equal(getattr(w, attr), old_v)
getattr(w, func)(new_v)
assert_equal(getattr(w, attr), new_v)
@image_comparison(baseline_images=['wedge_range'],
remove_text=True)
def test_wedge_range():
ax = plt.axes()
t1 = 2.313869244286224
args = [[52.31386924, 232.31386924],
[52.313869244286224, 232.31386924428622],
[t1, t1 + 180.0],
[0, 360],
[90, 90 + 360],
[-180, 180],
[0, 380],
[45, 46],
[46, 45]]
for i, (theta1, theta2) in enumerate(args):
x = i % 3
y = i // 3
wedge = mpatches.Wedge((x * 3, y * 3), 1, theta1, theta2,
facecolor='none', edgecolor='k', lw=3)
ax.add_artist(wedge)
ax.set_xlim([-2, 8])
ax.set_ylim([-2, 9])
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| apache-2.0 |
cogmission/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_agg.py | 69 | 11729 | """
An agg http://antigrain.com/ backend
Features that are implemented
* capstyles and join styles
* dashes
* linewidth
* lines, rectangles, ellipses
* clipping to a rectangle
* output to RGBA and PNG
* alpha blending
* DPI scaling properly - everything scales properly (dashes, linewidths, etc)
* draw polygon
* freetype2 w/ ft2font
TODO:
* allow save to file handle
* integrate screen dpi w/ ppi and text
"""
from __future__ import division
import numpy as npy
from matplotlib import verbose, rcParams
from matplotlib.backend_bases import RendererBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, maxdict
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont
from matplotlib.ft2font import FT2Font, LOAD_FORCE_AUTOHINT
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from _backend_agg import RendererAgg as _RendererAgg
from matplotlib import _png
backend_version = 'v2.2'
class RendererAgg(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles
"""
debug=1
texd = maxdict(50) # a cache of tex image rasters
_fontd = maxdict(50)
def __init__(self, width, height, dpi):
if __debug__: verbose.report('RendererAgg.__init__', 'debug-annoying')
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
if __debug__: verbose.report('RendererAgg.__init__ width=%s, height=%s'%(width, height), 'debug-annoying')
self._renderer = _RendererAgg(int(width), int(height), dpi, debug=False)
if __debug__: verbose.report('RendererAgg.__init__ _RendererAgg done',
'debug-annoying')
#self.draw_path = self._renderer.draw_path # see below
self.draw_markers = self._renderer.draw_markers
self.draw_path_collection = self._renderer.draw_path_collection
self.draw_quad_mesh = self._renderer.draw_quad_mesh
self.draw_image = self._renderer.draw_image
self.copy_from_bbox = self._renderer.copy_from_bbox
self.restore_region = self._renderer.restore_region
self.tostring_rgba_minimized = self._renderer.tostring_rgba_minimized
self.mathtext_parser = MathTextParser('Agg')
self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
if __debug__: verbose.report('RendererAgg.__init__ done',
'debug-annoying')
def draw_path(self, gc, path, transform, rgbFace=None):
nmax = rcParams['agg.path.chunksize'] # here at least for testing
npts = path.vertices.shape[0]
if nmax > 100 and npts > nmax and path.should_simplify and rgbFace is None:
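# Render very long unfilled paths in chunks of roughly nmax vertices
# (rcParam 'agg.path.chunksize') to avoid Agg failures and slowdowns on huge
# data sets; each chunk below is re-anchored with a MOVETO.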
nch = npy.ceil(npts/float(nmax))
chsize = int(npy.ceil(npts/nch))
i0 = npy.arange(0, npts, chsize)
i1 = npy.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1,:]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
self._renderer.draw_path(gc, p, transform, rgbFace)
else:
self._renderer.draw_path(gc, path, transform, rgbFace)
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if __debug__: verbose.report('RendererAgg.draw_mathtext',
'debug-annoying')
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
x = int(x) + ox
y = int(y) - oy
self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
Render the text
"""
if __debug__: verbose.report('RendererAgg.draw_text', 'debug-annoying')
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
font = self._get_agg_font(prop)
if font is None: return None
if len(s) == 1 and ord(s) > 127:
font.load_char(ord(s), flags=LOAD_FORCE_AUTOHINT)
else:
# We pass '0' for angle here, since it will be rotated (in raster
# space) in the following call to draw_text_image.
font.set_text(s, 0, flags=LOAD_FORCE_AUTOHINT)
font.draw_glyphs_to_bitmap()
#print x, y, int(x), int(y)
self._renderer.draw_text_image(font.get_image(), int(x), int(y) + 1, angle, gc)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontProperty prop
# passing rgb is a little hack to make caching in the
# texmanager more efficient. It is not meant to be used
# outside the backend
"""
if ismath=='TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: descent of TeX text (I am imitating backend_ps here -JKS)
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
font = self._get_agg_font(prop)
font.set_text(s, 0.0, flags=LOAD_FORCE_AUTOHINT) # the width and height of unrotated string
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def draw_tex(self, gc, x, y, s, prop, angle):
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key)
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = npy.array(Z * 255.0, npy.uint8)
self._renderer.draw_text_image(Z, x, y, angle, gc)
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def _get_agg_font(self, prop):
"""
Get the font for text instance t, caching for efficiency
"""
if __debug__: verbose.report('RendererAgg._get_agg_font',
'debug-annoying')
key = hash(prop)
font = self._fontd.get(key)
if font is None:
fname = findfont(prop)
font = self._fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self._fontd[fname] = font
self._fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def points_to_pixels(self, points):
"""
convert point measures to pixels using dpi and the pixels per
inch of the display
"""
if __debug__: verbose.report('RendererAgg.points_to_pixels',
'debug-annoying')
return points*self.dpi/72.0
def tostring_rgb(self):
if __debug__: verbose.report('RendererAgg.tostring_rgb',
'debug-annoying')
return self._renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('RendererAgg.tostring_argb',
'debug-annoying')
return self._renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('RendererAgg.buffer_rgba',
'debug-annoying')
return self._renderer.buffer_rgba(x,y)
def clear(self):
self._renderer.clear()
def option_image_nocomposite(self):
# It is generally faster to composite each image directly to
# the Figure, and there's no file size benefit to compositing
# with the Agg backend
return True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if __debug__: verbose.report('backend_agg.new_figure_manager',
'debug-annoying')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasAgg(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasAgg(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def copy_from_bbox(self, bbox):
renderer = self.get_renderer()
return renderer.copy_from_bbox(bbox)
def restore_region(self, region):
renderer = self.get_renderer()
return renderer.restore_region(region)
def draw(self):
"""
Draw the figure using the renderer
"""
if __debug__: verbose.report('FigureCanvasAgg.draw', 'debug-annoying')
self.renderer = self.get_renderer()
self.figure.draw(self.renderer)
def get_renderer(self):
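# Reuse the cached renderer; rebuild it only when the canvas size or dpi has changed.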
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try: self._lastKey, self.renderer
except AttributeError: need_new_renderer = True
else: need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self.renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
return self.renderer
def tostring_rgb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_rgb',
'debug-annoying')
return self.renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_argb',
'debug-annoying')
return self.renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('FigureCanvasAgg.buffer_rgba',
'debug-annoying')
return self.renderer.buffer_rgba(x,y)
def get_default_filetype(self):
return 'png'
def print_raw(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
renderer._renderer.write_rgba(filename_or_obj)
renderer.dpi = original_dpi
print_rgba = print_raw
def print_png(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
_png.write_png(renderer._renderer.buffer_rgba(0, 0),
renderer.width, renderer.height,
filename_or_obj, self.figure.dpi)
renderer.dpi = original_dpi
| agpl-3.0 |
SixTrack/SixTrack | test/elensidealthin6d/elens_plot_kick.py | 1 | 1663 | import matplotlib.pyplot as plt
import numpy as np
r2hel1=6.928 # from fort.3 [mm]
sig=r2hel1/6 # 1 sigma beam size, hel1 between 4-6 sigma
offsetx=-1.1547
offsety=-2.3093
theta_r2=4.920e-03 # max. kick [mrad]
oFile=open('kicks.dat','w')
plt.figure('elens kick',figsize=(13,13))
for fnin,fnout,offx,offy,R,R2f,peakT in [(1,2,0,0,0.5,7,7),(2,3,offsetx,offsety,1,12,10.8),(3,4,-offsetx,0,1,5,2.91604),(4,5,0,-offsety,1/2.,3,3.48995)]:
theta_max=theta_r2*R
plt.subplot(2,2,fnin)
helin=np.loadtxt('HEL_DUMP_%s'%fnin)
helout=np.loadtxt('HEL_DUMP_%s'%fnout)
rrin=np.sqrt((helin[:,3]-offx)**2+(helin[:,5]-offy)**2)
rrout=np.sqrt((helout[:,3]-offx)**2+(helout[:,5]-offy)**2)
if np.max(rrin-rrout)==0:
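# the e-lens should change only xp/yp, so r (computed from x/y) must be identical before and after the kick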
fff=np.sqrt((helin[:,4]-helout[:,4])**2+(helin[:,6]-helout[:,6])**2)
plt.plot(rrin/sig,fff,'.',label=r'offx=%2.3f sigma,offy=%2.3f sigma'%(offx/sig,offy/sig))
plt.plot(rrin/sig,np.ones(len(rrin))*theta_max,'k-',label=r'$\theta_{R_2}$')
plt.plot([R2f,R2f],[0,theta_max*1.1],'g-',label=r'$R_2$')
plt.plot([peakT,peakT],[0,max(fff)*1.05],'r-',label=r'$n_{\mathrm{peak}}$')
plt.xlabel(r'$n_{\sigma}=\sqrt{(x-x_{\mathrm{off}})^2+(y-y_{\mathrm{off}})^2)}$ [$\sigma$]')
plt.ylabel(r'$\theta(r)=\sqrt{xp^2+yp^2}$ [mrad]')
plt.legend(loc='best',fontsize=10)
plt.tight_layout()
plt.grid()
oFile.write('# %i %i \n'%(fnin,fnout))
for tmpR,tmpF in zip(rrin,fff):
oFile.write(' % 22.15E % 22.15E % 22.15E \n'%(tmpR,tmpR/sig,tmpF))
oFile.write('\n\n')
else:
print 'x or y has been changed in %s / %s - elens should only change xp,yp'%('HEL_DUMP_%s'%fnin,'HEL_DUMP_%s'%fnout)
oFile.close()
plt.show()
| lgpl-2.1 |
shusenl/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
beomyeol/models | autoencoder/MaskingNoiseAutoencoderRunner.py | 10 | 1689 | import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder.autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 100
batch_size = 128
display_step = 1
autoencoder = MaskingNoiseAutoencoder(n_input = 784,
n_hidden = 200,
transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
dropout_probability = 0.95)
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
cost = autoencoder.partial_fit(batch_xs)
avg_cost += cost / n_samples * batch_size
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch + 1), \
"cost=", "{:.9f}".format(avg_cost)
print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
| apache-2.0 |
glennq/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 87 | 2510 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
sibis-platform/ncanda-datacore | scripts/reporting/ncanda_quality_control_script.py | 4 | 14366 | #!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
ncanda_quality_control_script
=============================
This script checks the quality of the data for the NCANDA Project on REDCap.
Call script on command line.
Example Usage:
python ncanda_quality_control_script.py -v "baseline_visit_arm_1"
"""
import os
import sys
import json
import datetime
import csv
import redcap
import math
import pandas as pd
import sibis
fields = ['study_id', 'redcap_event_name','exclude', 'visit_ignore',
'visit_date', 'dob', 'cnp_test_sessions_dob','saliva_missing',
'saliva_1_collected','saliva_1_date','saliva_2_collected','saliva_2_date',
'saliva_3_collected','saliva_3_date','saliva_4_collected',
'saliva_4_date','youthreport1_missing','youthreport1_date',
'youthreport1b_missing', 'youthreport1b_date','youthreport2_missing',
'youthreport2_date','youthreport2_yid2', 'youthreport1_yid2',
'parentreport_missing','parentreport_date','ssage_youth_missing',
'ssage_youth_date', 'lssaga1_youth_missing','lssaga1_youth_date',
'lssaga1_parent_missing','lssaga1_parent_date','bio_np_missing',
'bio_np_date','dd1000_missing','dd1000_date','dd100_missing',
'dd100_date','np_wrat4_missing','np_wrat4_wr_raw','np_gpeg_missing',
'np_gpeg_exclusion','np_gpeg_dh_time','np_gpeg_ndh_time',
'np_reyo_missing','np_reyo_copy_time','np_reyo_qc(completed)',
'np_atax_missing','np_atax_sht_trial1','np_wais4_missing',
'np_wais4_rawscore','np_wais4_rawscore_computed',
'np_wais4_rawscore_diff(correct)','pasat_missing','pasat_date',
'cnp_missing','cnp_test_sessions_dotest','stroop_missing',
'stroop_date','mrireport_missing','mrireport_date',
'mr_session_report_complete']
form_fields = [['youthreport1_missing','youthreport1_date'],
['youthreport1b_missing', 'youthreport1b_date'],
['youthreport2_missing', 'youthreport2_date'],
['parentreport_missing','parentreport_date'],
['ssage_youth_missing','ssage_youth_date'],
['lssaga1_youth_missing','lssaga1_youth_date'],
['lssaga1_parent_missing','lssaga1_parent_date'],
['bio_np_missing', 'bio_np_date'],
['dd1000_missing','dd1000_date'],
['dd100_missing','dd100_date'],
['np_wrat4_missing','np_wrat4_wr_raw'],
['np_reyo_missing','np_reyo_copy_time'],
['np_atax_missing','np_atax_sht_trial1'],
['np_wais4_missing', 'np_wais4_rawscore'],
['pasat_missing','pasat_date'],
['cnp_missing','cnp_test_sessions_dotest'],
['stroop_missing','stroop_date']]
np_gpeg_fields = [['np_gpeg_exclusion___dh','np_gpeg_dh_time'],
['np_gpeg_exclusion___ndh','np_gpeg_ndh_time']]
saliva_fields = [['saliva_1_collected','saliva_1_date'],
['saliva_2_collected','saliva_2_date'],['saliva_3_collected',
'saliva_3_date'],['saliva_4_collected','saliva_4_date']]
fields_sex = [['youthreport1_missing','youthreport1_yid2'],
['youthreport2_missing','youthreport2_yid2']]
def get_project_entry(args=None):
"""
Pulls the data from REDCap
"""
# Get API key.
summary_key_file = open(os.path.join(os.path.expanduser("~"),
'.server_config',
'redcap-dataentry-token'), 'r')
summary_api_key = summary_key_file.read().strip()
# Connect to API.
project_entry = redcap.Project('https://ncanda.sri.com/redcap/api/',
summary_api_key, verify_ssl=False)
return project_entry
def data_entry_fields(fields,project,arm):
"""
Gets the dataframe containing a specific arm from REDCap
"""
# Get a dataframe of fields
data_entry_raw = project.export_records(fields=fields, format='df',
events=arm)
return data_entry_raw
def check(check, error):
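"""Append the outcome of a single QC check to the error list when it is a non-empty error dict."""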
if check:
error.append(check)
def missing_form(idx,row,field_missing, field_value):
"""
Generates a report indicating which Forms have not been entered onto redcap
"""
error = dict()
#exclude with a value of 1 is excluded
if math.isnan(row.get('exclude')):
# visit_ignore____yes with value 1 is ignored
if row.get('visit_ignore___yes') != 1:
# form is not missing if the form_missing value is nan or zero
if row.get(field_missing) != 1:
# for form_date, date is stored as a string
if type(row.get(field_value)) == float:
if math.isnan(row.get(field_value)):
error = dict(subject_site_id = idx[0],
visit_date = row.get('visit_date'),
form_missing = field_missing,
event_name = idx[1],
error = 'ERROR: Form is missing')
return error
def np_groove_check(idx,row,field_missing, field_excluded, field_value):
"""
Checks to see if the Grooveboard NP is missing
"""
# visit_ignore____yes with value 0 is not ignored
error = dict()
if math.isnan(row.get('exclude')):
if row.get('visit_ignore___yes') == 0:
# np is not excluded if the field_excluded value is zero
if row.get(field_excluded) == 0:
# np is not missing if the field_missing value is nan or zero
if row.get(field_missing) == 0 or math.isnan(row.get(field_missing)):
# for np_date, date is stored as a string
if type(row.get(field_value)) == float:
# If field is left blank, a NaN is put in it's place
if math.isnan(row.get(field_value)):
error = dict(subject_site_id = idx[0],
visit_date = row.get('visit_date'),
np_missing = field_missing,
event_name = idx[1],
error = 'ERROR: NP is missing.'
)
return error
def fourteen_days_mri_report(idx,row):
"""
Generates a report indicating which MRI reports have no data after 14 days.
"""
error = dict()
#exclude with a value of 1 is excluded
if math.isnan(row.get('exclude')):
# visit_ignore____yes with value 1 is ignored
if row.get('visit_ignore___yes') != 1:
if row.get('mrireport_missing') != 1:
if type(row.get('mrireport_date')) == str:
if datetime.datetime.strptime(row.get('mrireport_date'),'%Y-%m-%d').date() == datetime.date.today()-datetime.timedelta(days = 14):
error = dict(subject_site_id = idx[0],
visit_date = row.get('visit_date'),
event_name = idx[1],
error = 'ERROR: No MRI data after 14 days')
return error
def cnp_dob(idx,row):
"""
Checks to see if dob and cnp_test_sessions_dob match
"""
# visit_ignore____yes with value 0 is not ignored
error = dict()
if math.isnan(row.get('exclude')):
if row.get('visit_ignore___yes') == 0:
if row.get('dob') != row.get('cnp_test_sessions_dob'):
error = dict(subject_site_id = idx[0],
visit_date = row.get('visit_date'),
event_name = idx[1],
error = 'ERROR: DOB and CNP_TEST_SESSIONS_DOB do not match.'
)
return error
def missing_mri_stroop(idx,row):
"""
Generate a report indicating which MRI Stroop have not been entered.
"""
# visit_ignore____yes with value 0 is not ignored
error = dict()
if math.isnan(row.get('exclude')):
if row.get('visit_ignore___yes') != 1:
# MRI report is not missing if the mri_missing value is nan or zero
if row.get('mri_missing') != 1:
if row.get('redcap_data_access_group') == 'SRI' or row.get('redcap_data_access_group') == 'UCSD':
if row.get('mri_stroop_missing') == 0:
# for mri_stroop_date, date is stored as a string, if blank, defaults to NaN
if type(row.get('mri_stroop_date')) == float:
error = dict(subject_site_id = idx[0],
xnat_sid = row.get('mri_xnat_sid'),
visit_date = row.get('visit_date'),
event_name = idx[1],
error = 'ERROR: MRI Stroop is missing'
)
return error
def missing_saliva_sample(idx,row,saliva_collected, saliva_date):
"""
Generate a report indicating which Saliva Samples have not been entered.
"""
# visit_ignore____yes with value 0 is not ignored
error = dict()
if math.isnan(row.get('exclude')):
if row.get('visit_ignore___yes') != 1:
# saliva sample is not missing if the saliva_missing value is zero
if row.get('saliva_missing') != 1:
if row.get(saliva_collected) == 1:
# for form_date, date is stored as a string
if type(row.get(saliva_date)) == float:
error = dict(subject_site_id = idx[0],
visit_date = row.get('visit_date'),
event_name = idx[1],
sample_missing = saliva_collected,
visit_notes = row.get('visit_notes'),
error = 'ERROR: Saliva Sample is missing'
)
return error
def visit_data_missing(idx,row):
"""
Generate a report indicating which Visit Dates are missing.
"""
error = dict()
if row.get('exclude') != 1:
if row.get('visit_ignore___yes') != 1:
if type(row.get('visit_date')) != str:
error = dict(subject_site_id = idx[0],
event_name = idx[1],
error = 'ERROR: Visit date missing.'
)
return error
def wais_score_verification(idx,row):
"""
Verifies whether the wais_rawscore was computed correctly.
"""
# visit_ignore____yes with value 0 is not ignored
error = dict()
if math.isnan(row.get('exclude')):
if row.get('visit_ignore___yes') != 1:
# form is not missing if the np_wais4_missing value is nan or zero
if row.get('np_wais4_missing') != 1:
if row.get('np_wais4_rawscore_computed') == row.get('np_wais4_rawscore_diff(correct)'):
if row.get('np_wais4_rawscore_diff(correct)') != 0:
error = dict(subject_site_id = idx[0],
visit_date = row.get('visit_date'),
event_name = idx[1],
error = 'ERROR: WAIS score is not verified'
)
return error
def youth_report_sex(idx,row, field_missing, field_sex):
"""
Checks whether or not sex was entered correctly in the Youth Report
"""
# visit_ignore____yes with value 0 is not ignored
error = dict()
if math.isnan(row.get('exclude')):
if row.get('visit_ignore___yes') != 1:
# np is not missing if the field_missing value is nan or zero
if row.get(field_missing) != 1:
if row.get('sex') != row.get(field_sex):
error = dict(subject_site_id = idx[0],
visit_date = row.get('visit_date'),
event_name = idx[1],
field = field_sex,
error = 'ERROR: SEX and SEX in YOUTHREPORT do not match.'
)
return error
def main(args):
project_entry = get_project_entry()
project_df = data_entry_fields(fields,project_entry,args.visit)
error = []
for idx, row in project_df.iterrows():
for f in form_fields:
check(missing_form(idx,row,f[0],f[1]),error)
for np in np_gpeg_fields:
check(np_groove_check(idx,row,'np_gpeg_missing',np[0],np[1]),error)
check(fourteen_days_mri_report(idx,row),error)
check(cnp_dob(idx, row),error)
check(missing_mri_stroop(idx, row),error)
for s in saliva_fields:
check(missing_saliva_sample(idx,row,s[0],s[1]),error)
check(visit_data_missing(idx,row),error)
check(wais_score_verification(idx,row),error)
for f in fields_sex:
check(youth_report_sex(idx,row,f[0],f[1]),error)
if args.csvdir:
for e in error:
if e == 'null':
error.remove(e)
with open(args.csvdir, 'wb+') as fi:
f = csv.writer(fi)
f.writerow(["subject_site_id", "visit_date", "event_name", "error"])
for x in error:
f.writerow([x["subject_site_id"],
x["visit_date"],
x["event_name"],
x["error"]])
else:
for e in error:
if e != 'null':
#print json.dumps(e, sort_keys=True)
#print "{}-{}".format(e['subject_site_id'], e['visit_date']), e['error'],e
sibis.logging("{}-{}".format(e['subject_site_id'], e['visit_date']), e['error'],e_dictionary=e)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v','--visit', default=['baseline_visit_arm_1', '1y_visit_arm_1'],
help='Select which visit the QC script runs on',)
parser.add_argument( "-c","--csvdir", action="store",
help="Directory where CSV will be stored.")
argv = parser.parse_args()
sys.exit(main(args=argv))
| bsd-3-clause |
yudingding6197/fin_script | self_define.py | 1 | 5827 | #!/usr/bin/env python
# -*- coding:utf8 -*-
#新增更新juchao交易tips
import sys
import re
import os
import time
import string
import datetime
import getopt
import pandas as pd
from internal.output_general import *
from internal.dfcf_inf import *
from internal.trade_date import *
from internal.inf_juchao.daily_trade_tips import *
def rt_quotes(dtFrame, source, qt_stage):
print(source)
for index,row in dtFrame.iterrows():
#print(row)
r1_len = len(row[1])
r1 = row[1].decode('gbk')
for i in range(10-r1_len):
r1 += ' '
if row['state']!='00':
line = "%06s %-s -- --" %(row[0], r1)
print (line)
continue
open = row['open']
pre_close = row['p_close']
price = row['price']
high = row['high']
low = row['low']
volume = int(row['volume'])
price_f = float(price)
pre_close_f = float(pre_close)
bidb_f = float(row['bidb'])
bidb_s = float(row['bids'])
if float(price)==0 or float(high)==0:
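# price/high of 0 (e.g. no trade data yet); fall back to the bid fields below, when nonzero, to estimate the change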
change = '-'
change_l = '-'
change_h = '-'
change_o = '-'
if bidb_f==0 and bidb_s==0:
pass
elif bidb_f!=0:
price_f = bidb_f
change = '%02.02f'%( ((price_f-pre_close_f)/pre_close_f)*100 )
elif bidb_s!=0:
price_f = bidb_s
change = '%02.02f'%( ((price_f-pre_close_f)/pre_close_f)*100 )
else:
print("Error: Special Case", price, bidb, bids)
print(row)
else:
change = '%02.02f'%( ((price_f-pre_close_f)/pre_close_f)*100 )
change_l = '%02.02f'%( ((float(low)-pre_close_f)/pre_close_f)*100 )
change_h = '%02.02f'%( ((float(high)-pre_close_f)/pre_close_f)*100 )
change_o = '%02.02f'%( ((float(open)-pre_close_f)/pre_close_f)*100 )
str_fmt = "%06s %-s %6.2f(%6s%%) %8s(%6s) %8s(%6s)"
line = str_fmt %(row[0], r1, price_f, change, low, change_l, high, change_h)
print(line)
def index_follow_zd(head, index_ud):
if len(index_ud)>3:
zhang = index_ud[0]
ping = index_ud[1]
die = index_ud[2]
total = int(zhang) + int(ping) + int(die)
zh_per = int(zhang) * 100 / total
die_per = int(die) * 100 / total
print("%s %4s %4s %4s (%2d vs %2d) "%(head, zhang, ping, die, zh_per, die_per))
else:
print(head)
def index_info(df, show_idx, idxDict):
if df is None:
return
sh_info = ''
sz_info = ''
for index,row in df.iterrows():
if row[0] not in show_idx:
continue
open = float(row['open'])
close = float(row['close'])
preclose = float(row['preclose'])
if idxDict.has_key(row['code']):
head = "%8.2f(%6s)"%(close, row[2])
index_follow_zd(head, idxDict[row['code']])
else:
print("%8.2f(%6s)"%(close, row[2]))
def read_def(data_path, stockCode, stockCode_sn):
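"""Read stock codes from the given list file into stockCode and their sina_code() forms into stockCode_sn."""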
file = open(data_path, 'r')
if file is None:
print("Error open file", data_path)
return
if '_self_define' in data_path:
flag=0
lines = file.readlines(10000)
for line in lines:
line=line.strip()
if line=='STK':
flag=1
continue
elif flag==1 and line=='END':
break
if flag==0:
continue
code=line.strip()
if len(code)!=6:
continue;
if not code.isdigit():
continue;
stockCode.append(code)
ncode = sina_code(code)
stockCode_sn.append(ncode)
else:
line = file.readline()
while line:
if len(line)>=6:
code = line[0:6]
if code.isdigit():
stockCode.append(code)
ncode = sina_code(code)
stockCode_sn.append(ncode)
line = file.readline()
file.close()
def update_juchao_tips():
curr = datetime.datetime.now()
trade_date = get_lastday()
if curr.hour<9:
return
t_fmt = '%d-%02d-%02d'
cur_date = t_fmt%(curr.year, curr.month, curr.day)
today = datetime.date.today()
pre20Dt = today - datetime.timedelta(days=20)
pre20_date = t_fmt%(pre20Dt.year, pre20Dt.month, pre20Dt.day)
#print cur_date,pre20_date
fetch_jc_trade_tips(pre20_date, cur_date)
#Main
curdate = ''
data_path = "debug/_self_define.txt"
exclude = 0
show_flag = 0
stockCode = []
stockCode_sn = []
qt_stage = 0
if __name__=="__main__":
optlist, args = getopt.getopt(sys.argv[1:], '?f:en')
for option, value in optlist:
if option in ["-f","--file"]:
if value=='.':
data_path='../data/entry/miner/filter.txt'
else:
data_path=value
elif option in ["-e","--exclude"]:
exclude = 1
elif option in ["-n","--notice"]:
show_flag = 1
elif option in ["-?","--???"]:
print("Usage:", os.path.basename(sys.argv[0]), " [-f filename] [-e] [-t type]")
exit()
if not os.path.isfile(data_path):
print("No file:",data_path)
exit(0)
today = datetime.date.today()
curdate = '%04d-%02d-%02d' %(today.year, today.month, today.day)
# update Juchao (巨潮) tips info
update_juchao_tips()
read_def(data_path, stockCode, stockCode_sn)
if show_flag==1:
list_latest_news(stockCode, curdate)
exit(0)
# quotation stage corresponding to the current time
qt_stage = quotation_st()
# real-time index information
qt_index = getHSIndexStat()
idxDict = {}
ret = get4IndexInfo(idxDict)
#show_idx = ['000001', '399001', '399005', '399006']
#idx_df=ts.get_index()
#index_info(idx_df, show_idx, idxDict)
show_idx = ['000001', '399001', '399005', '399006','399678']
show_real_index(show_idx)
#codeArray = ['399678']
#list_extra_index(codeArray)
column = []
create_column(column)
#print(column)
rt_list = []
realtime_price(stockCode_sn, rt_list)
#print(rt_list)
df = pd.DataFrame(rt_list, columns=column)
#print (df)
#df.set_index('code')
rt_quotes(df, '', qt_stage)
#Get self def from DFCF(DongCai)
rt_list = []
stockCode_sn = []
if exclude==0:
stock_array = []
getSelfDefStock(stock_array)
if len(stock_array)==0:
print "Fail to get self defined from DFCF"
exit()
stockCode = []
for i in stock_array:
stockCode.append(i[:6])
ncode = sina_code(i[:6])
stockCode_sn.append(ncode)
#print ("i===" + ncode)
realtime_price(stockCode_sn, rt_list)
df = pd.DataFrame(rt_list, columns=column)
rt_quotes(df, 'DFCF', qt_stage)
| gpl-2.0 |
averagehat/scikit-bio | skbio/stats/distance/tests/test_permanova.py | 13 | 4940 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import StringIO
from functools import partial
from unittest import TestCase, main
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal
from skbio import DistanceMatrix
from skbio.stats.distance import permanova
class TestPERMANOVA(TestCase):
"""All results were verified with R (vegan::adonis)."""
def setUp(self):
# Distance matrices with and without ties in the ranks, with 2 groups
# of equal size.
dm_ids = ['s1', 's2', 's3', 's4']
self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
self.df = pd.read_csv(
StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
's1,Control'), index_col=0)
self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
[1, 0, 3, 2],
[1, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
[1, 0, 3, 2],
[5, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
# Test with 3 groups of unequal size.
self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
'Treatment1', 'Control', 'Control']
# Equivalent grouping but with different labels -- groups should be
# assigned different integer labels but results should be the same.
self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']
self.dm_unequal = DistanceMatrix(
[[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
[1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
[0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
[0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
[1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
[1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
['s1', 's2', 's3', 's4', 's5', 's6'])
# Expected series index is the same across all tests.
self.exp_index = ['method name', 'test statistic name', 'sample size',
'number of groups', 'test statistic', 'p-value',
'number of permutations']
# Stricter series equality testing than the default.
self.assert_series_equal = partial(assert_series_equal,
check_index_type=True,
check_series_type=True)
def test_call_ties(self):
# Ensure we get the same results if we rerun the method using the same
# inputs. Also ensure we get the same results if we run the method
# using a grouping vector or a data frame with equivalent groupings.
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 2.0, 0.671, 999],
name='PERMANOVA results')
for _ in range(2):
np.random.seed(0)
obs = permanova(self.dm_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
for _ in range(2):
np.random.seed(0)
obs = permanova(self.dm_ties, self.df, column='Group')
self.assert_series_equal(obs, exp)
def test_call_no_ties(self):
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, 0.332, 999],
name='PERMANOVA results')
np.random.seed(0)
obs = permanova(self.dm_no_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
def test_call_no_permutations(self):
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, np.nan, 0],
name='PERMANOVA results')
obs = permanova(self.dm_no_ties, self.grouping_equal, permutations=0)
self.assert_series_equal(obs, exp)
def test_call_unequal_group_sizes(self):
exp = pd.Series(
index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 6, 3, 0.578848, 0.645, 999],
name='PERMANOVA results')
np.random.seed(0)
obs = permanova(self.dm_unequal, self.grouping_unequal)
self.assert_series_equal(obs, exp)
np.random.seed(0)
obs = permanova(self.dm_unequal, self.grouping_unequal_relabeled)
self.assert_series_equal(obs, exp)
if __name__ == '__main__':
main()
| bsd-3-clause |
Technologicat/pydgq | doc/legtest2.py | 1 | 12702 | # -*- coding: utf-8 -*-
#
# Trying out the NumPy API for Legendre polynomials and Gauss--Legendre quadrature,
# with an eye toward the modern hierarchical (Lobatto) basis functions for Galerkin methods
# (B. Szabó, I. Babuška, Finite element analysis, John Wiley & Sons, 1991).
#
# JJ 2016-02-16
from __future__ import division, print_function, absolute_import
import time
import numpy as np
import scipy.integrate
import matplotlib.pyplot as plt
import pylu.dgesv as dgesv
class RandomPileOfTestStuff:
def __init__(self, q=15, tol=1e-8):
assert( q >= 2 ) # we don't have special case handling for q=1 in build_hierarchical_basis()
self.q = q # max polynomial degree for Legendre polynomials; number of basis functions for hierarchical basis (as in "dG(q)")
self.tol = tol # tolerance for nonzero check
self.P = None # Legendre polynomials
self.N = None # hierarchical basis functions (FEM, dG)
self.C = None # dG mass matrix for the first-order problem u' = f(u, t)
self.get_legendre_polynomials()
self.build_hierarchical_basis()
self.dgmass()
def get_legendre_polynomials(self):
q = self.q
P = []
# For each degree d, get the polynomial coefficients of a Legendre series
# that has only the dth degree term. Construct the corresponding Polynomial object.
#
# The coefficients are listed from the lowest order to highest.
#
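# e.g. for d = 2, leg2poly([0., 0., 1.]) gives [-0.5, 0., 1.5], i.e. P_2(x) = 1.5 x**2 - 0.5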
for d in range(q):
# d zeroes followed by a one
#
series_coeffs = [ 0. ] * d
series_coeffs.append( 1. )
# coefficients for a standard power series 1, x, x**2, ...
#
c = np.polynomial.legendre.leg2poly( series_coeffs )
P.append( np.polynomial.Polynomial( c ) )
self.P = P
def build_hierarchical_basis(self):
assert( self.P is not None )
q = self.q
P = self.P
N = []
N.append( np.polynomial.Polynomial( [0.5, -0.5] ) ) # N_1, will become N[0] in the code, using Polynomial instead of explicit lambda gets us support for .deriv()
N.append( np.polynomial.Polynomial( [0.5, 0.5] ) ) # N_2
for j in range(2,q):
#N.append( np.sqrt( (2.*j - 1.)/2.) * P[j-1].integ(lbnd=-1, k=0) ) # surely this approach makes no numerical sense
# Explicit solution, using NumPy to evaluate the sum of Legendre polynomials.
#
# Much better (and still fast), but not nearly as accurate as evaluating using higher precision internally. See legtest3.py.
#
series_coeffs = [ 0. ] * (j-2)
series_coeffs.extend( [-1., 0., 1.] ) # -P_{j-2} + P_{j}
c = np.polynomial.legendre.leg2poly( series_coeffs )
Nj = np.polynomial.Polynomial(c) / np.sqrt( 2. * (2.*j - 1.) )
N.append( Nj )
self.N = N
# This numerical approach for generating the matrix is prone to roundoff and obsolete (not to mention stupid
# since we know that most of the matrix entries should be zero); see the analytical solution in legtest3.py.
#
def dgmass(self):
assert( self.N is not None )
q = self.q
N = self.N
C = np.empty( (q,q), dtype=np.float64 )
for i in range(q):
for j in range(q):
C[i,j] = scipy.integrate.quad( N[j].deriv(1)*N[i], -1., 1. )[0]
C[ np.abs(C) < self.tol ] = 0.0
C[0,0] += 1.0 # simulate the effect of the jump term (N_1 is the only function that is nonzero at xi=-1)
self.C = C
def main():
# Up to q=24, the full script works despite warnings from quad() in dgmass().
#
# For evaluating the hierarchical basis functions only (no dgmass()):
#
# q = 30, still sort of works, small deviations (1e-7) can be seen in the endpoint values of the few highest-order Nj
# q = 40, almost works, high-order Nj start getting wobbly
# q = 50, completely broken, out of precision
#
# By comparison, legtest3.py, which uses SymPy's mpmath (arbitrary precision floating point), works at least up to q=300, but is very slow.
#
stuff = RandomPileOfTestStuff(q=24, tol=1e-3)
# From the API docs for numpy.polynomial.legendre.leggauss:
# Computes the sample points and weights for Gauss-Legendre quadrature.
# These sample points and weights will correctly integrate polynomials of degree 2*deg - 1 or less over the interval [-1, 1] with the weight function f(x) = 1.
#
# Hence, in Galerkin methods, to exactly handle a mass matrix where neither of the terms is differentiated, using affine mapping to the reference element [-1,1]
# (implying piecewise constant Jacobian), we need to have
#
# 2*deg - 1 = 2*d
#
# i.e.
#
# deg = (2*d + 1) / 2
#
# deg = int(np.ceil( (2*d + 1)/2. ))
# q,w = np.polynomial.legendre.leggauss( deg )
# print( deg,(2*deg-1),q,w )
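# Quick numerical sanity check of the rule above (illustration only): take d as the
# highest polynomial degree in the basis, i.e. d = q-1, and verify that
# deg = ceil((2*d + 1)/2) Gauss points integrate x**(2*d) over [-1,1] exactly.
d_chk = stuff.q - 1
deg_chk = int(np.ceil( (2*d_chk + 1)/2. ))
qq,ww = np.polynomial.legendre.leggauss( deg_chk )
exact = 2. / (2.*d_chk + 1.)  # analytical value of the integral of x**(2*d) over [-1,1]
print( "Gauss-Legendre degree check: deg=%d, abs. error=%g" % (deg_chk, abs(np.sum( ww * qq**(2*d_chk) ) - exact)) )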
print( stuff.C )
print( np.linalg.matrix_rank(stuff.C) ) # should be full rank
plt.figure(2)
plt.spy(stuff.C)
plt.plot( [0,stuff.q-1], [0,stuff.q-1], 'r--' )
# plt.imshow(M, interpolation="nearest", cmap="Oranges")
# plt.colorbar()
plt.title(r"$\mathbf{M}$")
## L,U,p = dgesv.lup(stuff.C)
## print( np.transpose(np.nonzero(L)) )
## print( np.transpose(np.nonzero(U)) )
## print( p )
## plt.figure(3)
## plt.subplot(1,2, 1)
## plt.spy(L)
## plt.plot( [0,stuff.q-1], [0,stuff.q-1], 'r--' )
### plt.imshow(L, interpolation="nearest", cmap="Oranges")
### plt.colorbar(orientation="horizontal")
## plt.title(r"$\mathbf{L}$")
## plt.subplot(1,2, 2)
## plt.spy(U)
## plt.plot( [0,stuff.q-1], [0,stuff.q-1], 'r--' )
### plt.imshow(U, interpolation="nearest", cmap="Oranges")
### plt.colorbar(orientation="horizontal")
## plt.title(r"$\mathbf{U}$")
LU,p = dgesv.lup_packed(stuff.C)
plt.figure(4)
plt.spy(LU)
plt.plot( [0,stuff.q-1], [0,stuff.q-1], 'r--' )
plt.title(r"$\mathbf{LU}$ (packed format)")
mincols,maxcols = dgesv.find_bands(LU, 1e-15)
print( mincols, maxcols )
## # old Python-based mincols, maxcols finding code
##
## # Find the smallest column index with nonzero data on each row in L.
## #
## # We can use this to "sparsify" the backsolve even though the data structure is dense.
## #
## # This assumes that each row has at least one nonzero entry (which is always the case for an invertible matrix).
## #
## Lnz = np.nonzero(L)
## mincols = []
## rowprev = -1
## n = len(Lnz[0])
## i = 0
## while i < n:
## if Lnz[0][i] != rowprev:
## mincols.append(Lnz[1][i])
## rowprev = Lnz[0][i]
## i += 1
## mincols = np.array( mincols, dtype=np.intc, order="C" )
## print( L )
## print( mincols )
## # Find the largest column index with nonzero data on each row in U.
## #
## # We can use this to "sparsify" the backsolve even though the data structure is dense.
## #
## # This assumes that each row has at least one nonzero entry (which is always the case for an invertible matrix).
## #
## Unz = np.nonzero(U)
## maxcols = []
## rowprev = -1
## n = len(Unz[0])
## i = n - 1
## while i >= 0:
## if Unz[0][i] != rowprev:
## maxcols.append(Unz[1][i])
## rowprev = Unz[0][i]
## i -= 1
## maxcols.reverse()
## maxcols = np.array( maxcols, dtype=np.intc, order="C" )
## print( U )
## print( maxcols )
# Visualize
#
xx = np.linspace(-1., 1., 100001) # the good thing about the fast approach... smooth curves!
plt.figure(1)
plt.clf()
for func in stuff.N:
plt.plot( xx, func(xx) )
plt.axis('tight')
a = plt.axis()
plt.axis( [ a[0], a[1], a[2]*1.05, a[3]*1.05 ] )
plt.grid(b=True, which='both')
plt.title('Hierarchical basis functions')
# Try some operations on the original Legendre polynomials
#
# As long as we keep the Polynomial objects, we can multiply them the intuitive way, producing a new Polynomial:
#
print( stuff.P[2]*stuff.P[3] ) # => poly([ 0. 0.75 0. -3.5 0. 3.75])
# We can also differentiate them, which is useful for constructing the mass matrix:
#
print( stuff.P[2].deriv(1)*stuff.P[3] ) # => poly([ 0. 0. -9. 0. 15.])
# Also integration is supported.
#
# p.integ() returns the definite integral, as a Polynomial object, from lbnd to an unspecified upper limit x, adding the integration constant k.
# The value of x is chosen when calling the resulting object.
#
# Legendre polynomials are L2-orthogonal on [-1,1]:
print( ( (stuff.P[2]*stuff.P[2]).integ(lbnd=-1, k=0) )(1.0) ) # 2/(2 n + 1); here n = 2, so this = 2/5 = 0.4
print( ( (stuff.P[2]*stuff.P[3]).integ(lbnd=-1, k=0) )(1.0) ) # zero
# The integral of dPn/dx * Pm over the interval is zero if:
#
# - n + m is even
# - n < m (and by the previous condition, also n <= m)
#
# These observations are based on the L2-orthogonality and the relation
#
# (2 n + 1) P_n = (d/dx)( P_{n+1} - P_{n-1} ) (*)
#
# which can be used to get rid of the derivative. The relation (*) follows from Bonnet’s recursion formula,
#
# (n + 1) P_{n+1} = (2 n + 1) P_n - n P_{n-1}
#
# By recursive application, (*) leads to the representation
#
# (d/dx) P_{n+1} = (2 n + 1) P_n + ( 2 (n - 2) + 1 ) P_{n-2} + ( 2 (n - 4) + 1 ) P_{n-4} + ...
#
# which is guaranteed to bottom out at P_1 and P_0 (by using P_0 = 1 and P_1 = x in (*)).
#
# See
# https://en.wikipedia.org/wiki/Legendre_polynomials#Additional_properties_of_Legendre_polynomials
#
print( ( (stuff.P[3].deriv(1)*stuff.P[3]).integ(lbnd=-1, k=0) )(1.0) ) # zero, n + m even
print( ( (stuff.P[3].deriv(1)*stuff.P[1]).integ(lbnd=-1, k=0) )(1.0) ) # zero, n + m even
print( ( (stuff.P[2].deriv(1)*stuff.P[3]).integ(lbnd=-1, k=0) )(1.0) ) # zero, n < m
print( ( (stuff.P[3].deriv(1)*stuff.P[2]).integ(lbnd=-1, k=0) )(1.0) ) # nonzero (derivative of p3 contains p2, p0)
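# Concrete instance of the representation above: for n+1 = 3 (so n = 2),
# (d/dx) P_3 = 5 P_2 + 1 P_0; the coefficient difference below should be ~zero.
print( (stuff.P[3].deriv(1) - (5.*stuff.P[2] + stuff.P[0])).coef )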
# naive solve (repeat the LU decomposition process each time)
#
def method1(reps, A, b, x):
for j in range(reps):
# dgesv.solve( A, b[j,:], x )
dgesv.solve( A, b, x )
# decompose once, then solve
#
def method2(reps, A, b, x):
LU,p = dgesv.lup_packed(A)
for j in range(reps):
# dgesv.solve_decomposed( LU, p, b[j,:], x )
dgesv.solve_decomposed( LU, p, b, x )
# decompose once, then solve, utilize banded structure
#
def method3(reps, A, b, x):
LU,p = dgesv.lup_packed(A)
mincols,maxcols = dgesv.find_bands(LU, 1e-15)
for j in range(reps):
# dgesv.solve_decomposed_banded( LU, p, mincols, maxcols, b[j,:], x )
dgesv.solve_decomposed_banded( LU, p, mincols, maxcols, b, x )
class MyTimer:
t0 = None
l = None
def __init__(self, label=""):
self.label = label
def __enter__(self):
self.t0 = time.time()
def __exit__(self, type, value, traceback):
dt = time.time() - self.t0
l = ("%s: " % self.label) if len(self.label) else "time taken: "
print( "%s%gs" % (l, dt) )
if __name__ == '__main__':
main()
plt.show()
# # Running the benchmark loop at the Python end makes the banded version look slower (for our matrix M, the C code is actually ~3x faster than the generic non-banded version),
# # because a large majority of the execution time is taken up by data conversion from Python to C and back (and Python asserts, if enabled).
# #
# # To get reliable results on the C code only (which is a realistic use case if used from inside a Cython-accelerated solver, which is the whole point of dgesv.pyx),
# # the looping must be done inside dgesv.pyx.
# #
# reps = 100000
# for q in range(3, 16):
# stuff = RandomPileOfTestStuff(q)
# n = np.shape(stuff.C)[0]
## b = np.random.uniform(0.0, 1.0, size=(reps,n)) # this makes slicing part of the performance measurement - not good
# b = np.random.uniform(0.0, 1.0, size=(n,))
# x = np.empty( [n], dtype=np.float64, order="C" )
# print( "Timings for %d runs" % reps )
# with MyTimer("%dx%d naive" % (n,n)) as mt:
# method1(reps, stuff.C, b, x)
# with MyTimer("%dx%d decompose-once" % (n,n)) as mt:
# method2(reps, stuff.C, b, x)
# with MyTimer("%dx%d decompose-once-banded" % (n,n)) as mt:
# method3(reps, stuff.C, b, x)
| bsd-2-clause |
ChanChiChoi/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
arabenjamin/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
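# A minimal usage sketch for the functional interface (comment only; values are illustrative):
#
# >>> X = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
# >>> X_scaled = scale(X)
# >>> X_scaled.mean(axis=0)   # ~ [0., 0., 0.]
# >>> X_scaled.std(axis=0)    # ~ [1., 1., 1.]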
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
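# A minimal usage sketch for MinMaxScaler (comment only; data values are made up):
#
# >>> X_train = np.array([[1., 2.], [3., 4.], [5., 6.]])
# >>> scaler = MinMaxScaler(feature_range=(0, 1)).fit(X_train)
# >>> scaler.transform(X_train)      # each column mapped onto [0, 1]
# >>> scaler.transform([[2., 3.]])   # new data reuses the training min/max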
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than the others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
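# Comment-only sketch of why the robust statistics matter: with a single large outlier
# the median/IQR used by RobustScaler barely move, unlike mean/std (values illustrative):
#
# >>> X = np.array([[1.], [2.], [3.], [4.], [1000.]])
# >>> RobustScaler().fit(X).center_     # median -> 3.0
# >>> StandardScaler().fit(X).mean_     # mean -> 202.0, dragged by the outlier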
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
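# Comment-only sketch: after l2 normalization a plain dot product of two rows equals the
# cosine similarity of the original rows (values illustrative):
#
# >>> X = np.array([[3., 4.], [1., 0.]])
# >>> Xn = Normalizer(norm='l2').fit_transform(X)
# >>> np.dot(Xn[0], Xn[1])   # -> 0.6, the cosine of the angle between the rows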
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
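# Comment-only sketch with the default threshold of 0.0 (strictly positive values map
# to 1, everything else to 0; values illustrative):
#
# >>> X = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
# >>> Binarizer(threshold=0.0).transform(X)
# # -> [[1., 0., 1.], [1., 0., 0.], [0., 1., 0.]]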
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
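# For the training kernel K (fit followed by transform on the same K) the centering
# above amounts to the standard formula (a sketch of the math, not extra behaviour):
#
#   K_centered = K - 1_n K - K 1_n + 1_n K 1_n
#
# where 1_n is the n x n matrix whose entries are all 1/n; this is the kernel of the
# column-wise centered (implicit) feature map phi(X).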
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known from fit,
# i.e. less than n_values_, using the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
altermarkive/Machine-Learning-Course | machine-learning-ex2/ex2/ex2_reg.py | 1 | 10836 | #!/usr/bin/env python3
import numpy as np
import matplotlib
import matplotlib.cm as cm
# Force matplotlib to not use any X Windows backend (must be called before importing pyplot)
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
import scipy.optimize as optimize
import sklearn.linear_model as linear_model
import sklearn.preprocessing as preprocessing
def plotData(X, y, labels):
#PLOTDATA Plots the data points X and y into a new figure
# PLOTDATA(x,y) plots the data points with + for the positive examples
# and o for the negative examples. X is assumed to be a Mx2 matrix.
# Create New Figure
#figure; hold on;
# ====================== YOUR CODE HERE ======================
# Instructions: Plot the positive and negative examples on a
# 2D plot, using the option 'k+' for the positive
# examples and 'ko' for the negative examples.
#
pos = np.nonzero(y == 1)
neg = np.nonzero(y == 0)
pos_handle = plt.plot(X[pos, 0], X[pos, 1], 'k+', linewidth=2, markersize=7, label=labels[0])[0]
neg_handle = plt.plot(X[neg, 0], X[neg, 1], 'ko', markerfacecolor='y', markersize=7, label=labels[1])[0]
# =========================================================================
#hold off;
return (pos_handle, neg_handle)
def mapFeature(X1, X2):
# MAPFEATURE Feature mapping function to polynomial features
#
# MAPFEATURE(X1, X2) maps the two input features
# to quadratic features used in the regularization exercise.
#
# Returns a new feature array with more features, consisting of
# X1, X2, X1.^2, X2.^2, X1*X2, X1*X2.^2, etc.
#
# Inputs X1, X2 must be the same size
#
# n = X1.shape[0]
# degree = 6
# out = np.ones((n, 1)).reshape((n, 1))
# for i in range(1, degree + 1):
# for j in range(i + 1):
# term1 = X1 ** (i - j)
# term2 = X2 ** j
# out = np.hstack((out, (term1 * term2).reshape((n, 1))))
data = np.c_[X1, X2]
poly = preprocessing.PolynomialFeatures(6)
out = poly.fit_transform(data)
return out
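# Note (sketch): for two inputs and degree 6 this yields C(6 + 2, 2) = 28 columns,
# including the leading column of ones produced by PolynomialFeatures.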
def sigmoid(z):
#SIGMOID Compute sigmoid function
# J = SIGMOID(z) computes the sigmoid of z.
# You need to return the following variables correctly
#g = zeros(size(z));
# ====================== YOUR CODE HERE ======================
# Instructions: Compute the sigmoid of each value of z (z can be a matrix,
# vector or scalar).
g = np.divide(1, (1 + np.power(np.exp(1), -z)))
# =============================================================
return g
def costFunctionReg(theta, X, y, lambda_value):
#COSTFUNCTIONREG Compute cost and gradient for logistic regression with regularization
# J = COSTFUNCTIONREG(theta, X, y, lambda) computes the cost of using
# theta as the parameter for regularized logistic regression and the
# gradient of the cost w.r.t. to the parameters.
# Initialize some useful values
m = y.shape[0] # number of training examples
# You need to return the following variables correctly
#J = 0;
#grad = zeros(size(theta));
# ====================== YOUR CODE HERE ======================
# Instructions: Compute the cost of a particular choice of theta.
# You should set J to the cost.
# Compute the partial derivatives and set grad to the partial
# derivatives of the cost w.r.t. each parameter in theta
h = sigmoid(np.dot(X, theta))
cost = np.sum(np.dot(-y.T, np.log(h)) - np.dot((1 - y.T), np.log(1 - h)))
J = (cost / m) + (lambda_value / (2 * m)) * np.sum(theta[1:] ** 2)
extra = lambda_value * theta
extra[0] = 0
grad = (np.dot(X.T, h - y) + extra) / m
# =============================================================
return (J, grad)
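# For reference, a sketch of the formulas implemented above (h = sigmoid(X * theta)):
#
#   J(theta) = (1/m) * sum( -y*log(h) - (1 - y)*log(1 - h) ) + (lambda/(2m)) * sum_{j>=1} theta_j^2
#   grad_j   = (1/m) * sum_i( (h_i - y_i) * x_ij ) + (lambda/m) * theta_j   for j >= 1
#   grad_0   = (1/m) * sum_i( (h_i - y_i) * x_i0 )                          (bias term is not regularized)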
def costRegOnly(theta, X, y, lambda_value):
m = y.shape[0]
h = sigmoid(np.dot(X, theta))
if np.any(h <= 0) or np.any(h >= 1):
return np.nan
cost = np.sum(np.dot(-y.T, np.log(h)) - np.dot((1 - y.T), np.log(1 - h)))
J = (cost / m) + (lambda_value / (2 * m)) * np.sum(np.power(theta[1:], 2))
return J
def gradientRegOnly(theta, X, y, lambda_value):
m = y.shape[0]
h = sigmoid(np.dot(X, theta.reshape(-1, 1)))
extra = lambda_value * theta
extra[0] = 0
extra = extra.reshape(-1, 1)
grad = (np.dot(X.T, h - y) + extra) / m
return grad.flatten()
def plotDecisionBoundary(theta, X, y, labels):
#PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
#the decision boundary defined by theta
# PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
# positive examples and o for the negative examples. X is assumed to be
# a either
# 1) Mx3 matrix, where the first column is an all-ones column for the
# intercept.
# 2) MxN, N>3 matrix, where the first column is all-ones
# Plot Data
pos_handle, neg_handle = plotData(X[:, 1:3], y, labels)
#hold on
if X.shape[1] <= 3:
# Only need 2 points to define a line, so choose two endpoints
plot_x = [np.min(X[:, 1]) - 2, np.max(X[:, 1]) + 2]
# Calculate the decision boundary line
plot_y = np.dot((-1.0 / theta[2]), (np.dot(theta[1], plot_x) + theta[0]))
# Plot, and adjust axes for better viewing
boundary_handle = plt.plot(plot_x, plot_y, label='Decision Boundary')[0]
# Legend, specific for the exercise
#axis([30, 100, 30, 100])
else:
# Here is the grid range
u = np.linspace(-1, 1.5, 50)
v = np.linspace(-1, 1.5, 50)
z = np.zeros((u.size, v.size))
# Evaluate z = theta*x over the grid
for i in range(u.size):
for j in range(v.size):
z[i, j] = np.dot(mapFeature(np.array([u[i]]), np.array([v[j]])), theta)
z = z.T # important to transpose z before calling contour
# Plot z = 0
# Notice you need to specify the range [0, 0]
u, v = np.meshgrid(u, v)
boundary_handle = plt.contour(u, v, z, [0], linewidth=2).collections[0]
boundary_handle.set_label('Decision Boundary')
#hold off
return (pos_handle, neg_handle, boundary_handle)
def predict(theta, X):
#PREDICT Predict whether the label is 0 or 1 using learned logistic
#regression parameters theta
# p = PREDICT(theta, X) computes the predictions for X using a
# threshold at 0.5 (i.e., if sigmoid(theta'*x) >= 0.5, predict 1)
m = X.shape[0] # Number of training examples
# You need to return the following variables correctly
#p = zeros(m, 1);
# ====================== YOUR CODE HERE ======================
# Instructions: Complete the following code to make predictions using
# your learned logistic regression parameters.
# You should set p to a vector of 0's and 1's
#
p = sigmoid(np.dot(X, theta)) >= 0.5
return p.astype(int)
def ex2_reg():
# Machine Learning Online Class - Exercise 2: Logistic Regression
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the second part
# of the exercise which covers regularization with logistic regression.
#
# You will need to complete the following functions in this exericse:
#
# sigmoid.m
# costFunction.m
# predict.m
# costFunctionReg.m
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
#
# Initialization
#clear ; close all; clc
# Load Data
# The first two columns contains the X values and the third column
# contains the label (y).
data = np.loadtxt('ex2data2.txt', delimiter=',')
X = np.reshape(data[:, 0:2], (data.shape[0], 2))
y = np.reshape(data[:, 2], (data.shape[0], 1))
pos_handle, neg_handle = plotData(X, y, ['y = 1', 'y = 0'])
# Put some labels
#hold on;
# Labels and Legend
plt.xlabel('Microchip Test 1')
plt.ylabel('Microchip Test 2')
# Specified in plot order
plt.legend(handles=[pos_handle, neg_handle])
plt.savefig('figure1.reg.png')
# =========== Part 1: Regularized Logistic Regression ============
# In this part, you are given a dataset with data points that are not
# linearly separable. However, you would still like to use logistic
# regression to classify the data points.
#
# To do so, you introduce more features to use -- in particular, you add
# polynomial features to our data matrix (similar to polynomial
# regression).
#
# Add Polynomial Features
# Note that mapFeature also adds a column of ones for us, so the intercept
# term is handled
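    # (mapFeature is presumably defined earlier in this file; in the standard
    #  version of this exercise it maps two features x1, x2 to all polynomial
    #  terms up to degree 6 -- 1, x1, x2, x1^2, x1*x2, ..., x2^6 -- i.e. 28
    #  columns including the leading column of ones.)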
X = mapFeature(X[:, 0], X[:, 1])
# Initialize fitting parameters
initial_theta = np.zeros((X.shape[1], 1))
# Set regularization parameter lambda to 1
lambda_value = 1
# Compute and display initial cost and gradient for regularized logistic
# regression
cost, grad = costFunctionReg(initial_theta, X, y, lambda_value)
print('Cost at initial theta (zeros): %f' % cost)
print('Program paused. Press enter to continue.')
#input()
# ============= Part 2: Regularization and Accuracies =============
# Optional Exercise:
# In this part, you will get to try different values of lambda and
    # see how regularization affects the decision boundary.
    #
    # Try the following values of lambda (0, 1, 10, 100); an optional,
    # flag-guarded sketch looping over these values appears at the end of
    # this function.
    #
# How does the decision boundary change when you vary lambda? How does
# the training set accuracy vary?
#
# Initialize fitting parameters
initial_theta = np.zeros((X.shape[1], 1))
# Set regularization parameter lambda to 1 (you should vary this)
lambda_value = 1
# Set Options
#options = optimset('GradObj', 'on', 'MaxIter', 400);
# Optimize
result = optimize.minimize(costRegOnly, initial_theta, args=(X, y, lambda_value), method=None, jac=gradientRegOnly, options={"maxiter":400})
theta = result.x
# Plot Boundary
pos_handle, neg_handle, boundary_handle = plotDecisionBoundary(theta, X, y, ['y = 1', 'y = 0'])
plt.title('lambda = %g' % lambda_value)
# Labels and Legend
plt.xlabel('Microchip Test 1')
plt.ylabel('Microchip Test 2')
plt.legend(handles=[pos_handle, neg_handle, boundary_handle])
plt.savefig('figure2.reg.png')
# Compute accuracy on our training set
p = predict(theta, X)
print('Train Accuracy: %f' % (np.mean((p == y.flatten()).astype(int)) * 100))
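    # Optional, minimal sketch of the lambda exploration suggested above
    # (not part of the original exercise flow). It is guarded by a flag so
    # the default behaviour of ex2_reg() is unchanged.
    explore_lambdas = False
    if explore_lambdas:
        for lam in (0, 1, 10, 100):
            res = optimize.minimize(costRegOnly, np.zeros(X.shape[1]),
                                    args=(X, y, lam), method=None,
                                    jac=gradientRegOnly, options={"maxiter": 400})
            acc = np.mean((predict(res.x, X) == y.flatten()).astype(int)) * 100
            print('lambda = %g -> Train Accuracy: %.2f' % (lam, acc))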
if __name__ == "__main__":
ex2_reg()
| mit |
MiroK/lega | demo/poisson_1d_fourier.py | 1 | 5037 | #
# -u`` = f in (0, 2pi) with u(0) = u(2*pi)
#
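#
# A note on the method: writing u = sum_k a_k*cos(k*x) + b_k*sin(k*x),
# differentiation gives -u'' = sum_k k^2*(a_k*cos(k*x) + b_k*sin(k*x)), so in
# wave-number space the equation decouples and u_hat_k = f_hat_k / k^2 for
# k >= 1, while the k = 0 (constant) mode is fixed by requiring the solution
# to have zero mean (orthogonality to the nullspace), as done in solve() below.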
from sympy import Symbol, integrate, pi, lambdify
import lega.fourier_basis as fourier
import numpy as np
def get_rhs(u=None, n_max=32):
    '''Verify u and compute the right-hand side f.'''
x = Symbol('x')
# We can make u as a series with n_max as highest frequency
if u is None:
        # The constant is thrown out so that the solution is orthogonal to the nullspace
basis = fourier.fourier_basis(n_max)[1:]
coefs = np.random.random(len(basis))
u = sum(c*v for c, v in zip(coefs, basis))
    # For a given solution we need to check its properties
else:
assert abs(integrate(u, (x, 0, 2*pi))) < 1E-15
assert abs(u.subs(x, 0) - u.subs(x, 2*pi)) < 1E-15
f = -u.diff(x, 2)
return u, f
def solve(n, f):
'''Solve the problem with n the highest frequency.'''
# FFT on f
x = Symbol('x')
points = np.linspace(0, 2*np.pi, 2*n, endpoint=False)
f = lambdify(x, f, 'numpy')
F = f(points)
F_hat = fourier.fft(F)
    # If the FFT were an exact recipe, then a way to check whether f is
    # orthogonal to the constant mode would be to see if abs(F_hat[0]) < 1E-15
# Solve Poisson in wave numbers
ks = fourier.stiffness_matrix(n)
# The first coeff is 0 - orthogonality
U_hat = np.r_[0, F_hat[1:]/ks[1:]]
return U_hat
# -----------------------------------------------------------------------------
if __name__ == '__main__':
from sympy.plotting import plot
from sympy import sin
from math import log
x = Symbol('x')
# Qualitative check
if False:
n_max = 30
u, f = get_rhs(u=None, n_max=n_max)
        # Suppose u = sin(k*x) and we take the basis as fourier_basis(k), so u
        # should be representable in the basis. But an fft with 2*k points will
        # not see it: e.g. to resolve sin(x) you need 3 points while we would
        # use only 2. Using k+1 frequencies in the basis fixes this.
Uh_hat = solve(n_max+1, f)
uh = fourier.fourier_function(Uh_hat)
        # Plot the final numerical one against the analytical solution
plot(u-uh, (x, 0, 2*pi))
# Quantitative, smooth
if False:
n_max = 50
u, f = get_rhs(u=None, n_max=n_max)
u_lambda = lambdify(x, u, 'numpy')
        # Solve with different frequencies
for n in [8, 16, 32, 36, 40, 44, 48, 52, 64]:
Uh_hat = solve(n, f)
# Grid represent the solution
Uh = fourier.ifft(Uh_hat)
# Represent the solution on a fine grid
m = len(Uh)
points = np.linspace(0, 2*np.pi, m, endpoint=False)
U = u_lambda(points)
error = np.linalg.norm(U - Uh)/m
if n > 8:
rate = log(error/error_)/log(n_/float(n))
print n, error, rate
error_ = error
n_ = n
uh = fourier.fourier_function(Uh_hat)
        # Plot the final numerical one against the analytical solution
plot(u-uh, (x, 0, 2*pi))
# Quantitative, kink
if True:
u = x*(x-2*pi)*(x-pi)
u, f = get_rhs(u=u)
u_lambda = lambdify(x, u, 'numpy')
        # Solve with different frequencies
for n in (2**i for i in range(5, 15)):
Uh_hat = solve(n, f)
# Grid represent the solution
Uh = fourier.ifft(Uh_hat)
# Represent the solution on a fine grid
m = len(Uh)
points = np.linspace(0, 2*np.pi, m, endpoint=False)
U = u_lambda(points)
error = np.linalg.norm(U - Uh)/m
if n > 32:
rate = log(error/error_)/log(n_/float(n))
print n, error, rate
error_ = error
n_ = n
# Plot the error
import matplotlib.pyplot as plt
plt.figure()
plt.plot(points, U, label='$u$')
plt.plot(points, Uh, label='$uh$')
plt.xlim((0, 2*np.pi))
plt.legend(loc='best')
# Let's relate the rate of convergence (in l2 norm) to the rate with
# which the coefficients of the Fourier image of f decrease
F = lambdify(x, f)(points)
F_hat = fourier.fft(F)
# Skip constant - orthogonality
F_hat_cos = F_hat[1:m/2+1]
F_hat_sin = F_hat[m/2+1:]
plt.figure()
# The function periodically extended is odd -> no cos
# plt.plot(F_hat_cos, label='$a_k$')
# Spectrum is concerned with magnitude
F_hat_sin = np.abs(F_hat_sin)
ks = np.arange(1, len(F_hat_sin)+1)
# Hide zeros
not0 = np.where(F_hat_sin > 1E-14)[0]
# Don't forget the action of the Laplacian
plt.loglog(ks[not0], F_hat_sin[not0]/(ks[not0]**2), label='$b_k$')
plt.loglog(ks, ks**(-3.), linestyle='--', label='rate 3')
plt.legend(loc='best')
# The message is that the rate is related to how the spectrum of f
# decreases! Too lazy/busy now to find exact relation.
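        # Filling in the relation for this particular example:
        # u = x*(x-2*pi)*(x-pi) gives f = -u'' = 6*(pi - x) on (0, 2*pi), whose
        # periodic extension is a sawtooth with sine coefficients b_k = 12/k.
        # Dividing by k**2 (the Laplacian) gives solution coefficients ~ 12/k**3,
        # which is why the k**-3 reference line above matches the spectrum.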
plt.show()
| mit |
IFDYS/IO_MPI | view_result.py | 1 | 6681 | #!/usr/bin/env python
from numpy import *
from matplotlib.pyplot import *
import matplotlib.pylab as pylab
import os
import time
import re
import obspy.signal
from scipy import signal
def read_slice(fname):
with open(fname) as fslice:
slice_nx,slice_ny,slice_nz = fslice.readline().split()
slice_x = fslice.readline().split()
slice_y = fslice.readline().split()
slice_z = fslice.readline().split()
slice_nx = int(slice_nx);slice_ny = int(slice_ny);slice_nz = int(slice_nz)
return slice_nx,slice_ny,slice_nz
def read_rec(frec):
global nrec
with open(frec) as fp:
        nrec = int(fp.readline().strip('\n'))
def read_par():
global nx,ny,nz,slice_nx,slice_ny,slice_nz,nt,dx,dy,dz,dt
with open('par.in') as fpar:
fpar.readline()
dx,dy,dz,dt = fpar.readline().split()
print 'dx dy dz dt: ',dx,dy,dz,dt
fpar.readline()
nx,ny,nz,nt = fpar.readline().split()
nx = int(nx);ny = int(ny);nz = int(nz);nt=int(nt)
print 'nx ny nz nt: ',nx,ny,nz,nt
fpar.readline()
nt_src = fpar.readline()
print 'nt of src: ',nt_src
fpar.readline()
step_t_wavefield,step_x_wavefield = fpar.readline().split()
        print 'output time step and space step of wavefield: ',step_t_wavefield,step_x_wavefield
fpar.readline()
step_slice = fpar.readline()
print 'output step of slice: ',step_slice
fpar.readline()
npml_x,npml_y,npml_z= fpar.readline().split()
print 'npml x y z: ',npml_x,npml_y,npml_z
fpar.readline()
fpar.readline() #pml m kapxmax kapymax kapzmax alpha
fpar.readline()
fsrc= fpar.readline().strip('\n')
print 'src.in: ',fsrc
fpar.readline()
frec= fpar.readline().strip('\n')
print 'rec.in: ',frec
fpar.readline()
feps = fpar.readline().strip('\n')
fpar.readline()
fmu = fpar.readline().strip('\n')
fpar.readline()
fsig= fpar.readline().strip('\n')
fpar.readline()
fslice= fpar.readline().strip('\n')
slice_nx,slice_ny,slice_nz = read_slice(fslice)
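# For reference, read_par() above expects par.in to alternate one description
# line with one value line, in this order (inferred from the reads above, so
# treat this as a sketch rather than a spec):
#   dx dy dz dt
#   nx ny nz nt
#   nt of the source
#   output time step and space step of the wavefield
#   output step of the slices
#   npml_x npml_y npml_z
#   pml m kapxmax kapymax kapzmax alpha
#   src file, rec file, eps file, mu file, sigma file, slice file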
def view_slice():
xlist = os.popen('ls *xSlice*dat').readlines()
ylist = os.popen('ls *ySlice*dat').readlines()
zlist = os.popen('ls *zSlice*dat').readlines()
i = 0
for xname in xlist:
print xname
yname = ylist[i]
zname = zlist[i]
i += 1
xdata = loadtxt(xname.strip('\n'))
ydata = loadtxt(yname.strip('\n'))
zdata = loadtxt(zname.strip('\n'))
xslice = reshape(xdata,(slice_nx,ny,nz))
yslice = reshape(ydata,(slice_ny,nx,nz))
zslice = reshape(zdata,(slice_nz,nx,ny))
# data = reshape(data,(126,101))
clf()
imshow(xslice[0])
colorbar()
savefig(re.findall("^\w+",xname)[0]+".png")
clf()
imshow(yslice[0])
colorbar()
savefig(re.findall("^\w+",yname)[0]+".png")
clf()
imshow(zslice[0])
colorbar()
savefig(re.findall("^\w+",zname)[0]+".png")
# show()
# show(block=False)
# time.sleep(0.5)
# close()
def view_gather():
global nrec,nt,zero_offset
STD_gather = loadtxt('../STD_gather.dat')
ilist = os.popen('ls Rank*gather*dat').readlines()
ii = 0
num_name = 0
isum = []
zero_offset = []
for name in ilist:
# num_gather = 0
# figure()
ii = 0
isum = []
gather = loadtxt(name.strip('\n'))
# if gather.max() == 0 and gather.min() == 0:
# continue
if shape(gather)[0] == 0:
continue
if shape(shape(gather))[0] == 1:
isum.append(gather)
plot(gather/max(abs(gather))+ii)
else:
for i in range(len(gather[:,0])):
# num_gather += 1
if(num_name == i):
data = gather[i,:]-STD_gather
# data = signal.detrend(data)
# n = 2**int(ceil(log2(len(data))))
# freqs = np.linspace(0, 1/double(dt)/2, n/2+1)
# sp = np.fft.rfft(data,n)/n
# W = abs(sp)
# plot(freqs,W)
# show()
# # Ddata = signal.detrend(data[200:])
# # lowpass = obspy.signal.filter.lowpass(data,10000000,1.0/double(dt),corners=4,zerophase=True)
# highpass = obspy.signal.filter.highpass(data,4e7,1.0/double(dt),corners=4,zerophase=True)
# # result = gather[i,:] + lowpass
# result = highpass
# # plot(Ddata)
# # plot(lowpass)
# plot(result)
# plot(data)
zero_offset.append(data/max(abs(data)))
# plot(data)
# show()
# zero_offset.append(result/max(result))
# plot(gather[i,:]/max(abs(gather[i,:]))+ii)
isum.append(gather[i,:]/max(abs(gather[i,:])))
# print i,num_name
# plot(gather[i,:]/max(abs(gather[i,:]))+ii)
# ii =ii+1
num_name += 1
# show()
# figure()
# imshow(isum,cmap='gray',origin='lower',extent=(0,5000,0,3000),vmax=0.1, vmin=-0.1)
# show()
figure()
imshow(zero_offset,cmap='gray',origin='lower',extent=(0,5000,0,3000))
show()
# savefig('gather.png')
# for i in range(len(isum)):
# figure()
# plot(isum[i])
# show()
def prepare_RTM():
idir = './RTM/'
if not os.path.exists(idir):
os.mkdir(idir)
os.system('cp FDTD_MPI par.in rec.in mkmodel.py ./RTM/')
# os.system('cp rec.in src_RTM.in')
# with file('src_RTM.in', 'aw') as fsrc:
# for i in range(len(zero_offset)):
# savetxt(fsrc,zero_offset[i][::-1])
with file('rec.in','r') as frec:
nrec = int(frec.readline().split()[0])
fsrc=open('src_RTM.in','w')
print "nrec nt: ",int(nrec),int(nt)
fsrc.write("%d %d\n" % (nrec, int(nt)))
fsrc.close()
fsrc=open('src_RTM.in','a')
for i in range(nrec):
fsrc.write(frec.readline())
for i in range(nrec):
savetxt(fsrc,zero_offset[i][::-1])
    fsrc.close()
os.system('cp src_RTM.in ./RTM/src.in')
read_par()
os.chdir("./Output/")
# view_gather()
view_slice()
# os.chdir("../")
# prepare_RTM()
#view_wavefield()
| gpl-2.0 |
theislab/scvelo | scvelo/plotting/palettes.py | 1 | 4858 | """Color palettes in addition to matplotlib's palettes."""
from matplotlib import cm, colors
# Colorblindness adjusted vega_10
# See https://github.com/theislab/scanpy/issues/387
vega_10 = list(map(colors.to_hex, cm.tab10.colors))
vega_10_scanpy = vega_10.copy()
vega_10_scanpy[2] = "#279e68" # green
vega_10_scanpy[4] = "#aa40fc" # purple
vega_10_scanpy[8] = "#b5bd61"  # khaki
# default matplotlib 2.0 palette
# see 'category20' on https://github.com/vega/vega/wiki/Scales#scale-range-literals
vega_20 = list(map(colors.to_hex, cm.tab20.colors))
# reorderd, some removed, some added
vega_20_scanpy = [
*vega_20[0:14:2],
*vega_20[16::2], # dark without grey
*vega_20[1:15:2],
*vega_20[17::2], # light without grey
"#ad494a",
"#8c6d31", # manual additions
]
vega_20_scanpy[2] = vega_10_scanpy[2]
vega_20_scanpy[4] = vega_10_scanpy[4]
vega_20_scanpy[7] = vega_10_scanpy[8]  # khaki shifted by missing grey
default_20 = vega_20_scanpy
# fmt: off
# orig reference http://epub.wu.ac.at/1692/1/document.pdf
zeileis_26 = [
"#023fa5", "#7d87b9", "#bec1d4", "#d6bcc0", "#bb7784", "#8e063b", "#4a6fe3",
"#8595e1", "#b5bbe3", "#e6afb9", "#e07b91", "#d33f6a", "#11c638", "#8dd593",
"#c6dec7", "#ead3c6", "#f0b98d", "#ef9708", "#0fcfc0", "#9cded6", "#d5eae7",
"#f3e1eb", "#f6c4e1", "#f79cd4", "#7f7f7f", "#c7c7c7", "#1CE6FF", "#336600",
]
default_26 = zeileis_26
# from godsnotwheregodsnot.blogspot.de/2012/09/color-distribution-methodology.html
godsnot_64 = [
# "#000000", # remove the black, as often, we have black colored annotation
"#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059",
"#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF",
"#997D87", "#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF",
"#4A3B53", "#FF2F80", "#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92",
"#FF90C9", "#B903AA", "#D16100", "#DDEFFF", "#000035", "#7B4F4B", "#A1C299",
"#300018", "#0AA6D8", "#013349", "#00846F", "#372101", "#FFB500", "#C2FFED",
"#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09", "#00489C", "#6F0062",
"#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66", "#885578",
"#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C",
"#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F",
"#938A81", "#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757",
"#C8A1A1", "#1E6E00", "#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C",
"#772600", "#D790FF", "#9B9700", "#549E79", "#FFF69F", "#201625", "#72418F",
"#BC23FF", "#99ADC0", "#3A2465", "#922329", "#5B4534", "#FDE8DC", "#404E55",
"#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C"
]
default_64 = godsnot_64
# colors in addition to matplotlib's colors
additional_colors = {
'gold2': '#eec900', 'firebrick3': '#cd2626', 'khaki2': '#eee685',
'slategray3': '#9fb6cd', 'palegreen3': '#7ccd7c', 'tomato2': '#ee5c42',
'grey80': '#cccccc', 'grey90': '#e5e5e5', 'wheat4': '#8b7e66', 'grey65': '#a6a6a6',
'grey10': '#1a1a1a', 'grey20': '#333333', 'grey50': '#7f7f7f', 'grey30': '#4d4d4d',
'grey40': '#666666', 'antiquewhite2': '#eedfcc', 'grey77': '#c4c4c4',
'snow4': '#8b8989', 'chartreuse3': '#66cd00', 'yellow4': '#8b8b00',
'darkolivegreen2': '#bcee68', 'olivedrab3': '#9acd32', 'azure3': '#c1cdcd',
'violetred': '#d02090', 'mediumpurple3': '#8968cd', 'purple4': '#551a8b',
'seagreen4': '#2e8b57', 'lightblue3': '#9ac0cd', 'orchid3': '#b452cd',
'indianred 3': '#cd5555', 'grey60': '#999999', 'mediumorchid1': '#e066ff',
'plum3': '#cd96cd', 'palevioletred3': '#cd6889'
}
# fmt: on
from typing import Mapping, Sequence
def _plot_color_cylce(clists: Mapping[str, Sequence[str]]):
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, BoundaryNorm
fig, axes = plt.subplots(nrows=len(clists)) # type: plt.Figure, plt.Axes
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.3, right=0.99)
axes[0].set_title("Color Maps/Cycles", fontsize=14)
for ax, (name, clist) in zip(axes, clists.items()):
n = len(clist)
ax.imshow(
np.arange(n)[None, :].repeat(2, 0),
aspect="auto",
cmap=ListedColormap(clist),
norm=BoundaryNorm(np.arange(n + 1) - 0.5, n),
)
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3] / 2.0
fig.text(x_text, y_text, name, va="center", ha="right", fontsize=10)
# Turn off all ticks & spines
for ax in axes:
ax.set_axis_off()
fig.show()
if __name__ == "__main__":
_plot_color_cylce(
{name: colors for name, colors in globals().items() if isinstance(colors, list)}
)
| bsd-3-clause |
yonahbox/ardupilot | libraries/AP_Math/tools/geodesic_grid/plot.py | 110 | 2876 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import icosahedron as ico
import grid
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-2, 2)
ax.set_zlim3d(-2, 2)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
added_polygons = set()
added_sections = set()
def polygons(polygons):
for p in polygons:
polygon(p)
def polygon(polygon):
added_polygons.add(polygon)
def section(s):
added_sections.add(s)
def sections(sections):
for s in sections:
section(s)
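# A possible usage sketch (assuming icosahedron.py and grid.py from this tool
# directory are importable, as they are imported above):
#   import plot
#   plot.polygon(ico.triangles[0]) # highlight one icosahedron triangle
#   plot.sections([4, 5]) # highlight individual sub-triangles
#   plot.show(subtriangles=True)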
def show(subtriangles=False):
polygons = []
facecolors = []
triangles_indexes = set()
subtriangle_facecolors = (
'#CCCCCC',
'#CCE5FF',
'#E5FFCC',
'#FFCCCC',
)
if added_sections:
subtriangles = True
for p in added_polygons:
try:
i = ico.triangles.index(p)
except ValueError:
polygons.append(p)
continue
if subtriangles:
sections(range(i * 4, i * 4 + 4))
else:
triangles_indexes.add(i)
polygons.append(p)
facecolors.append('#DDDDDD')
for s in added_sections:
triangles_indexes.add(int(s / 4))
subtriangle_index = s % 4
polygons.append(grid.section_triangle(s))
facecolors.append(subtriangle_facecolors[subtriangle_index])
ax.add_collection3d(Poly3DCollection(
polygons,
facecolors=facecolors,
edgecolors="#777777",
))
for i in triangles_indexes:
t = ico.triangles[i]
mx = my = mz = 0
for x, y, z in t:
mx += x
my += y
mz += z
ax.text(mx / 2.6, my / 2.6, mz / 2.6, i, color='#444444')
if subtriangles:
ax.legend(
handles=tuple(
mpatches.Patch(color=c, label='Sub-triangle #%d' % i)
for i, c in enumerate(subtriangle_facecolors)
),
)
plt.show()
| gpl-3.0 |
soylentdeen/Graffiti | src/Graffiti.py | 1 | 20420 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This is the main file for launching the PyQt app.
Requirements:
You need Python 2 (+ matplotlib, pylab and pyfits modules), Qt4 and PyQt for this to work
Description:
- Graffiti.py is this Python file, must be executable.
Launch the GUI using the command (in terminal):
> python -i Graffiti.py
- demo.ui is an XML file, editable in a user-friendly manner with Qt Designer or Qt Creator (comes with the Qt installation).
- demo_ui.py is the PyQt Python file, generated with the terminal command :
> pyuic4 demo.ui -o demo_ui.py (pyuic4 comes with PyQt package)
> python /usr/lib64/python/site-packages/PyQt4/uic/pyuic.py Graffiti.ui -o Graffiti_ui.py
This command has to be typed each time the GUI is modified in order to take into account the changes in PyQt (this program).
Note: Once generated DO NOT EDIT the demo_ui.py file. Only PyQtDemo.py (this file) can be edited safely.
"""
import sys# This module provides access to some variables used or maintained by the interpreter and to functions that interact strongly with the interpreter. It is always available.
sys.path.insert(0, './lib') #Add in this python session lib path
import os # enable shell commands in python. See also http://www.pythonforbeginners.com/os/pythons-os-module
#try:
import pyfits # See also http://www.stsci.edu/institute/software_hardware/pyfits
#except:
# from astropy.io import fits as pyfits #https://astropy.readthedocs.org/en/v0.3/io/fits/index.html
from matplotlib.pylab import * #Useful library for plotting stuff (pylab)
from matplotlib.mlab import * #Useful library for plotting stuff
#from libfits import * #useful stuff for reading fits
# Qt4 libraries
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# From the "automatically generated" demo_ui.py file (see above) it imports the "Ui_demo_qt" class that defines everything that was created in the Qt GUI file (demo.ui). Do NOT change anything in the demo_ui file. If you want to change something in the GUI, use Qt designer to edit the demo.ui file and re-run the command: "pyuic4 demo.ui -o demo_ui.py" again.
# Note: The "Ui_demo_qt" name was choosen from the name the user put in the QMainWindow (see demo.ui) in which the pyuic4 command added Ui_ suffix.
from Graffiti_ui import Ui_Graffiti #Required
import pdb # Enter in the debug mode by placing : pdb.set_trace() in your program. Note ipython enables it automatically by entering %pdb
# See also debug mode explanation here:
# http://www.fevrierdorian.com/blog/post/2009/11/04/Un-debugger-dans-Python-pour-voir-pr%C3%A9cis%C3%A9ment-ce-qui-ce-passe-dans-son-code
import VLTTools # Object created for talking to the VLT Tools - Added by C.Deen on 15 Dec 2014
import GuiTools # GUI objects - Added by C. Deen on 16 Dec 2014
"""
__ __ _ ____ _ _ ___ _
| \/ | __ _(_)_ __ / ___| | | |_ _| ___| | __ _ ___ ___
| |\/| |/ _` | | '_ \ | | _| | | || | / __| |/ _` / __/ __|
| | | | (_| | | | | | | |_| | |_| || | | (__| | (_| \__ \__ \
|_| |_|\__,_|_|_| |_| \____|\___/|___| \___|_|\__,_|___/___/
"""
class Graffiti_ui_class( QtGui.QMainWindow ):
#=========================================================================================
# Here we define the class of the main Ui we will manipulate in python.
#=========================================================================================
def __init__( self, parent=None, aortc=None ): # This is what happens when an object of this class is called. It's a kind of init.
QtGui.QWidget.__init__( self, parent )
self.aortc = aortc # aortc is the VLT Connection to the work station
self.ui = Ui_Graffiti() #ui. is the GUI
self.ui.setupUi( self ) # Just do it
self.TTGain = False
self.HOGain = False
# Connects the "Take Background" button with the correct plumbing
QtCore.QObject.connect( self.ui.BackgroundButton,
QtCore.SIGNAL("clicked()"), self.measureBackground)
self.readTipTilt()
# Connect the "Tip" and "Tilt" buttons with the correct plumbing
QtCore.QObject.connect( self.ui.TipButton, QtCore.SIGNAL("clicked()"),
self.adjustTip)
QtCore.QObject.connect( self.ui.TiltButton, QtCore.SIGNAL("clicked()"),
self.adjustTilt)
self.ui.group = QButtonGroup(exclusive=False)
self.ui.group.addButton(self.ui.TT_GainSelector)
self.ui.group.addButton(self.ui.HO_GainSelector)
QtCore.QObject.connect(self.ui.TT_GainSelector,
QtCore.SIGNAL("clicked()"), self.gainSelector)
QtCore.QObject.connect(self.ui.HO_GainSelector,
QtCore.SIGNAL("clicked()"), self.gainSelector)
QtCore.QObject.connect( self.ui.SetGain, QtCore.SIGNAL("clicked()"),
self.setGain)
self.DM_Gui = GuiTools.DM_Gui(aortc=aortc)
"""
#Initialize attributes of the GUI
self.ui.nbPushed = 0 # nb of times the push button was pushed
self.ui.twoStateButtonStatus = "released" #Current state of 2 state button
self.ui.nbItemsComboBox = 0 # Current number of items in the combo box
# We group the 4 radio buttons so that when one is checked all the others are unchecked automaticallly
#self.ui.group = QButtonGroup()
#self.ui.group.addButton(self.ui.radioButton_1)
#self.ui.group.addButton(self.ui.radioButton_2)
#self.ui.group.addButton(self.ui.radioButton_3)
#self.ui.group.addButton(self.ui.radioButton_4)
#We connect objects with the proper signal to interact with them...
QtCore.QObject.connect( self.ui.loadfitsButton, QtCore.SIGNAL("clicked()"), self.selectFITS ) #Connects "loadfitsButton" button to the "selectFITS" method
self.ui.twoStateButton.setCheckable(True)
self.ui.twoStateButton.clicked[bool].connect(self.twoStateButtonIsPushed) # We define the 2 state buttons here...
QtCore.QObject.connect( self.ui.pushButton, QtCore.SIGNAL("clicked()"), self.theButtonIsPushed ) #Connects "pushButton" to the "theButtonIsPushed" method
QtCore.QObject.connect( self.ui.plotRandom, QtCore.SIGNAL("clicked()"), self.theButtonPlotRandomIsPushed ) #Connects "plotRandom" to the "theButtonPlotRandomIsPushed" method
QtCore.QObject.connect( self.ui.okButton, QtCore.SIGNAL("clicked()"), self.theButtonOKIsClicked ) #Connects "OK" button to the "theButtonOKIsClicked" method
QtCore.QObject.connect( self.ui.resetCombobox, QtCore.SIGNAL("clicked()"), self.resetComboboxClicked ) #Connects "OK" button to the "theButtonOKIsClicked" method
#We connect here all the radiobutton to the "radioButtonWasClicked" method
QtCore.QObject.connect( self.ui.radioButton_1, QtCore.SIGNAL("clicked()"), self.radioButtonWasClicked )
QtCore.QObject.connect( self.ui.radioButton_2, QtCore.SIGNAL("clicked()"), self.radioButtonWasClicked )
QtCore.QObject.connect( self.ui.radioButton_3, QtCore.SIGNAL("clicked()"), self.radioButtonWasClicked )
QtCore.QObject.connect( self.ui.radioButton_4, QtCore.SIGNAL("clicked()"), self.radioButtonWasClicked )
#Connects the signal when the combobox is changed
QtCore.QObject.connect(self.ui.ComboBox, QtCore.SIGNAL("currentIndexChanged(QString)"), self.getComboBox)
#End of GUI Class initialization
#"""
def initialize(self):
#print "Hi"
updateDMPos()
def measureBackground(self):
print "Let's measure a background!"
self.aortc.changePixelTapPoint("RAW")
self.aortc.updateAcq()
self.aortc.measureBackground(10)
self.aortc.changePixelTapPoint("CALIB")
self.aortc.updateAcq()
def gainSelector(self):
if(self.ui.TT_GainSelector.isChecked()):
self.TTGain = True
if(self.ui.HO_GainSelector.isChecked()):
self.HOGain = True
def setGain(self):
try:
gain = self.ui.Gain.text().toFloat()[0]
if (gain < 1.0) & (gain > 0.0):
if self.TTGain:
self.aortc.set_TT_gain(-gain)
if self.HOGain:
self.aortc.set_HO_gain(-gain)
else:
print("Error! Gain must be between 1.0 and 0.0!!")
except:
print("Error! Parsing error!")
"""
_ ____ _ _
_ __ _ _ ___| |__ | __ ) _ _| |_| |_ ___ _ __
| '_ \| | | / __| '_ \| _ \| | | | __| __/ _ \| '_ \
| |_) | |_| \__ \ | | | |_) | |_| | |_| || (_) | | | |
| .__/ \__,_|___/_| |_|____/ \__,_|\__|\__\___/|_| |_|
|_|
"""
def readTipTilt(self):
tip, tilt = self.aortc.get_TipTilt()
self.ui.Tip_SpinBox.setValue(tip)
self.ui.Tilt_SpinBox.setValue(tilt)
def adjustTip(self):
try:
tip = self.ui.Tip_SpinBox.value()
if (tip < 1.0) & (tip > -1.0):
self.aortc.setTip(tip)
else:
print("Error! Tip must be between -1.0 and 1.0!")
except:
print("Error! Something's wrong with getting value from Tip_SpinBox")
def adjustTilt(self):
try:
tilt = self.ui.Tilt_SpinBox.value()
if (tilt < 1.0) & (tilt > -1.0):
self.aortc.setTilt(tilt)
else:
print("Error! Tilt must be between -1.0 and 1.0!")
except:
print("Error! Something's wrong with getting value from Tilt_SpinBox")
def theButtonIsPushed(self):
#=========================================================================================
# This method is called when the push button is clicked
#=========================================================================================
self.ui.nbPushed+=1
mess = "This button has been pushed %d time(s)" % self.ui.nbPushed
print mess
self.ui.dialogBox.setText(mess) #Shows the message in the GUI dialogbox
def theButtonPlotRandomIsPushed(self):
#=========================================================================================
# This method is called when the plot random image in window # is clicked
#=========================================================================================
winnum = self.ui.winNumber.value() # Retrieves the desired window number
pliInGui(np.random.rand(256,256), win=winnum) # Displays random array in the desired matplotlib embedded window
"""
_ _ ____ _ _
_ __ __ _ __| (_) ___ | __ ) _ _| |_| |_ ___ _ __
| '__/ _` |/ _` | |/ _ \| _ \| | | | __| __/ _ \| '_ \
| | | (_| | (_| | | (_) | |_) | |_| | |_| || (_) | | | |
|_| \__,_|\__,_|_|\___/|____/ \__,_|\__|\__\___/|_| |_|
"""
def radioButtonWasClicked(self):
#==============================================================================
# This method is Called when one of the radiobuttons are clicked
#==============================================================================
if(self.ui.radioButton_1.isChecked()):
mess = "No! God outstands. Eric does not count."
elif(self.ui.radioButton_2.isChecked()):
mess= "No! Even a master Jedi is not as good as him!"
elif(self.ui.radioButton_3.isChecked()):
mess= "Almost.... Fab is second in the list (will be 1st soon ;-) )"
elif(self.ui.radioButton_4.isChecked()):
mess= "Yes! Zozo = The best ;-)"
else:
mess="Oups I shoudn't be there..."
self.ui.dialogBox.setText(mess) #Shows the message in the GUI dialogbox
"""
____ _ _ ____ _ _
|___ \ ___| |_ __ _| |_ ___| __ ) _ _| |_| |_ ___ _ __
__) / __| __/ _` | __/ _ \ _ \| | | | __| __/ _ \| '_ \
/ __/\__ \ || (_| | || __/ |_) | |_| | |_| || (_) | | | |
|_____|___/\__\__,_|\__\___|____/ \__,_|\__|\__\___/|_| |_|
"""
def twoStateButtonIsPushed(self, pressed):
#==============================================================================
# This method is Called when the 2 state button is clicked
#==============================================================================
if(pressed):
self.ui.twoStateButtonStatus = "pushed" #if pressed we set the twoStateButtonStatus attribute to pushed
else:
self.ui.twoStateButtonStatus = "released" #if pressed we set the twoStateButtonStatus attribute to released
self.ui.twoStateButton.setText("2 state Button (%s)" % self.ui.twoStateButtonStatus) # update the label of the button with proper status
mess = "2 state buttton is now %s" % self.ui.twoStateButtonStatus
print mess
self.ui.dialogBox.setText( mess ) # displays message in dialogbox
"""
__ _ _ ____ _ _
/ _(_) | ___ / ___| ___| | ___ ___| |_ ___ _ __
| |_| | |/ _ \ \___ \ / _ \ |/ _ \/ __| __/ _ \| '__|
| _| | | __/ ___) | __/ | __/ (__| || (_) | |
|_| |_|_|\___| |____/ \___|_|\___|\___|\__\___/|_|
"""
def selectFITS(self):
#==============================================================================
# This method is called when the "load fits file"button is called
#==============================================================================
filepath = QtGui.QFileDialog.getOpenFileName( self, "Select FITS file", "./data/", "FITS files (*.fits);;All Files (*)") #Note: Use getOpenFileNames method (with a "s") to enable multiple file selection
print filepath
if(filepath!=''):
print (str(filepath))
data = pyfits.getdata(str(filepath)) # Load fits file using the pyfits library
pliInGui(data) # Displays the data in the GUI.
mess = filepath+" displayed in window 1"
else:
mess = "No File selected skipping..."
print mess
self.ui.dialogBox.setText(mess) # displays message
"""
_ ____
___ ___ _ __ ___ | |__ ___ | __ ) _____ __
/ __/ _ \| '_ ` _ \| '_ \ / _ \| _ \ / _ \ \/ /
| (_| (_) | | | | | | |_) | (_) | |_) | (_) > <
\___\___/|_| |_| |_|_.__/ \___/|____/ \___/_/\_\
"""
def theButtonOKIsClicked(self):
#==============================================================================
# This method is called when "ok" button is clicked
#==============================================================================
text = str(self.ui.textEdit.toPlainText()) #Get the text from the text edit Field entry
self.ui.ComboBox.addItem(text)# Adds the text in combo box. Note: Use currentText() to get the current Text in the combobox
mess = "Added Message: %s in Combobox" % text
print mess
self.ui.dialogBox.setText(mess) # prints some messages...
self.ui.nbItemsComboBox += 1 # updates the "nbItemsComboBox" attribute
self.ui.ComboBox.setCurrentIndex(self.ui.nbItemsComboBox-1) # sets the current item to the last one entered
def resetComboboxClicked(self):
#==============================================================================
# This method is called when "reset ComboBox" button is clicked
#==============================================================================
nb = self.ui.ComboBox.count() # retrieves the nb of items in the combo box
for i in range(nb):
self.ui.ComboBox.removeItem(0) # removes the first item "nb" times => i.e clear all items
        self.ui.nbItemsComboBox = 0 # updates the attribute
def getComboBox(self):
#==============================================================================
# This method is called when the combo selector is changed by the user
#==============================================================================
        currText = self.ui.ComboBox.currentText() # Retrieves the current text displayed in the comboBox
mess = "ComboBox changed to: %s" % currText
self.ui.dialogBox.setText(mess) #displays message
"""
_ _ __ _ _
___ | |_| |__ ___ _ __ ___ / _|_ _ _ __ ___| |_(_) ___ _ __ ___
/ _ \| __| '_ \ / _ \ '__/ __| | |_| | | | '_ \ / __| __| |/ _ \| '_ \/ __|
| (_) | |_| | | | __/ | \__ \ | _| |_| | | | | (__| |_| | (_) | | | \__ \
\___/ \__|_| |_|\___|_| |___/ |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/
_ __ _ _ _ _
__| | ___ / _(_)_ __ (_) |_(_) ___ _ __
/ _` |/ _ \ |_| | '_ \| | __| |/ _ \| '_ \
| (_| | __/ _| | | | | | |_| | (_) | | | |
\__,_|\___|_| |_|_| |_|_|\__|_|\___/|_| |_|
"""
def updateDMPos(color='gist_earth'):
exec("wp.ui.DMPlotWindow.canvas.axes.clear()")
exec("wp.ui.DMPlotWindow.canvas.axes.matshow(wp.DM_Gui.pixels.transpose(), aspect='auto', origin='lower', cmap=color, vmin=-1.0, vmax=1.0)")
exec("wp.ui.DMPlotWindow.canvas.axes.set_xticks([])")
exec("wp.ui.DMPlotWindow.canvas.axes.set_yticks([])")
exec("wp.ui.DMPlotWindow.canvas.axes.set_xticklabels([])")
exec("wp.ui.DMPlotWindow.canvas.axes.set_yticklabels([])")
exec("wp.ui.DMPlotWindow.canvas.draw()")
for i in range(len(wp.DM_Gui.HOactuators)):
x = str(wp.DM_Gui.HOactuators[i].xTextAnchor)
y = str(wp.DM_Gui.HOactuators[i].yTextAnchor)
txt = str(wp.DM_Gui.HOactuators[i])
exec("wp.ui.DMPlotWindow.canvas.axes.text("+x+", "+y+", '"+txt+"')")
for i in range(len(wp.DM_Gui.TTactuators)):
x = str(wp.DM_Gui.TTactuators[i].xTextAnchor)
y = str(wp.DM_Gui.TTactuators[i].yTextAnchor)
txt = str(wp.DM_Gui.TTactuators[i])
exec("wp.ui.DMPlotWindow.canvas.axes.text("+x+", "+y+", '"+txt+"')")
#wp.DM_Gui.drawMap()
print "updated DM Positions"
#DMPos = 'junk'#aortc.get_HO_ACT_POS_REF_MAP()
"""
_ _ _ _ ____ ____
| | __ _ _ _ _ __ ___| |__ (_)_ __ __ _ / \ | _ \| _ \
| |/ _` | | | | '_ \ / __| '_ \| | '_ \ / _` | / _ \ | |_) | |_) |
| | (_| | |_| | | | | (__| | | | | | | | (_| | / ___ \| __/| __/
|_|\__,_|\__,_|_| |_|\___|_| |_|_|_| |_|\__, | /_/ \_\_| |_|
|___/
"""
#==============================================================================================================
# !!!!! Here we launch the MAIN PyQt application !!!!!
#==============================================================================================================
hostname = "aortc3"
username = "ciaomgr"
aortc = VLTTools.VLTConnection(hostname=hostname, username=username,
simulate=False)
app = QApplication([]) #Defines that the app is a Qt application
wp = Graffiti_ui_class(aortc = aortc) # !!!!!!! THE GUI REALLY STARTS HERE !!!!!!
wp.initialize() # Can I initialize here?
wp.show() # shows the GUI (can be hidden by typing wp.hide())
print "Graffiti loaded."
| gpl-2.0 |
sangwook236/sangwook-library | python/test/machine_learning/keras/run_fc_densenet_using_camvid_generator.py | 2 | 17556 | # REF [paper] >> "Densely Connected Convolutional Networks", arXiv 2016.
# REF [paper] >> "The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for Semantic Segmentation", arXiv 2016.
# REF [site] >> https://github.com/titu1994/Fully-Connected-DenseNets-Semantic-Segmentation
# Path to libcudnn.so.
#export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
#--------------------------------------------------------------------
import os, sys
if 'posix' == os.name:
swl_python_home_dir_path = '/home/sangwook/work/SWL_github/python'
lib_home_dir_path = '/home/sangwook/lib_repo/python'
else:
swl_python_home_dir_path = 'D:/work/SWL_github/python'
lib_home_dir_path = 'D:/lib_repo/python'
#lib_home_dir_path = 'D:/lib_repo/python/rnd'
sys.path.append(swl_python_home_dir_path + '/src')
sys.path.append(lib_home_dir_path + '/Fully-Connected-DenseNets-Semantic-Segmentation_github')
#os.chdir(swl_python_home_dir_path + '/test/machine_learning/keras')
#--------------------
import math
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import backend as K
from keras import models
from keras import optimizers, callbacks
import densenet_fc as dc
from swl.util.threading import ThreadSafeGenerator
from swl.machine_learning.keras.data_generator import create_dataset_generator_using_imgaug, DatasetGeneratorUsingImgaug
from swl.machine_vision.camvid_dataset import preprocess_camvid_dataset, load_camvid_dataset, create_camvid_generator_from_array, create_camvid_generator_from_directory
from swl.machine_vision.camvid_dataset import get_imgaug_sequence_for_camvid
#--------------------------------------------------------------------
config = tf.ConfigProto()
#config.allow_soft_placement = True
config.log_device_placement = True
config.gpu_options.allow_growth = True
#config.gpu_options.per_process_gpu_memory_fraction = 0.4 # only allocate 40% of the total memory of each GPU.
sess = tf.Session(config=config)
# This means that Keras will use the session we registered to initialize all variables that it creates internally.
K.set_session(sess)
K.set_learning_phase(0)
#keras_backend = 'tf'
#--------------------------------------------------------------------
# Prepare directories.
output_dir_path = './result/fc_densenet_using_camvid_generator'
log_dir_path = './log/fc_densenet_using_camvid_generator'
model_dir_path = output_dir_path + '/model'
prediction_dir_path = output_dir_path + '/prediction'
train_summary_dir_path = log_dir_path + '/train'
test_summary_dir_path = log_dir_path + '/test'
if not os.path.exists(model_dir_path):
try:
os.makedirs(model_dir_path)
except OSError as exception:
if exception.errno != os.errno.EEXIST:
raise
if not os.path.exists(prediction_dir_path):
try:
os.makedirs(prediction_dir_path)
except OSError as exception:
if exception.errno != os.errno.EEXIST:
raise
if not os.path.exists(train_summary_dir_path):
try:
os.makedirs(train_summary_dir_path)
except OSError as exception:
if exception.errno != os.errno.EEXIST:
raise
if not os.path.exists(test_summary_dir_path):
try:
os.makedirs(test_summary_dir_path)
except OSError as exception:
if exception.errno != os.errno.EEXIST:
raise
model_checkpoint_best_filepath = model_dir_path + '/fc_densenet_using_camvid_generator_best.hdf5' # For a best model.
model_checkpoint_filepath = model_dir_path + '/fc_densenet_using_camvid_generator_weight_{epoch:02d}-{val_loss:.2f}.hdf5'
model_json_filepath = model_dir_path + '/fc_densenet_using_camvid_generator.json'
model_weight_filepath = model_dir_path + '/fc_densenet_using_camvid_generator_weight.hdf5'
#model_filepath = model_dir_path + '/fc_densenet_using_camvid_generator_epoch{}.hdf5' # For a full model.
model_filepath = model_checkpoint_best_filepath
#--------------------------------------------------------------------
# Parameters.
np.random.seed(7)
num_examples = 367
num_classes = 12 # 11 + 1.
batch_size = 12 # Number of samples per gradient update.
num_epochs = 1000 # Number of times to iterate over training data.
steps_per_epoch = num_examples // batch_size if num_examples > 0 else 50
if steps_per_epoch < 1:
steps_per_epoch = 1
shuffle = False
max_queue_size = 10
workers = 4
use_multiprocessing = False
#--------------------------------------------------------------------
# Prepare dataset.
if 'posix' == os.name:
#dataset_home_dir_path = '/home/sangwook/my_dataset'
dataset_home_dir_path = '/home/HDD1/sangwook/my_dataset'
else:
dataset_home_dir_path = 'D:/dataset'
train_image_dir_path = dataset_home_dir_path + '/pattern_recognition/camvid/tmp/train/image'
train_label_dir_path = dataset_home_dir_path + '/pattern_recognition/camvid/tmp/trainannot/image'
val_image_dir_path = dataset_home_dir_path + '/pattern_recognition/camvid/tmp/val/image'
val_label_dir_path = dataset_home_dir_path + '/pattern_recognition/camvid/tmp/valannot/image'
test_image_dir_path = dataset_home_dir_path + '/pattern_recognition/camvid/tmp/test/image'
test_label_dir_path = dataset_home_dir_path + '/pattern_recognition/camvid/tmp/testannot/image'
image_suffix = ''
image_extension = 'png'
label_suffix = ''
label_extension = 'png'
original_image_size = (360, 480) # (height, width).
#resized_image_size = None
resized_image_size = (224, 224) # (height, width).
random_crop_size = None
#random_crop_size = (224, 224) # (height, width).
center_crop_size = None
if center_crop_size is not None:
image_size = center_crop_size
elif random_crop_size is not None:
image_size = random_crop_size
elif resized_image_size is not None:
image_size = resized_image_size
else:
image_size = original_image_size
image_shape = image_size + (3,)
# Provide the same seed and keyword arguments to the fit and flow methods.
seed = 1
# REF [file] >> ${SWL_PYTHON_HOME}/test/machine_learning/keras/camvid_dataset_test.py
dataset_generator_type = 2
if 0 == dataset_generator_type:
train_images, train_labels, val_images, val_labels, test_images, test_labels, num_classes0 = load_camvid_dataset(
train_image_dir_path, train_label_dir_path, val_image_dir_path, val_label_dir_path, test_image_dir_path, test_label_dir_path,
data_suffix=image_suffix, data_extension=image_extension, label_suffix=label_suffix, label_extension=label_extension,
width=resized_image_size[1], height=resized_image_size[0])
assert num_classes == num_classes0, '[Warning] The number of classes is unmatched.'
# Preprocessing (normalization, standardization, etc).
train_images, train_labels = preprocess_camvid_dataset(train_images, train_labels, num_classes)
val_images, val_labels = preprocess_camvid_dataset(val_images, val_labels, num_classes)
test_images, test_labels = preprocess_camvid_dataset(test_images, test_labels, num_classes)
# FIXME [fix] >> A dataset generator for images(data) and labels per image.
# - Images are only transformed, but labels are not transformed.
train_dataset_gen, val_dataset_gen, test_dataset_gen = create_camvid_generator_from_array(
train_images, train_labels, val_images, val_labels, test_images, test_labels,
data_suffix=image_suffix, data_extension=image_extension, label_suffix=label_suffix, label_extension=label_extension,
batch_size=batch_size, random_crop_size=random_crop_size, center_crop_size=center_crop_size, shuffle=shuffle, seed=None)
elif 1 == dataset_generator_type:
# NOTICE [caution] >>
    # - resized_image_size should not be None.
# - Each input directory should contain one subdirectory per class.
# - Images are loaded as either a RGB or gray color.
train_dataset_gen = create_camvid_generator_from_directory(train_image_dir_path, train_label_dir_path,
batch_size=batch_size, resized_image_size=resized_image_size, random_crop_size=random_crop_size, center_crop_size=center_crop_size, shuffle=shuffle, seed=seed)
elif 2 == dataset_generator_type:
train_images, train_labels, val_images, val_labels, test_images, test_labels, num_classes0 = load_camvid_dataset(
train_image_dir_path, train_label_dir_path, val_image_dir_path, val_label_dir_path, test_image_dir_path, test_label_dir_path,
data_suffix=image_suffix, data_extension=image_extension, label_suffix=label_suffix, label_extension=label_extension,
width=resized_image_size[1], height=resized_image_size[0])
assert num_classes == num_classes0, '[Warning] The number of classes is unmatched.'
seq = get_imgaug_sequence_for_camvid(width=image_shape[1], height=image_shape[0])
gen_type = -1
if 0 == gen_type:
# NOTICE [caution] >> Not thread-safe.
train_dataset_gen = create_dataset_generator_using_imgaug(seq, train_images, train_labels, num_classes, batch_size=batch_size, shuffle=shuffle, dataset_preprocessing_function=preprocess_camvid_dataset)
val_dataset_gen = create_dataset_generator_using_imgaug(seq, val_images, val_labels, num_classes, batch_size=batch_size, shuffle=shuffle, dataset_preprocessing_function=preprocess_camvid_dataset)
test_dataset_gen = create_dataset_generator_using_imgaug(seq, test_images, test_labels, num_classes, batch_size=batch_size, shuffle=shuffle, dataset_preprocessing_function=preprocess_camvid_dataset)
elif 1 == gen_type:
train_dataset_gen = DatasetGeneratorUsingImgaug(seq, train_images, train_labels, num_classes, batch_size=batch_size, shuffle=shuffle, dataset_preprocessing_function=preprocess_camvid_dataset)
val_dataset_gen = DatasetGeneratorUsingImgaug(seq, val_images, val_labels, num_classes, batch_size=batch_size, shuffle=shuffle, dataset_preprocessing_function=preprocess_camvid_dataset)
test_dataset_gen = DatasetGeneratorUsingImgaug(seq, test_images, test_labels, num_classes, batch_size=batch_size, shuffle=shuffle, dataset_preprocessing_function=preprocess_camvid_dataset)
else:
train_dataset_gen = ThreadSafeGenerator(create_dataset_generator_using_imgaug(seq, train_images, train_labels, num_classes, batch_size=batch_size, shuffle=shuffle, dataset_preprocessing_function=preprocess_camvid_dataset))
val_dataset_gen = ThreadSafeGenerator(create_dataset_generator_using_imgaug(seq, val_images, val_labels, num_classes, batch_size=batch_size, shuffle=shuffle, dataset_preprocessing_function=preprocess_camvid_dataset))
test_dataset_gen = ThreadSafeGenerator(create_dataset_generator_using_imgaug(seq, test_images, test_labels, num_classes, batch_size=batch_size, shuffle=shuffle, dataset_preprocessing_function=preprocess_camvid_dataset))
else:
assert dataset_generator_type < 3, 'Invalid dataset generator type.'
#--------------------------------------------------------------------
# Create a FC-DenseNet model.
print('Create a FC-DenseNet model.')
with tf.name_scope('fc-densenet'):
fc_densenet_model = dc.DenseNetFCN(image_shape, nb_dense_block=5, growth_rate=16, nb_layers_per_block=4, upsampling_type='upsampling', classes=num_classes)
# Display the model summary.
#fc_densenet_model.summary()
#--------------------------------------------------------------------
# Prepare training.
class_weighting = [
0.2595,
0.1826,
4.5640,
0.1417,
0.5051,
0.3826,
9.6446,
1.8418,
6.6823,
6.2478,
3.0,
7.3614
]
# Learning rate scheduler.
def step_decay(epoch):
initial_learning_rate = 0.001
drop = 0.00001
epochs_drop = 10.0
learning_rate = initial_learning_rate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
return learning_rate
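# For reference, with the constants above: epochs 0-8 keep lr = 1.0e-3,
# epochs 9-18 drop to 1.0e-8, epochs 19-28 to 1.0e-13, and so on -- i.e. the
# rate is multiplied by drop = 1e-5 once every epochs_drop = 10 epochs.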
# Learning schedule callback.
learning_rate_callback = callbacks.LearningRateScheduler(step_decay)
# Checkpoint.
tensor_board_callback = callbacks.TensorBoard(log_dir=train_summary_dir_path, histogram_freq=5, write_graph=True, write_images=True)
reduce_lr_on_plateau_callback = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
model_checkpoint_callback = callbacks.ModelCheckpoint(model_checkpoint_best_filepath, monitor='val_acc', verbose=2, save_best_only=True, save_weights_only=False, mode='max')
#model_checkpoint_callback = callbacks.ModelCheckpoint(model_checkpoint_filepath, monitor='val_acc', verbose=2, save_best_only=False, save_weights_only=False, mode='max')
# NOTICE [caution] >> Out of memory.
#callback_list = [learning_rate_callback, tensor_board_callback, reduce_lr_on_plateau_callback, model_checkpoint_callback]
#callback_list = [tensor_board_callback, model_checkpoint_callback]
callback_list = [model_checkpoint_callback]
#optimizer = optimizers.SGD(lr=0.01, decay=1.0e-7, momentum=0.95, nesterov=False)
optimizer = optimizers.RMSprop(lr=1.0e-5, decay=1.0e-9, rho=0.9, epsilon=1.0e-8)
#optimizer = optimizers.Adagrad(lr=0.01, decay=1.0e-7, epsilon=1.0e-8)
#optimizer = optimizers.Adadelta(lr=1.0, decay=0.0, rho=0.95, epsilon=1.0e-8)
#optimizer = optimizers.Adam(lr=1.0e-5, decay=1.0e-9, beta_1=0.9, beta_2=0.999, epsilon=1.0e-8)
#optimizer = optimizers.Adamax(lr=0.002, decay=0.0, beta_1=0.9, beta_2=0.999, epsilon=1.0e-8)
#optimizer = optimizers.Nadam(lr=0.002, schedule_decay=0.004, beta_1=0.9, beta_2=0.999, epsilon=1.0e-8)
#--------------------------------------------------------------------
def display_history(history):
# List all data in history.
print(history.history.keys())
# Summarize history for accuracy.
fig = plt.figure()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
fig.savefig(output_dir_path + '/model_accuracy.png')
plt.close(fig)
# Summarize history for loss.
fig = plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
fig.savefig(output_dir_path + '/model_loss.png')
plt.close(fig)
#--------------------------------------------------------------------
# Train the FC-DenseNet model.
TRAINING_MODE = 0 # Start training a model.
#TRAINING_MODE = 1 # Resume training a model.
#TRAINING_MODE = 2 # Use a trained model.
if 0 == TRAINING_MODE:
initial_epoch = 0
print('Start training...')
elif 1 == TRAINING_MODE:
initial_epoch = 1000
print('Resume training...')
elif 2 == TRAINING_MODE:
initial_epoch = 0
print('Use a trained model.')
else:
raise Exception('Invalid TRAINING_MODE')
if 1 == TRAINING_MODE or 2 == TRAINING_MODE:
# Deserialize a model from JSON.
#with open(model_json_filepath, 'r') as json_file:
# fc_densenet_model = models.model_from_json(json_file.read())
# Deserialize weights into the model.
#fc_densenet_model.load_weights(model_weight_filepath)
# Load a full model.
fc_densenet_model = models.load_model(model_filepath)
#fc_densenet_model = models.load_model(model_filepath.format(num_epochs))
print('Restored a FC-DenseNet model.')
if 0 == TRAINING_MODE or 1 == TRAINING_MODE:
fc_densenet_model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
history = fc_densenet_model.fit_generator(train_dataset_gen,
steps_per_epoch=steps_per_epoch, epochs=num_epochs, initial_epoch=initial_epoch,
#validation_data=val_dataset_gen, validation_steps=steps_per_epoch,
validation_data=test_dataset_gen, validation_steps=steps_per_epoch,
#max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing,
class_weight=class_weighting, callbacks=callback_list, verbose=1)
display_history(history)
# Serialize a model to JSON.
#with open(model_json_filepath, 'w') as json_file:
# json_file.write(fc_densenet_model.to_json())
# Serialize weights to HDF5.
#fc_densenet_model.save_weights(model_weight_filepath) # Save the model's weights.
# Save a full model.
#fc_densenet_model.save(model_filepath.format(num_epochs))
print('Saved a FC-DenseNet model.')
if 0 == TRAINING_MODE or 1 == TRAINING_MODE:
print('End training...')
#--------------------------------------------------------------------
# Evaluate the FC-DenseNet model.
print('Start testing...')
num_test_examples = 233
steps_per_epoch = num_test_examples // batch_size if num_test_examples > 0 else 50
if steps_per_epoch < 1:
steps_per_epoch = 1
test_loss, test_accuracy = fc_densenet_model.evaluate_generator(test_dataset_gen, steps=steps_per_epoch) #, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing)
print('Test loss = {}, test accuracy = {}'.format(test_loss, test_accuracy))
print('End testing...')
#--------------------------------------------------------------------
# Predict.
print('Start prediction...')
predictions = fc_densenet_model.predict_generator(test_dataset_gen, steps=steps_per_epoch, verbose=0) #, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing)
for idx in range(predictions.shape[0]):
prediction = np.argmax(predictions[idx], axis=-1)
#plt.imshow(prediction, cmap='gray')
plt.imsave(prediction_dir_path + '/prediction' + str(idx) + '.jpg', prediction, cmap='gray')
print('End prediction...')
#--------------------------------------------------------------------
# Display.
for batch_images, batch_labels in test_dataset_gen:
break
batch_predictions = fc_densenet_model.predict(batch_images, batch_size=batch_size, verbose=0)
idx = 0
#plt.figure(figsize=(15,5))
plt.subplot(131)
plt.imshow((batch_images[idx] - np.min(batch_images[idx])) / (np.max(batch_images[idx]) - np.min(batch_images[idx])))
plt.subplot(132)
plt.imshow(np.argmax(batch_labels[idx], axis=-1), cmap='gray')
plt.subplot(133)
plt.imshow(np.argmax(batch_predictions[idx], axis=-1), cmap='gray')
| gpl-2.0 |
jakevdp/sklearn_pycon2014 | notebooks/fig_code/svm_gui.py | 47 | 11549 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('key_press_event', self.onkeypress)
canvas.mpl_connect('key_release_event', self.onkeyrelease)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.shift_down = False
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onkeypress(self, event):
if event.key == "shift":
self.shift_down = True
def onkeyrelease(self, event):
if event.key == "shift":
self.shift_down = False
def onclick(self, event):
if event.xdata and event.ydata:
if self.shift_down or event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
elif event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
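# Illustrative invocation (assumes the script is run directly; the file name
# below is hypothetical). "--output" is optional and, when given, the clicked
# points are dumped in svmlight format after the window is closed:
#
#     python svm_gui.py --output points.svmlight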
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
rr1964/Caterpillar | anonymizedAnaylsis.py | 1 | 6082 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 27 15:33:30 2017
@author: Randall Reese
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 10:36:56 2017
@author: reeserd2
"""
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.ensemble.partial_dependence import plot_partial_dependence, partial_dependence
from patsy import dmatrices, dmatrix
np.random.seed(0)
#%%
visitorData = pd.read_csv("anonymousData.csv")
X = StandardScaler().fit_transform(visitorData[[0,1,2,3,4,5,6,7]])
#%%
### Two dimensional plotting of clustering.
colors = np.array([x for x in 'grc'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans']#, 'AffinityPropagation', 'MeanShift',
#'SpectralClustering', 'Ward', 'AgglomerativeClustering',
#'DBSCAN', 'Birch']
fig = plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
##ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plot_num = 1
## estimate bandwidth for mean shift
#bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
#
## connectivity matrix for structured Ward
#connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
## make connectivity symmetric
#connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
#ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
#two_means = cluster.MiniBatchKMeans(n_clusters=2)
k_means = cluster.MiniBatchKMeans(n_clusters=3)
#ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
# connectivity=connectivity)
#spectral = cluster.SpectralClustering(n_clusters=2,
# eigen_solver='arpack',
# affinity="nearest_neighbors")
#dbscan = cluster.DBSCAN(eps=.2)
#affinity_propagation = cluster.AffinityPropagation(damping=.9,
# preference=-200)
#
#average_linkage = cluster.AgglomerativeClustering(
# linkage="average", affinity="cityblock", n_clusters=2,
# connectivity=connectivity)
#
#birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
k_means]#, affinity_propagation, ms, spectral, ward, average_linkage,
#dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
#if i_dataset == 0:
# plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0],centers[:, 1], s=100, c=center_colors)
plt.xlim(-0.3, 10)
plt.ylim(-1.5, 9)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
#%%
###Three dimensional plotting of clustering.
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
#make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
##'spectral': cluster.SpectralClustering(n_clusters=3, eigen_solver='arpack', affinity="nearest_neighbors")
estimators = {'kMeans-3': KMeans(n_clusters=3),
'Birch':cluster.Birch(n_clusters=3),
'Ward (n_cluster = 3)': cluster.AgglomerativeClustering(n_clusters=3, linkage='ward', connectivity=connectivity),
'Ward (n_cluster = 4)': cluster.AgglomerativeClustering(n_clusters=4, linkage='ward', connectivity=connectivity)
}
y = {}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
if hasattr(est, 'labels_'):
y[name] = est.labels_.astype(np.int)
else:
y[name] = est.predict(X)
labels = est.labels_
#ax.scatter(X[:, 1], X[:, 3], X[:, 7], c=labels.astype(np.float))
#ax.scatter(visitorData.medHitTotal, visitorData.medRevenuePerSess, visitorData.percentSessWithPurchase, c=labels.astype(np.float))
ax.scatter(visitorData.medHitTotal, visitorData['medRevenuePerSess'], visitorData.percentSessWithPurchase, c=labels.astype(np.float))
ax.text2D(0.05, 0.95, name, transform=ax.transAxes)
#ax.w_xaxis.set_ticklabels([])
#ax.w_yaxis.set_ticklabels([])
#ax.w_zaxis.set_ticklabels([])
#ax.set_zlabel('% Purchase')
ax.set_zlabel('Percent Sessions with Purchase')
ax.set_xlabel('HitNumber')
ax.set_ylabel('Revenue')
fignum = fignum + 1
#%%
###PCA
pca = PCA(n_components=4, svd_solver='arpack')
pca.fit(X)
#PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,
#svd_solver='auto', tol=0.0, whiten=False)
print(pca.explained_variance_ratio_)
print(sum(pca.explained_variance_ratio_))
#%%
pca.components_[0:1]
#%%
pca.components_[1:2]
#%%
pca.components_[2:3]
#%%
pca.components_[3:4]
#%%
| gpl-3.0 |
mklauser/tardis-OLD | tardis/util.py | 2 | 13280 | # Utilities for TARDIS
from astropy import units as u, constants, units
import numpy as np
import os
import yaml
import logging
import atomic
k_B_cgs = constants.k_B.cgs.value
c_cgs = constants.c.cgs.value
h_cgs = constants.h.cgs.value
m_e_cgs = constants.m_e.cgs.value
e_charge_gauss = constants.e.gauss.value
class MalformedError(Exception):
pass
class MalformedSpeciesError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return 'Expecting a species notation (e.g. "Si 2", "Si II", "Fe IV") - supplied %s' % self.malformed_element_symbol
class MalformedElementSymbolError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return 'Expecting an atomic symbol (e.g. Fe) - supplied %s' % self.malformed_element_symbol
class MalformedQuantityError(MalformedError):
def __init__(self, malformed_quantity_string):
self.malformed_quantity_string = malformed_quantity_string
def __str__(self):
return 'Expecting a quantity string(e.g. "5 km/s") for keyword - supplied %s' % self.malformed_quantity_string
logger = logging.getLogger(__name__)
synpp_default_yaml_fname = os.path.join(os.path.dirname(__file__), 'data', 'synpp_default.yaml')
def int_to_roman(input):
"""
from http://code.activestate.com/recipes/81611-roman-numerals/
Convert an integer to Roman numerals.
Examples:
>>> int_to_roman(0)
Traceback (most recent call last):
ValueError: Argument must be between 1 and 3999
>>> int_to_roman(-1)
Traceback (most recent call last):
ValueError: Argument must be between 1 and 3999
>>> int_to_roman(1.5)
Traceback (most recent call last):
TypeError: expected integer, got <type 'float'>
>>> for i in range(1, 21): print int_to_roman(i)
...
I
II
III
IV
V
VI
VII
VIII
IX
X
XI
XII
XIII
XIV
XV
XVI
XVII
XVIII
XIX
XX
>>> print int_to_roman(2000)
MM
>>> print int_to_roman(1999)
MCMXCIX
"""
input = int(input)
if type(input) != type(1):
raise TypeError, "expected integer, got %s" % type(input)
if not 0 < input < 4000:
raise ValueError, "Argument must be between 1 and 3999"
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
result = ""
for i in range(len(ints)):
count = int(input / ints[i])
result += nums[i] * count
input -= ints[i] * count
return result
def roman_to_int(input):
"""
from http://code.activestate.com/recipes/81611-roman-numerals/
Convert a roman numeral to an integer.
>>> r = range(1, 4000)
>>> nums = [int_to_roman(i) for i in r]
>>> ints = [roman_to_int(n) for n in nums]
>>> print r == ints
1
>>> roman_to_int('VVVIV')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: VVVIV
>>> roman_to_int(1)
Traceback (most recent call last):
...
TypeError: expected string, got <type 'int'>
>>> roman_to_int('a')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: A
>>> roman_to_int('IL')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: IL
"""
if type(input) != type(""):
raise TypeError, "expected string, got %s" % type(input)
input = input.upper()
nums = ['M', 'D', 'C', 'L', 'X', 'V', 'I']
ints = [1000, 500, 100, 50, 10, 5, 1]
places = []
for c in input:
if not c in nums:
raise ValueError, "input is not a valid roman numeral: %s" % input
for i in range(len(input)):
c = input[i]
value = ints[nums.index(c)]
# If the next place holds a larger number, this value is negative.
try:
nextvalue = ints[nums.index(input[i +1])]
if nextvalue > value:
value *= -1
except IndexError:
# there is no next place.
pass
places.append(value)
sum = 0
for n in places: sum += n
# Easiest test for validity...
if int_to_roman(sum) == input:
return sum
else:
raise ValueError, 'input is not a valid roman numeral: %s' % input
def calculate_luminosity(spec_fname, distance, wavelength_column=0, wavelength_unit=u.angstrom, flux_column=1,
flux_unit=u.Unit('erg / (Angstrom cm2 s)')):
#BAD STYLE change to parse quantity
distance = u.Unit(distance)
wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)
flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')
return luminosity.value, wavelength.min(), wavelength.max()
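# Illustrative usage sketch (the file name and distance string are
# hypothetical): assumes a plain-text spectrum with wavelength in Angstrom in
# column 0 and flux density in erg / (Angstrom cm2 s) in column 1.
#
#     lum, wl_min, wl_max = calculate_luminosity('spectrum.dat', '1 Mpc')
#     # lum is the integrated luminosity in erg/s over [wl_min, wl_max]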
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
logger.warning('Currently only works with Si and a special setup')
if not radial1d_mdl.atom_data.has_synpp_refs:
raise ValueError(
            'The current atom dataset does not contain the necessary reference files (please contact the authors)')
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
try:
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].ix[key] = np.log10(
radial1d_mdl.plasma_array.tau_sobolevs[0].ix[value['line_id']])
except KeyError:
pass
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
yaml_reference = yaml.load(file(synpp_default_yaml_fname))
if lines_db is not None:
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'refs.dat')
yaml_reference['output']['min_wl'] = float(radial1d_mdl.spectrum.wavelength.to('angstrom').value.min())
yaml_reference['output']['max_wl'] = float(radial1d_mdl.spectrum.wavelength.to('angstrom').value.max())
#raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference['opacity']['v_ref'] = float((radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
(1000. * u.km / u.s)).value)
yaml_reference['grid']['v_outer_max'] = float((radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
(1000. * u.km / u.s)).value)
#pdb.set_trace()
yaml_setup = yaml_reference['setups'][0]
yaml_setup['ions'] = []
yaml_setup['log_tau'] = []
yaml_setup['active'] = []
yaml_setup['temp'] = []
yaml_setup['v_min'] = []
yaml_setup['v_max'] = []
yaml_setup['aux'] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup['ions'].append(100 * species[0] + species[1])
yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
yaml_setup['active'].append(True)
yaml_setup['temp'].append(yaml_setup['t_phot'])
yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
yaml_setup['aux'].append(1e200)
yaml.dump(yaml_reference, stream=file(fname, 'w'), explicit_start=True)
def intensity_black_body(nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
        I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}{e^{h\\nu \\beta_\\textrm{rad}} - 1}
"""
beta_rad = 1 / (k_B_cgs * T)
return (2 * (h_cgs * nu ** 3) / (c_cgs ** 2)) / (
np.exp(h_cgs * nu * beta_rad) - 1)
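# Minimal sketch (illustrative values): nu is a frequency in Hz and T a
# temperature in K, consistent with the cgs constants defined above; the
# result is the specific intensity in cgs units.
#
#     I_nu = intensity_black_body(5e14, 1e4)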
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
    approaches, such as moving-average techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
    approach is to make, for each point, a least-squares fit with a
    polynomial of high order over an odd-sized window centered at
    the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError, msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
def species_tuple_to_string(species_tuple, roman_numerals=True):
atomic_number, ion_number = species_tuple
element_symbol = atomic.atomic_number2symbol[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number+1)
return '%s %s' % (element_symbol, roman_ion_number)
else:
return '%s %d' % (element_symbol, ion_number)
def species_string_to_tuple(species_string):
try:
element_string, ion_number_string = species_string.split()
except ValueError:
raise MalformedElementSymbolError(species_string)
atomic_number = element_symbol2atomic_number(element_string)
try:
ion_number = roman_to_int(ion_number_string.strip())
except ValueError:
try:
ion_number = np.int64(ion_number_string)
except ValueError:
            raise MalformedSpeciesError(species_string)
if ion_number > atomic_number:
raise ValueError('Species given does not exist: ion number > atomic number')
return atomic_number, ion_number-1
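# Round-trip sketch (illustrative; assumes the atomic data module maps 'Si' to
# atomic number 14). Both Roman and integer ion numbers are accepted and the
# returned ion number is zero-based:
#
#     species_string_to_tuple('Si II')   # -> (14, 1)
#     species_tuple_to_string((14, 1))   # -> 'Si II'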
def parse_quantity(quantity_string):
if not isinstance(quantity_string, basestring):
raise MalformedQuantityError(quantity_string)
try:
value_string, unit_string = quantity_string.split()
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
value = float(value_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
q = u.Quantity(value, unit_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
return q
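# Hedged example of the expected behaviour:
#
#     parse_quantity('5 km/s')   # -> astropy Quantity, 5.0 km / s
#     parse_quantity('5')        # raises MalformedQuantityError (no unit part)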
def element_symbol2atomic_number(element_string):
reformatted_element_string = reformat_element_symbol(element_string)
if reformatted_element_string not in atomic.symbol2atomic_number:
raise MalformedElementSymbolError(element_string)
return atomic.symbol2atomic_number[reformatted_element_string]
def reformat_element_symbol(element_string):
"""
Reformat the string so the first letter is uppercase and all subsequent letters lowercase
Parameters
----------
element_symbol: str
Returns
-------
reformated element symbol
"""
return element_string[0].upper() + element_string[1:].lower()
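# For example (illustrative): reformat_element_symbol('si') -> 'Si',
# reformat_element_symbol('FE') -> 'Fe'.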
| bsd-3-clause |
oscarbranson/latools | Supplement/comparison_tools/stats.py | 1 | 7199 | import statsmodels.api as sm
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def fmt_RSS(x):
"""
Calculate RSS and format as string.
"""
return 'RSS: {sumsq:.2f}'.format(sumsq=np.sqrt(np.nansum((x)**2)))
def pairwise_reproducibility(df, plot=False):
"""
Calculate the reproducibility of LA-ICPMS based on unique pairs of repeat analyses.
Pairwise differences are fit with a half-Cauchy distribution, and the median and
95% confidence limits are returned for each analyte.
Parameters
----------
df : pandas.DataFrame
A dataset
plot : bool
Whether or not to plot the resulting error distributions.
Returns
-------
    pdifs : pandas.DataFrame
Unique pairwise differences for all analytes.
rep_dists : dict of scipy.stats.halfcauchy
Half-Cauchy distribution objects fitted to the
differences.
rep_stats : dict of tuples
The 50% and 95% quantiles of the half-cauchy
distribution.
(fig, axs) : matplotlib objects
        The figure. If not made, returns a (None, None) placeholder.
"""
ans = df.columns.values
pdifs = []
# calculate differences between unique pairs
for ind, d in df.groupby(level=0):
d.index = d.index.droplevel(0)
difs = []
for i, r in d.iterrows():
t = d.loc[i+1:, :]
difs.append(t[ans] - r[ans])
pdifs.append(pd.concat(difs))
pdifs = pd.concat(pdifs).abs()
# calculate stats
rep_stats = {}
rep_dists = {}
errfn = stats.halfcauchy
for a in ans:
d = pdifs.loc[:, a].dropna().values
hdist = errfn.fit(d, floc=0)
rep_dists[a] = errfn(*hdist)
rep_stats[a] = rep_dists[a].ppf((0.5, 0.95))
# make plot
if not plot:
return pdifs, rep_dists, rep_stats, (None, None)
fig, axs = plt.subplots(1, len(ans), figsize=[len(ans) * 2, 2])
for a, ax in zip(ans, axs):
d = pdifs.loc[:, a].dropna().values
hist, edges, _ = ax.hist(d, 30)
ax.plot(edges, rep_dists[a].pdf(edges) * (sum(hist) * np.mean(np.diff(edges))))
ax.set_title(a, loc='left')
return pdifs, rep_dists, rep_stats, (fig, axs)
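# Illustrative usage sketch (the index and column names below are made up):
# the input DataFrame is expected to have a two-level index whose first level
# groups repeat analyses of the same sample, e.g.
# pd.MultiIndex.from_tuples([('sampleA', 0), ('sampleA', 1), ('sampleB', 0)]).
#
#     pdifs, rep_dists, rep_stats, (fig, axs) = pairwise_reproducibility(df, plot=True)
#     rep_stats['Mg24']   # (median, 95% limit) of the pairwise differences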
def comparison_stats(df, els=['Mg', 'Sr', 'Ba', 'Al', 'Mn']):
"""
Compute comparison stats for test and LAtools data.
Population-level similarity assessed by a Kolmogorov-Smirnov test.
Individual similarity assessed by a pairwise Wilcoxon signed rank test.
Trends in residuals assessed by regression analysis, where significance of
the slope and intercept is determined by t-tests (both relative to zero).
Parameters
----------
df : pandas.DataFrame
A dataframe containing reference ('X/Ca_r'), test user
('X/Ca_t') and LAtools ('X123') data.
els : list
list of elements (names only) to plot.
Returns
-------
pandas.DataFrame
"""
# get corresponding analyte and ratio names
As = []
Rs = []
analytes = [c for c in df.columns if ('_r' not in c) and ('_t' not in c)]
ratios = [c for c in df.columns if ('_r' in c)]
for e in els:
if e == 'Sr':
As.append('Sr88_Ca43')
elif e == 'Mg':
As.append('Mg24_Ca43')
else:
As.append([a for a in analytes if e in a][0])
Rs.append([r for r in ratios if e in r][0][:-2])
yt_stats = []
yl_stats = []
for i, (e, a) in enumerate(zip(Rs, As)):
if a == 'Ba138':
m = 1e3
u = '$\mu$mol/mol'
else:
m = 1
u = 'mmol/mol'
x = df.loc[:, e + '_r'].values * m
yt = df.loc[:, e + '_t'].values * m
yl = df.loc[:, a].values * m
yt_stats.append(summary_stats(x, yt, e))
yl_stats.append(summary_stats(x, yl, e))
yt_stats = pd.concat(yt_stats).T
yl_stats = pd.concat(yl_stats).T
return pd.concat([yt_stats, yl_stats], keys=['Test User', 'LAtools']).T
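# Hedged usage note (the DataFrame below is hypothetical; column names follow
# the convention described in the docstring): reference values live in
# 'X/Ca_r' columns, test-user values in 'X/Ca_t' columns and LAtools values in
# analyte-named columns such as 'Mg24_Ca43'.
#
#     stats = comparison_stats(df, els=['Mg', 'Sr', 'Ba', 'Al', 'Mn'])
#     # returns a table with 'Test User' and 'LAtools' statistics side by side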
def summary_stats(x, y, nm=None):
"""
Compute summary statistics for paired x, y data.
Tests
-----
Parameters
----------
x, y : array-like
Data to compare
nm : str (optional)
Index value of created dataframe.
Returns
-------
pandas dataframe of statistics.
"""
    # create dataframe for results
if isinstance(nm, str):
nm = [nm]
# cols = pd.MultiIndex.from_arrays([['', 'Pairwise', 'Pairwise', cat, cat, cat, cat],
# ['N', 'W', 'p', 'Median', 'IQR', 'W', 'p']])
# cols = ['Median', 'IQR', 'CI95', 'L95', 'LQ', 'UQ', 'U95', 'N',
# 'Wilcoxon_stat', 'Wilcoxon_p',
# 'KS_stat', 'KS_p',
# 'LR_slope', 'LR_intercept', 'LR_slope_tvalue', 'LR_intercept_tvalue', 'LR_slope_p', 'LR_intercept_p', 'LR_R2adj']
# out = pd.DataFrame(index=nm, columns=cols)
cols = pd.MultiIndex.from_tuples([('Residual Summary', 'N'),
('Residual Summary', 'Median'),
('Residual Summary', 'LQ'),
('Residual Summary', 'IQR'),
('Residual Summary', 'UQ'),
('Residual Regression', 'Slope'),
('Residual Regression', 'Slope t'),
('Residual Regression', 'Slope p'),
('Residual Regression', 'Intercept'),
('Residual Regression', 'Intercept t'),
('Residual Regression', 'Intercept p'),
('Residual Regression', 'R2'),
('Kolmogorov-Smirnov', 'KS'),
('Kolmogorov-Smirnov', 'p')])
out = pd.DataFrame(index=nm, columns=cols)
# remove nan values
ind = ~(np.isnan(x) | np.isnan(y))
x = x[ind]
y = y[ind]
# calculate residuals
r = y - x
# summary statistics
cat = 'Residual Summary'
out.loc[:, (cat, 'N')] = len(x)
out.loc[:, (cat, 'Median')] = np.median(r)
out.loc[:, [(cat, 'LQ'), (cat, 'UQ')]] = np.percentile(r, [25, 75])
out.loc[:, (cat, 'IQR')] = out.loc[:, (cat, 'UQ')] - out.loc[:, (cat, 'LQ')]
# non-paired test for same distribution
cat = 'Kolmogorov-Smirnov'
ks = stats.ks_2samp(x, y)
out.loc[:, (cat, 'KS')] = ks.statistic
out.loc[:, (cat, 'p')] = ks.pvalue
# regression analysis of residuals - slope should be 0, intercept should be 0
cat = 'Residual Regression'
X = sm.add_constant(x)
reg = sm.OLS(r, X, missing='drop')
fit = reg.fit()
out.loc[:, [(cat, 'Intercept'), (cat, 'Slope')]] = fit.params
out.loc[:, [(cat, 'Intercept t'), (cat, 'Slope t')]] = fit.tvalues
out.loc[:, (cat, 'R2')] = fit.rsquared
out.loc[:, [(cat, 'Intercept p'), (cat, 'Slope p')]] = fit.pvalues
    return out
 | mit |
JsNoNo/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
Achuth17/scikit-learn | sklearn/pipeline.py | 162 | 21103 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
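# Behaviour sketch (illustrative; the names shown are what the loop above
# produces): duplicate estimator types receive a numeric suffix so that step
# names remain unique.
#
#     _name_estimators([PCA(), PCA(), GaussianNB()])
#     # -> [('pca-1', PCA(...)), ('pca-2', PCA(...)), ('gaussiannb', GaussianNB(...))]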
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
JT5D/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 31 | 3340 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
"""Tests the FastMCD algorithm implementation
"""
### Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
### Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
### Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
### 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
print(clf.threshold)
assert_raises(Exception, clf.predict, X)
assert_raises(Exception, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
simonsfoundation/inferelator_ng | inferelator_ng/tests/test_prior.py | 3 | 8009 | import unittest
from .. import prior
import pandas as pd
import numpy as np
import subprocess
class TestPrior(unittest.TestCase):
def setup_test_files(self):
self.motifs = [('chr1', '10', '15', 'TF1', '.', '-'),
('chr1', '18', '22', 'TF1', '.', '+'),
('chr2', '50', '56', 'TF1', '.', '+'),
('chr1', '100', '107', 'TF2', '.', '-'),
('chr1', '103', '108', 'TF3', '.', '+'),
('chr1', '150', '154', 'TF4', '.', '+')]
self.tss = [('chr1', '20', '21', 'gene1', '.', '+'),
('chr1', '20', '21', 'gene2', '.', '-'),
('chr1', '120', '121', 'gene3', '.', '+')]
self.genes = [('chr1', '20', '45', 'gene2', '.', '+'),
('chr1', '5', '21', 'gene1', '.', '-'),
('chr1', '120', '150', 'gene3', '.', '+')]
self.target_genes = ['gene1', 'gene2', 'gene3']
        self.regulators = ['TF1', 'TF2', 'TF3', 'TF4']
def test_prior_empty_tfs_and_targets_lists(self):
self.setup_test_files()
prior_object = prior.Prior(self.motifs, self.genes, [], [], 'closest', 100)
self.assertEqual(prior_object.make_prior().size, 0)
def test_prior_closest_zero_distance(self):
self.setup_test_files()
prior_object = prior.Prior(self.motifs,
self.tss,
self.target_genes,
self.regulators,
'closest', 0)
expected_prior = pd.DataFrame([[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]],
index = ['gene1', 'gene2', 'gene3'],
columns = ['TF1', 'TF2', 'TF3', 'TF4'])
self.assertTrue(prior_object.make_prior().equals(expected_prior))
def test_prior_closest_zero_distance_genes_with_multiple_tss_at_different_locations(self):
self.setup_test_files()
self.tss.append(('chr1', '100', '101', 'gene1', '.', '+'),)
prior_object = prior.Prior(self.motifs,
self.tss,
self.target_genes,
self.regulators,
'closest', 0)
expected_prior = pd.DataFrame([[1, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]],
index = ['gene1', 'gene2', 'gene3'],
columns = ['TF1', 'TF2', 'TF3', 'TF4'])
self.assertTrue(prior_object.make_prior().equals(expected_prior))
def test_prior_closest_zero_distance_genes_with_multiple_tss_at_same_location(self):
self.setup_test_files()
self.tss.append(('chr1', '19', '20', 'gene1', '.', '+'),)
prior_object = prior.Prior(self.motifs,
self.tss,
self.target_genes,
self.regulators,
'closest', 0)
expected_prior = pd.DataFrame([[2, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]],
index = ['gene1', 'gene2', 'gene3'],
columns = ['TF1', 'TF2', 'TF3', 'TF4'])
self.assertTrue(prior_object.make_prior().equals(expected_prior))
def test_prior_window_TSS_zero_distance(self):
self.setup_test_files()
prior_object = prior.Prior(self.motifs,
self.tss,
self.target_genes,
self.regulators,
'window', 0)
expected_prior = pd.DataFrame([[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]],
index = ['gene1', 'gene2', 'gene3'],
columns = ['TF1', 'TF2', 'TF3', 'TF4'])
self.assertTrue(prior_object.make_prior().equals(expected_prior))
def test_prior_window_geneBody_zero_distance(self):
self.setup_test_files()
prior_object = prior.Prior(self.motifs,
self.genes,
self.target_genes,
self.regulators,
'window', 0)
expected_prior = pd.DataFrame([[2, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]],
index = ['gene1', 'gene2', 'gene3'],
columns = ['TF1', 'TF2', 'TF3', 'TF4'])
self.assertTrue(prior_object.make_prior().equals(expected_prior))
def test_prior_closestTSS_default(self):
self.setup_test_files()
prior_object = prior.Prior(self.motifs,
self.tss,
self.target_genes,
self.regulators,
'closest')
expected_prior = pd.DataFrame([[2, 0, 0, 0],
[2, 0, 0, 0],
[0, 1, 1, 1]],
index = ['gene1', 'gene2', 'gene3'],
columns = ['TF1', 'TF2', 'TF3', 'TF4'])
self.assertTrue(prior_object.make_prior().equals(expected_prior))
def test_prior_closestTSS_ignore_downstream(self):
self.setup_test_files()
prior_object = prior.Prior(self.motifs,
self.tss,
self.target_genes,
self.regulators,
'closest', ignore_downstream = True)
expected_prior = pd.DataFrame([[2, 0, 0, 0],
[1, 0, 0, 1],
[0, 1, 1, 0]],
index = ['gene1', 'gene2', 'gene3'],
columns = ['TF1', 'TF2', 'TF3', 'TF4'])
self.assertTrue(prior_object.make_prior().equals(expected_prior))
def test_prior_windowGeneBody_1000(self):
self.setup_test_files()
prior_object = prior.Prior(self.motifs,
self.genes,
self.target_genes,
self.regulators,
'window', 1000)
expected_prior = pd.DataFrame([[2, 1, 1, 1],
[2, 1, 1, 1],
[2, 1, 1, 1]],
index = ['gene1', 'gene2', 'gene3'],
columns = ['TF1', 'TF2', 'TF3', 'TF4'])
self.assertTrue(prior_object.make_prior().equals(expected_prior))
def test_prior_number_of_targets_2(self):
self.setup_test_files()
prior_object = prior.Prior(self.motifs,
self.tss,
self.target_genes,
self.regulators,
'closest', number_of_targets = 2)
expected_prior = pd.DataFrame([[2, 1, 1, 1],
[2, 1, 1, 1],
[0, 1, 1, 1]],
index = ['gene1', 'gene2', 'gene3'],
columns = ['TF1', 'TF2', 'TF3', 'TF4'])
self.assertTrue(prior_object.make_prior().equals(expected_prior))
| bsd-2-clause |
tylerjereddy/scipy | tools/refguide_check.py | 7 | 31826 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --doctests optimize
"""
import copy
import doctest
import glob
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
import docutils.core
import numpy as np
import sphinx
from docutils.parsers.rst import directives
from pkg_resources import parse_version
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
if parse_version(sphinx.__version__) >= parse_version('1.5'):
# Enable specific Sphinx directives
from sphinx.directives.other import SeeAlso, Only
directives.register_directive('seealso', SeeAlso)
directives.register_directive('only', Only)
else:
# Remove sphinx directives that don't run without Sphinx environment.
# Sphinx < 1.5 installs all directives on import...
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fft',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'signal.windows',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'spatial.transform',
'special',
'stats',
'stats.mstats',
'stats.contingency',
'stats.qmc',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
'scipy.optimize.show_options',
'scipy.integrate.quad_explain',
'io.rst', # XXX: need to figure out how to deal w/ mat files
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
r'scipy\.special\..*_roots', # old aliases for scipy.special.*_roots
r'scipy\.special\.jn', # alias for jv
r'scipy\.ndimage\.sum', # alias for sum_labels
r'scipy\.integrate\.simps', # alias for simpson
r'scipy\.integrate\.trapz', # alias for trapezoid
r'scipy\.integrate\.cumtrapz', # alias for cumulative_trapezoid
r'scipy\.linalg\.solve_lyapunov', # deprecated name
r'scipy\.stats\.contingency\.chi2_contingency',
r'scipy\.stats\.contingency\.expected_freq',
r'scipy\.stats\.contingency\.margins',
r'scipy\.stats\.reciprocal',
r'scipy\.stats\.trapz', # alias for trapezoid
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
'kaiser', 'nuttall', 'parzen', 'triang', 'tukey'):
REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
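    # For example, a refguide line such as
    #    "   fmin         -- Minimize a function of one or more variables."
    # is picked up by the first pattern below.
    #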
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except Exception:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = only_ref.intersection(deprecated)
only_ref = only_ref.difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
output += "\nThis issue can be fixed by adding these objects to\n"
output += "the function listing in __init__.py for this module\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
output += "\nThis issue should likely be fixed by removing these objects\n"
output += "from the function listing in __init__.py for this module\n"
output += "or adding them to __all__.\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'versionchanged', 'module', 'class', 'meth',
'ref', 'func', 'toctree', 'moduleauthor', 'deprecated',
'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except Exception:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float32': np.float32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
self._had_unexpected_error = False
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
# Ignore name errors after failing due to an unexpected exception
exception_type = exc_info[0]
if self._had_unexpected_error and exception_type is NameError:
return
self._had_unexpected_error = True
self._report_item_name(out)
return super().report_unexpected_exception(
out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile(r'at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
'.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except Exception:
# Maybe we're printing a numpy array? This produces invalid python
# code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
# values. So, reinsert commas and retry.
            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
# (2) n-dim arrays with n > 1
s_want = want.strip()
s_got = got.strip()
cond = (s_want.startswith("[") and s_want.endswith("]") and
s_got.startswith("[") and s_got.endswith("]"))
if cond:
s_want = ", ".join(s_want[1:-1].split())
s_got = ", ".join(s_got[1:-1].split())
return self.check_output(s_want, s_got, optionflags)
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = (r'[\w\d_]+\(' +
', '.join([r'[\w\d_]+=(.+)']*num) +
r'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
Returns: list of [(success_flag, output), ...]
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = io.StringIO(newline='')
success = True
# Redirect stderr to the stdout or output
tmp_stderr = sys.stdout if doctest_warnings else output
from scipy._lib._util import _fixed_default_rng
@contextmanager
def temp_cwd():
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
yield tmpdir
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
# Run tests, trying to restore global state afterward
cwd = os.getcwd()
with np.errstate(), np.printoptions(), temp_cwd(), \
redirect_stderr(tmp_stderr), \
_fixed_default_rng():
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=output.write)
if fails > 0:
success = False
output.seek(0)
return success, output.read()
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except Exception:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
refguide can be signalled to skip testing code by adding
``#doctest: +SKIP`` to the end of the line. If the output varies or is
    random, add ``# may vary`` or ``# random`` to the comment. For example:
>>> plt.plot(...) # doctest: +SKIP
>>> random.randint(0,10)
5 # random
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not being doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
with open(fname, encoding='utf-8') as f:
text = f.read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,' # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-tutorial", action="store_true",
help="Skip running doctests in the tutorial.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
if args.module_names:
args.skip_tutorial = True
else:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_tutorial:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_tutorial:
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')
print('\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))
for filename in sorted(glob.glob(tut_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
dots=dots, doctest_warnings=args.doctest_warnings)
def scratch():
pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
cgre-aachen/gempy | examples/tutorials/ch5_probabilistic_modeling_DEP/ch5_2_introduction_pymc3.py | 1 | 4389 | """
5 2 - Introduction PyMC3.
=========================
"""
# %%
# Importing GemPy
import gempy as gp
# Importing auxiliary libraries
import os
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm
import arviz as az
from gempy.bayesian import plot_posterior as pp
from importlib import reload
from matplotlib.ticker import StrMethodFormatter
# %%
# Model definition
# ----------------
#
# %%
# This is to make it work in sphinx gallery
cwd = os.getcwd()
if not 'examples' in cwd:
path_dir = os.getcwd()+'/examples/tutorials/ch5_probabilistic_modeling'
else:
path_dir = cwd
# %%
geo_model = gp.load_model(r'/2-layers', path=path_dir+'/2-layers', recompile=True)
# %%
geo_model.modify_surface_points(2, Z=1000)
gp.compute_model(geo_model)
# %%
def plot_geo_setting_well():
device_loc = np.array([[6e3, 0, 3700]])
p2d = gp.plot_2d(geo_model, show_topography=True, legend=False)
well_1 = 3.41e3
well_2 = 3.6e3
p2d.axes[0].scatter([3e3], [well_1], marker='^', s=400, c='#71a4b3', zorder=10)
p2d.axes[0].scatter([9e3], [well_2], marker='^', s=400, c='#71a4b3', zorder=10)
p2d.axes[0].scatter(device_loc[:, 0], device_loc[:, 2], marker='x', s=400, c='#DA8886', zorder=10)
p2d.axes[0].vlines(3e3, .5e3, well_1, linewidth=4, color='gray')
p2d.axes[0].vlines(9e3, .5e3, well_2, linewidth=4, color='gray')
p2d.axes[0].vlines(3e3, .5e3, well_1)
p2d.axes[0].vlines(9e3, .5e3, well_2)
p2d.axes[0].set_xlim(2900, 3100)
plt.savefig('well.svg')
plt.show()
# %%
plot_geo_setting_well()
# Thickness measurements
# ----------------------
# %%
y_obs = [2.12]
y_obs_list = [2.12, 2.06, 2.08, 2.05, 2.08, 2.09,
2.19, 2.07, 2.16, 2.11, 2.13, 1.92]
np.random.seed(4003)
# %%
# Normal-several points
# ~~~~~~~~~~~~~~~~~~~~~
# .. image:: /../../_static/computational_graph1.png
#
# %%
with pm.Model() as model:
mu = pm.Normal('$\mu$', 2.08, .07)
sigma = pm.Gamma('$\sigma$', 0.3, 3)
y = pm.Normal('$y$', mu, sigma, observed=y_obs_list)
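# In the model above, `mu` and `sigma` are the priors on the mean thickness and
# its spread, and `y` is the Normal likelihood that ties them to the observed
# thickness measurements in `y_obs_list`.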
# %%
mu
# %%
sigma
# %%
y
# %%
# Sampling
# --------
#
# %%
with model:
prior = pm.sample_prior_predictive(1000)
trace = pm.sample(1000, discard_tuned_samples=False, cores=1)
post = pm.sample_posterior_predictive(trace)
# %%
data = az.from_pymc3(trace=trace,
prior=prior,
posterior_predictive=post)
# %%
az.plot_trace(data)
plt.show()
# %%
# Raw observations:
# ^^^^^^^^^^^^^^^^^
#
# %%
reload(pp)
p = pp.PlotPosterior(data)
p.create_figure(figsize=(9, 3), joyplot=False, marginal=False)
p.plot_normal_likelihood('$\mu$', '$\sigma$', '$y$', iteration=-1, hide_bell=True)
p.likelihood_axes.set_xlim(1.90, 2.2)
p.likelihood_axes.xaxis.set_major_formatter(StrMethodFormatter('{x:,.2f}'))
for tick in p.likelihood_axes.get_xticklabels():
tick.set_rotation(45)
plt.show()
# %%
# Final inference
# ^^^^^^^^^^^^^^^
#
# %%
reload(pp)
p = pp.PlotPosterior(data)
p.create_figure(figsize=(9, 3), joyplot=False, marginal=False)
p.plot_normal_likelihood('$\mu$', '$\sigma$', '$y$', iteration=-1, hide_lines=True)
p.likelihood_axes.set_xlim(1.70, 2.40)
p.likelihood_axes.xaxis.set_major_formatter(StrMethodFormatter('{x:,.2f}'))
for tick in p.likelihood_axes.get_xticklabels():
tick.set_rotation(45)
plt.show()
# %%
# Joyplot
# ~~~~~~~
#
# %%
# %matplotlib inline
reload(pp)
p = pp.PlotPosterior(data)
p.create_figure(figsize=(9, 9), joyplot=True, marginal=False, likelihood=False, n_samples=31)
p.plot_joy(('$\mu$', '$\sigma$'), '$y$', iteration=14)
plt.show()
# %%
# Joint probability
# ~~~~~~~~~~~~~~~~~
#
# %%
# sphinx_gallery_thumbnail_number = 6
reload(pp)
p = pp.PlotPosterior(data)
p.create_figure(figsize=(9, 5), joyplot=False, marginal=True, likelihood=True)
p.plot_marginal(var_names=['$\mu$', '$\sigma$'],
plot_trace=False, credible_interval=.93, kind='kde')
p.plot_normal_likelihood('$\mu$', '$\sigma$', '$y$', iteration=-1, hide_lines=True)
p.likelihood_axes.set_xlim(1.70, 2.40)
plt.show()
# %%
# Full plot
# ~~~~~~~~~
#
# %%
reload(pp)
p = pp.PlotPosterior(data)
p.create_figure(figsize=(9, 5), joyplot=True, marginal=True, likelihood=True, n_samples=11)
p.plot_posterior(['$\mu$', '$\sigma$'], ['$\mu$', '$\sigma$'], '$y$',
marginal_kwargs={'plot_trace': False, 'credible_interval': .93, 'kind': 'kde'})
plt.show() | lgpl-3.0 |
google-research/korvapuusti | listening_test_summer_2020/analysis/data_generation/distributions.py | 1 | 4943 | # Lint as: python3
"""Sampling from a SN and a uniform distribution.
Functionality to sample from a Skew Normal distribution and a continuous
log uniform distribution.
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
from typing import Tuple
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
def calculate_shift_scaling_skewnorm(desired_variance: int, desired_mean: int,
alpha: int) -> Tuple[float, float]:
"""Calculates the shift and scale to get a desired mean and variance.
Calculate the desired scaling and shifting parameters to get a desired
mean and variance from the Skew Normal Distribution.
I.e., if X ∼ SN(0, 1, alpha), and Y = scale * X + shift, what scale and shift
    do we need to get the desired mean and variance of Y ∼ SN(shift, scale, alpha)
Derived from: https://en.wikipedia.org/wiki/Skew_normal_distribution
Args:
desired_variance: the variance we want our variable to have
desired_mean: the mean we want our variable to have
alpha: the skewness parameter of the SN distribution
Returns:
The shift and scale parameters.
"""
delta = (alpha / np.sqrt(1 + alpha**2))
scaling = np.sqrt(desired_variance / (1 - (2 * delta**2) / np.pi))
shift = desired_mean - scaling * delta * np.sqrt(2 / np.pi)
return shift, scaling
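# Illustrative usage sketch (the numbers are arbitrary); it simply checks that
# the returned shift/scale reproduce the requested moments via
# scipy.stats.skewnorm, which is already imported above:
#
#   shift, scale = calculate_shift_scaling_skewnorm(
#       desired_variance=4, desired_mean=10, alpha=-4)
#   mean, var = scipy.stats.skewnorm.stats(-4, loc=shift, scale=scale)
#   # mean is ~10 and var is ~4, up to floating point error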
def plot_skewed_distribution(shift: float,
scale: float,
path: str,
num_samples=1000,
alpha=-4):
"""Plots the distribution SN(shift, scaling**2, alpha)."""
_, ax = plt.subplots(1, 1)
x = np.linspace(
scipy.stats.skewnorm.ppf(0.01, alpha, loc=shift, scale=scale),
scipy.stats.skewnorm.ppf(0.99, alpha, loc=shift, scale=scale), 100)
ax.plot(
x,
scipy.stats.skewnorm.pdf(x, alpha, loc=shift, scale=scale),
"r-",
lw=5,
alpha=0.6,
label="skewnorm pdf")
mean, var = scipy.stats.skewnorm.stats(alpha, loc=shift, scale=scale)
plt.title("SN - mean %.2f, var. %.2f, "
"std.dev. %.2f" % (mean, var, np.sqrt(var)))
r = scipy.stats.skewnorm.rvs(alpha, loc=shift, scale=scale, size=num_samples)
ax.hist(r, density=True, histtype="stepfilled", alpha=0.2)
ax.legend(loc="best", frameon=False)
plt.savefig(path)
return
def sample_skewed_distribution(shift: float, scale: float, alpha: int,
num_samples: int) -> np.ndarray:
"""Samples from num_samples from X ∼ SN(loc, scale**2, alpha)."""
return scipy.stats.skewnorm.rvs(
alpha, loc=shift, scale=scale, size=num_samples)
def calculate_shift_scaling_loguniform(desired_lowerbound: int,
desired_upperbound: int,
log_base: int) -> Tuple[float, float]:
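    """Calculates shift (loc) and scale for a log-uniform distribution.

    `loc` and `scale` are chosen so that log_base ** U[loc, loc + scale]
    spans [desired_lowerbound, desired_upperbound]; see `sample_log_distribution`.
    """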
loc = math.log(desired_lowerbound, log_base)
scale = math.log(desired_upperbound, log_base) - loc
return loc, scale
def plot_uniform_distribution(num_samples: int, shift: float, scale: float,
path: str):
"""Plots the distribution U[shift, shift + scale]."""
_, ax = plt.subplots(1, 1)
x = np.linspace(scipy.stats.uniform.ppf(0.01, loc=shift, scale=scale),
scipy.stats.uniform.ppf(0.99, loc=shift, scale=scale), 100)
ax.plot(x,
scipy.stats.uniform.pdf(x, loc=shift, scale=scale),
"r-", lw=5, alpha=0.6, label="uniform pdf")
mean, var = scipy.stats.uniform.stats(moments="mv", loc=shift, scale=scale)
plt.title("U - mean %.2f, var. %.2f, "
"std.dev. %.2f" % (mean, var, np.sqrt(var)))
r = scipy.stats.uniform.rvs(size=num_samples, loc=shift, scale=scale)
ax.hist(r, density=True, histtype="stepfilled", alpha=0.2)
ax.legend(loc="best", frameon=False)
plt.savefig(path)
return
def sample_log_distribution(num_samples: int, log_base: int, shift: float,
scale: float) -> np.ndarray:
"""Samples from a log-uniform distribution by log-scaling a uniform dist."""
sampled_values = scipy.stats.uniform.rvs(
size=num_samples, loc=shift, scale=scale)
return log_base**sampled_values
def sample_uniform_distribution(num_samples: int, a: int, b: int):
"""Samples num_samples times uniformly between a and b."""
loc = a
scale = b - loc
return scipy.stats.uniform.rvs(size=num_samples, loc=loc, scale=scale)
| apache-2.0 |
mne-tools/mne-tools.github.io | 0.11/_downloads/plot_find_eog_artifacts.py | 19 | 1219 | """
==================
Find EOG artifacts
==================
Locate peaks of EOG to spot blinks and general EOG artifacts.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
event_id = 998
eog_events = mne.preprocessing.find_eog_events(raw, event_id)
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=True,
exclude='bads')
tmin, tmax = -0.2, 0.2
epochs = mne.Epochs(raw, eog_events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
print("Number of detected EOG artifacts : %d" % len(data))
###############################################################################
# Plot EOG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('EOG (muV)')
plt.show()
| bsd-3-clause |
zfrenchee/pandas | asv_bench/benchmarks/indexing.py | 1 | 9903 | from .pandas_vb_common import *
class Int64Indexing(object):
goal_time = 0.2
def setup(self):
self.s = Series(np.random.rand(1000000))
def time_getitem_scalar(self):
self.s[800000]
def time_getitem_slice(self):
self.s[:800000]
def time_getitem_list_like(self):
self.s[[800000]]
def time_getitem_array(self):
self.s[np.arange(10000)]
def time_getitem_lists(self):
self.s[np.arange(10000).tolist()]
def time_iloc_array(self):
self.s.iloc[np.arange(10000)]
def time_iloc_list_like(self):
self.s.iloc[[800000]]
def time_iloc_scalar(self):
self.s.iloc[800000]
def time_iloc_slice(self):
self.s.iloc[:800000]
def time_ix_array(self):
self.s.ix[np.arange(10000)]
def time_ix_list_like(self):
self.s.ix[[800000]]
def time_ix_scalar(self):
self.s.ix[800000]
def time_ix_slice(self):
self.s.ix[:800000]
def time_loc_array(self):
self.s.loc[np.arange(10000)]
def time_loc_list_like(self):
self.s.loc[[800000]]
def time_loc_scalar(self):
self.s.loc[800000]
def time_loc_slice(self):
self.s.loc[:800000]
class StringIndexing(object):
goal_time = 0.2
def setup(self):
self.index = tm.makeStringIndex(1000000)
self.s = Series(np.random.rand(1000000), index=self.index)
self.lbl = self.s.index[800000]
def time_getitem_label_slice(self):
self.s[:self.lbl]
def time_getitem_pos_slice(self):
self.s[:800000]
def time_get_value(self):
self.s.get_value(self.lbl)
class DatetimeIndexing(object):
goal_time = 0.2
def setup(self):
tm.N = 1000
self.ts = tm.makeTimeSeries()
self.dt = self.ts.index[500]
def time_getitem_scalar(self):
self.ts[self.dt]
class DataFrameIndexing(object):
goal_time = 0.2
def setup(self):
self.index = tm.makeStringIndex(1000)
self.columns = tm.makeStringIndex(30)
self.df = DataFrame(np.random.randn(1000, 30), index=self.index,
columns=self.columns)
self.idx = self.index[100]
self.col = self.columns[10]
self.df2 = DataFrame(np.random.randn(10000, 4),
columns=['A', 'B', 'C', 'D'])
self.indexer = (self.df2['B'] > 0)
self.obj_indexer = self.indexer.astype('O')
        # duplicates
self.idx_dupe = (np.array(range(30)) * 99)
self.df3 = DataFrame({'A': ([0.1] * 1000), 'B': ([1] * 1000),})
self.df3 = concat([self.df3, (2 * self.df3), (3 * self.df3)])
self.df_big = DataFrame(dict(A=(['foo'] * 1000000)))
def time_get_value(self):
self.df.get_value(self.idx, self.col)
def time_get_value_ix(self):
self.df.ix[(self.idx, self.col)]
def time_getitem_scalar(self):
self.df[self.col][self.idx]
def time_boolean_rows(self):
self.df2[self.indexer]
def time_boolean_rows_object(self):
self.df2[self.obj_indexer]
def time_iloc_dups(self):
self.df3.iloc[self.idx_dupe]
def time_loc_dups(self):
self.df3.loc[self.idx_dupe]
def time_iloc_big(self):
self.df_big.iloc[:100, 0]
class IndexingMethods(object):
# GH 13166
goal_time = 0.2
def setup(self):
a = np.arange(100000)
self.ind = pd.Float64Index(a * 4.8000000418824129e-08)
self.s = Series(np.random.rand(100000))
self.ts = Series(np.random.rand(100000),
index=date_range('2011-01-01', freq='S', periods=100000))
self.indexer = ([True, False, True, True, False] * 20000)
def time_get_loc_float(self):
self.ind.get_loc(0)
def time_take_dtindex(self):
self.ts.take(self.indexer)
def time_take_intindex(self):
self.s.take(self.indexer)
class MultiIndexing(object):
goal_time = 0.2
def setup(self):
self.mi = MultiIndex.from_tuples([(x, y) for x in range(1000) for y in range(1000)])
self.s = Series(np.random.randn(1000000), index=self.mi)
self.df = DataFrame(self.s)
# slicers
np.random.seed(1234)
self.idx = pd.IndexSlice
self.n = 100000
self.mdt = pandas.DataFrame()
self.mdt['A'] = np.random.choice(range(10000, 45000, 1000), self.n)
self.mdt['B'] = np.random.choice(range(10, 400), self.n)
self.mdt['C'] = np.random.choice(range(1, 150), self.n)
self.mdt['D'] = np.random.choice(range(10000, 45000), self.n)
self.mdt['x'] = np.random.choice(range(400), self.n)
self.mdt['y'] = np.random.choice(range(25), self.n)
self.test_A = 25000
self.test_B = 25
self.test_C = 40
self.test_D = 35000
self.eps_A = 5000
self.eps_B = 5
self.eps_C = 5
self.eps_D = 5000
self.mdt2 = self.mdt.set_index(['A', 'B', 'C', 'D']).sortlevel()
self.miint = MultiIndex.from_product(
[np.arange(1000),
np.arange(1000)], names=['one', 'two'])
import string
self.mi_large = MultiIndex.from_product(
[np.arange(1000), np.arange(20), list(string.ascii_letters)],
names=['one', 'two', 'three'])
self.mi_med = MultiIndex.from_product(
[np.arange(1000), np.arange(10), list('A')],
names=['one', 'two', 'three'])
self.mi_small = MultiIndex.from_product(
[np.arange(100), list('A'), list('A')],
names=['one', 'two', 'three'])
rng = np.random.RandomState(4)
size = 1 << 16
self.mi_unused_levels = pd.MultiIndex.from_arrays([
rng.randint(0, 1 << 13, size),
rng.randint(0, 1 << 10, size)])[rng.rand(size) < 0.1]
def time_series_xs_mi_ix(self):
self.s.ix[999]
def time_frame_xs_mi_ix(self):
self.df.ix[999]
def time_multiindex_slicers(self):
self.mdt2.loc[self.idx[
(self.test_A - self.eps_A):(self.test_A + self.eps_A),
(self.test_B - self.eps_B):(self.test_B + self.eps_B),
(self.test_C - self.eps_C):(self.test_C + self.eps_C),
(self.test_D - self.eps_D):(self.test_D + self.eps_D)], :]
def time_multiindex_get_indexer(self):
self.miint.get_indexer(
np.array([(0, 10), (0, 11), (0, 12),
(0, 13), (0, 14), (0, 15),
(0, 16), (0, 17), (0, 18),
(0, 19)], dtype=object))
def time_multiindex_large_get_loc(self):
self.mi_large.get_loc((999, 19, 'Z'))
def time_multiindex_large_get_loc_warm(self):
for _ in range(1000):
self.mi_large.get_loc((999, 19, 'Z'))
def time_multiindex_med_get_loc(self):
self.mi_med.get_loc((999, 9, 'A'))
def time_multiindex_med_get_loc_warm(self):
for _ in range(1000):
self.mi_med.get_loc((999, 9, 'A'))
def time_multiindex_string_get_loc(self):
self.mi_small.get_loc((99, 'A', 'A'))
def time_multiindex_small_get_loc_warm(self):
for _ in range(1000):
self.mi_small.get_loc((99, 'A', 'A'))
def time_is_monotonic(self):
self.miint.is_monotonic
def time_remove_unused_levels(self):
self.mi_unused_levels.remove_unused_levels()
class IntervalIndexing(object):
goal_time = 0.2
def setup(self):
self.monotonic = Series(np.arange(1000000),
index=IntervalIndex.from_breaks(np.arange(1000001)))
def time_getitem_scalar(self):
self.monotonic[80000]
def time_loc_scalar(self):
self.monotonic.loc[80000]
def time_getitem_list(self):
self.monotonic[80000:]
def time_loc_list(self):
self.monotonic.loc[80000:]
class PanelIndexing(object):
goal_time = 0.2
def setup(self):
self.p = Panel(np.random.randn(100, 100, 100))
self.inds = range(0, 100, 10)
def time_subset(self):
self.p.ix[(self.inds, self.inds, self.inds)]
class IndexerLookup(object):
goal_time = 0.2
def setup(self):
self.s = Series(range(10))
def time_lookup_iloc(self):
self.s.iloc
def time_lookup_ix(self):
self.s.ix
def time_lookup_loc(self):
self.s.loc
class BooleanRowSelect(object):
goal_time = 0.2
def setup(self):
N = 10000
np.random.seed(1234)
self.df = DataFrame(np.random.randn(N, 100))
self.bool_arr = np.zeros(N, dtype=bool)
self.bool_arr[:1000] = True
def time_frame_boolean_row_select(self):
self.df[self.bool_arr]
class GetItemSingleColumn(object):
goal_time = 0.2
def setup(self):
np.random.seed(1234)
self.df2 = DataFrame(np.random.randn(3000, 1), columns=['A'])
self.df3 = DataFrame(np.random.randn(3000, 1))
def time_frame_getitem_single_column_label(self):
self.df2['A']
def time_frame_getitem_single_column_int(self):
self.df3[0]
class AssignTimeseriesIndex(object):
goal_time = 0.2
def setup(self):
N = 100000
np.random.seed(1234)
        idx = date_range('1/1/2000', periods=N, freq='H')
self.df = DataFrame(np.random.randn(N, 1), columns=['A'], index=idx)
def time_frame_assign_timeseries_index(self):
self.df['date'] = self.df.index
class InsertColumns(object):
goal_time = 0.2
def setup(self):
self.N = 10**3
        self.df = DataFrame(index=range(self.N))
def time_insert(self):
np.random.seed(1234)
for i in range(100):
self.df.insert(0, i, np.random.randn(self.N))
def time_assign_with_setitem(self):
np.random.seed(1234)
for i in range(100):
self.df[i] = np.random.randn(self.N)
| bsd-3-clause |
jinzishuai/learn2deeplearn | deeplearning.ai/C4.CNN/week2_DeepModelCaseStudy/hw/KerasTutorial/happyHouse.py | 1 | 14323 | #!/usr/bin/python3
# coding: utf-8
# # Keras tutorial - the Happy House
#
# Welcome to the first assignment of week 2. In this assignment, you will:
# 1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK.
# 2. See how you can in a couple of hours build a deep learning algorithm.
#
# Why are we using Keras? Keras was developed to enable deep learning engineers to build and experiment with different models very quickly. Just as TensorFlow is a higher-level framework than Python, Keras is an even higher-level framework and provides additional abstractions. Being able to go from idea to result with the least possible delay is key to finding good models. However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you can implement in TensorFlow but not (without more difficulty) in Keras. That being said, Keras will work fine for many common models.
#
# In this exercise, you'll work on the "Happy House" problem, which we'll explain below. Let's load the required packages and solve the problem of the Happy House!
# In[1]:
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
#get_ipython().magic('matplotlib inline')
# **Note**: As you can see, we've imported a lot of functions from Keras. You can use them easily just by calling them directly in the notebook. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`.
# ## 1 - The Happy House
#
# For your next vacation, you decided to spend a week with five of your friends from school. It is a very convenient house with many things to do nearby. But the most important benefit is that everybody has commited to be happy when they are in the house. So anyone wanting to enter the house must prove their current state of happiness.
#
# <img src="images/happy-house.jpg" style="width:350px;height:270px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **the Happy House**</center></caption>
#
#
# As a deep learning expert, to make sure the "Happy" rule is strictly applied, you are going to build an algorithm that uses pictures from the front door camera to check if the person is happy or not. The door should open only if the person is happy.
#
# You have gathered pictures of your friends and yourself, taken by the front-door camera. The dataset is labeled.
#
# <img src="images/house-members.png" style="width:550px;height:250px;">
#
# Run the following code to normalize the dataset and learn about its shapes.
# In[2]:
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Reshape
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# **Details of the "Happy" dataset**:
# - Images are of shape (64,64,3)
# - Training: 600 pictures
# - Test: 150 pictures
#
# It is now time to solve the "Happy" Challenge.
# ## 2 - Building a model in Keras
#
# Keras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.
#
# Here is an example of a model in Keras:
#
# ```python
# def model(input_shape):
# # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
# X_input = Input(input_shape)
#
# # Zero-Padding: pads the border of X_input with zeroes
# X = ZeroPadding2D((3, 3))(X_input)
#
# # CONV -> BN -> RELU Block applied to X
# X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
# X = BatchNormalization(axis = 3, name = 'bn0')(X)
# X = Activation('relu')(X)
#
# # MAXPOOL
# X = MaxPooling2D((2, 2), name='max_pool')(X)
#
# # FLATTEN X (means convert it to a vector) + FULLYCONNECTED
# X = Flatten()(X)
# X = Dense(1, activation='sigmoid', name='fc')(X)
#
# # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
# model = Model(inputs = X_input, outputs = X, name='HappyModel')
#
# return model
# ```
#
# Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow. In particular, rather than creating and assigning a new variable on each step of forward propagation such as `X`, `Z1`, `A1`, `Z2`, `A2`, etc. for the computations for the different layers, in Keras code each line above just reassigns `X` to a new value using `X = ...`. In other words, during each step of forward propagation, we are just writing the latest value in the computation into the same variable `X`. The only exception was `X_input`, which we kept separate and did not overwrite, since we needed it at the end to create the Keras model instance (`model = Model(inputs = X_input, ...)` above).
#
# **Exercise**: Implement a `HappyModel()`. This assignment is more open-ended than most. We suggest that you start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model. But after that, come back and take initiative to try out other model architectures. For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish. You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`.
#
# **Note**: You have to be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying it to.
# In[3]:
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
"""
Implementation of the HappyModel.
Arguments:
input_shape -- shape of the images of the dataset
Returns:
model -- a Model() instance in Keras
"""
### START CODE HERE ###
# Feel free to use the suggested outline in the text above to get started, and run through the whole
    # exercise (including the later portions of this notebook) once. Then come back and try out other
# network architectures as well.
# Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
X_input = Input(input_shape)
# Zero-Padding: pads the border of X_input with zeroes
X = ZeroPadding2D((3, 3))(X_input)
# CONV -> BN -> RELU Block applied to X
X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
# MAXPOOL
X = MaxPooling2D((2, 2), name='max_pool')(X)
# FLATTEN X (means convert it to a vector) + FULLYCONNECTED
X = Flatten()(X)
X = Dense(1, activation='sigmoid', name='fc')(X)
# Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
model = Model(inputs = X_input, outputs = X, name='HappyModel')
### END CODE HERE ###
return model
# You have now built a function to describe your model. To train and test this model, there are four steps in Keras:
# 1. Create the model by calling the function above
# 2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`
# 3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`
# 4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`
#
# If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).
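#
# A compact, illustrative sketch of the four steps chained together (the exact
# hyperparameters here are placeholders, not the graded answers):
#
# ```python
# happyModel = HappyModel(X_train.shape[1:])
# happyModel.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# happyModel.fit(x=X_train, y=Y_train, epochs=10, batch_size=16)
# preds = happyModel.evaluate(x=X_test, y=Y_test)
# ```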
#
# **Exercise**: Implement step 1, i.e. create the model.
# In[5]:
### START CODE HERE ### (1 line)
happyModel = HappyModel(X_train[0,:,:,:].shape)
### END CODE HERE ###
# **Exercise**: Implement step 2, i.e. compile the model to configure the learning process. Choose the 3 arguments of `compile()` wisely. Hint: the Happy Challenge is a binary classification problem.
# In[7]:
### START CODE HERE ### (1 line)
happyModel.compile(optimizer = "Adam", loss = "binary_crossentropy", metrics = ["accuracy"])
### END CODE HERE ###
# **Exercise**: Implement step 3, i.e. train the model. Choose the number of epochs and the batch size.
# In[8]:
### START CODE HERE ### (1 line)
happyModel.fit(x = X_train, y = Y_train, epochs = 10, batch_size = 20)
### END CODE HERE ###
# Note that if you run `fit()` again, the `model` will continue to train with the parameters it has already learnt instead of reinitializing them.
#
# **Exercise**: Implement step 4, i.e. test/evaluate the model.
# In[9]:
### START CODE HERE ### (1 line)
preds = happyModel.evaluate(x = X_test, y = Y_test)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
# If your `happyModel()` function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets. To pass this assignment, you have to get at least 75% accuracy.
#
# To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer. But our model gets decent accuracy after just 2-5 epochs, so if you're comparing different models you can also train a variety of models on just a few epochs and see how they compare.
#
# If you have not yet achieved 75% accuracy, here're some things you can play around with to try to achieve it:
#
# - Try using blocks of CONV->BATCHNORM->RELU such as:
# ```python
# X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
# X = BatchNormalization(axis = 3, name = 'bn0')(X)
# X = Activation('relu')(X)
# ```
# until your height and width dimensions are quite low and your number of channels quite large (≈32 for example). You are encoding useful information in a volume with a lot of channels. You can then flatten the volume and use a fully-connected layer.
# - You can use MAXPOOL after such blocks. It will help you lower the dimension in height and width.
# - Change your optimizer. We find Adam works well.
# - If the model is struggling to run and you get memory issues, lower your batch_size (12 is usually a good compromise)
# - Run on more epochs, until you see the train accuracy plateauing.
#
# Even if you have achieved 75% accuracy, please feel free to keep playing with your model to try to get even better results.
#
# **Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. But just for the purpose of this assignment, we won't worry about that here.
#
# ## 3 - Conclusion
#
# Congratulations, you have solved the Happy House challenge!
#
# Now, you just need to link this model to the front-door camera of your house. We unfortunately won't go into the details of how to do that here.
# <font color='blue'>
# **What we would like you to remember from this assignment:**
# - Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures. Are there any applications of deep learning to your daily life that you'd like to implement using Keras?
# - Remember how to code a model in Keras and the four steps leading to the evaluation of your model on the test set. Create->Compile->Fit/Train->Evaluate/Test.
# ## 4 - Test with your own image (Optional)
#
# Congratulations on finishing this assignment. You can now take a picture of your face and see if you could enter the Happy House. To do that:
# 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
# 2. Add your image to this Jupyter Notebook's directory, in the "images" folder
# 3. Write your image's name in the following code
# 4. Run the code and check if the algorithm is right (0 is unhappy, 1 is happy)!
#
# The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
# In[10]:
### START CODE HERE ###
img_path = 'images/my_image.jpg'
### END CODE HERE ###
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print(happyModel.predict(x))
# ## 5 - Other useful functions in Keras (Optional)
#
# Two other basic features of Keras that you'll find useful are:
# - `model.summary()`: prints the details of your layers in a table with the sizes of their inputs/outputs
# - `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" (via the `to_file` argument) if you'd like to share it on social media ;), and display it inline with SVG(). The saved file shows up under "File" then "Open..." in the upper bar of the notebook.
#
# Run the following code.
# In[11]:
happyModel.summary()
# In[12]:
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
# In[ ]:
| gpl-3.0 |
csiu/promi2 | code/plots.py | 1 | 13523 | #!/usr/bin/env python
# Author: csiu
# Created: 2015-04-24
import argparse
from ConfigParser import SafeConfigParser
import re
import os
import sys
import pandas as pd
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
from rpy2.robjects.lib import ggplot2
from rpy2.robjects import r
from utils import get_value_from_keycolonvalue_list, ensure_dir
import label as lb
usage = """Generate plots (pie + histogram) with R via rpy2
Depends on:
- pandas (python module)
- rpy2 (python module)
- ggplot2 (R package)
"""
def _check_labelling(infile, labelfile):
## simple check
with open(infile) as f:
for l in f:
info = l.strip().split('\t')[8].split(';')
label = get_value_from_keycolonvalue_list('mirna_label', info)
if label == '':
isLabelled = False
else:
isLabelled = True
break
if isLabelled:
return infile
else:
print '## No labelling is found, proceed with labelling...'
outfile = '%s.label' % infile
lb.main(infile, labelfile, outfile)
return outfile
def _verify_valid_distance(infile):
out_good = infile + '.validdistance'
out_bad = infile + '.badpair'
with open(out_bad, 'w') as outB:
with open(out_good, 'w') as outG:
with open(infile) as f:
for l in f:
l = l.strip().split('\t')
info = l[8].split(';')
d = get_value_from_keycolonvalue_list('distance', info)
if d == 'NA':
chrom = l[0]
start = l[3]
stop = l[4]
strand = l[6]
mirna = get_value_from_keycolonvalue_list('mirna_query', info)
badpair = 'chr%s:%s..%s,%s\t%s' % (chrom, start, stop, strand,
mirna)
outB.write(badpair + '\n')
else:
outG.write('\t'.join(l) + '\n')
if os.stat(out_bad).st_size != 0:
print "## There are some bad positions in your input file:"
print "## chromosome or strand differences between TSS and miRNA pair"
print out_bad
else:
os.remove(out_bad)
return out_good
def _filterPredictionsByClass_reformat2gff(infile, outdir, keep='prom'):
outfile = os.path.join(outdir, os.path.basename(infile)+'.filtered')
with open(outfile, 'w') as out:
with open(infile) as f:
for l in f:
l = l.strip().split('\t')
classlabel = l[13]
if classlabel == keep:
newinfo = ';'.join([l[8],
'prior_prom:' + l[9],
'prior_back:' + l[10],
'prob_prom:' + l[11],
'prob_back:' + l[12],
'class:' + classlabel])
newline = '\t'.join(l[0:8] + [newinfo])
out.write(newline + '\n')
return outfile
def _read_dat(gff_infile):
dat = {}
n = 0
with open(gff_infile) as f:
for l in f:
n += 1
l = l.strip().split('\t')
chrom = l[0]
tstart = l[3]
tstop = l[4]
strand = l[6]
tss = ','.join([chrom, tstart, tstop, strand])
info = l[8].split(';')
mirbase_id = get_value_from_keycolonvalue_list('mirbase_id', info)
mstart = get_value_from_keycolonvalue_list('mirna_start', info)
            mstop = get_value_from_keycolonvalue_list('mirna_stop', info)
label = get_value_from_keycolonvalue_list('mirna_label', info)
if label == '': label = 'NA'
mirna = ','.join([chrom, mstart, mstop, strand])
features = l[7].split(';')
corr = get_value_from_keycolonvalue_list('corr', features)
if get_value_from_keycolonvalue_list('mirna_prox', features) != 0:
distance = get_value_from_keycolonvalue_list('distance', info)
if distance == '': distance = 0
dat[n] = [tss, mirna, mirbase_id, label, distance, abs(float(distance)), corr]
dat = pd.DataFrame.from_dict(dat, orient='index')
dat.columns = ['tss', 'mirna', 'mirbase_id', 'label', 'Distance', 'distance', 'correlation']
return dat
def _item_findClosestPartner(dat, item, ignoreCorr):
df = dat[dat['label'] != 'NA'] ## remove invalid pairs
xindex = []
for i in df[item].unique():
subm = df[df[item] == i]
## min distance
submd = subm[subm['distance'] == subm['distance'].min()]
if ignoreCorr:
l = submd
else:
## absmax correlation
l = submd['correlation'].astype(float).abs()
l = l[l == l.max()]
xindex.append(l.index[0])
return dat[dat.index.isin(xindex)]
def _plt_percountr(dat, independentpdf=False, fname='xpercount.pdf'):
def _filt_dat(dat, item, getlabel=True):
df = pd.DataFrame(dat[item].value_counts())
df.columns = ['count']
if getlabel: df['label'] = [list(dat[dat[item] == i]['label'])[0] for i in df.index]
n = len(df)
mx = max(df['count'])
return df, n, mx
dat = dat[dat['label'] != 'NA']
## NUMBER OF MIRNA PER TSS
df, n, mx = _filt_dat(dat, 'tss', False)
df = {'count': robjects.IntVector(df['count'])}
df = robjects.DataFrame(df)
pt = ggplot2.ggplot(df) + \
ggplot2.geom_histogram(binwidth=1, origin=-.5, alpha=.5, position="identity") + \
ggplot2.xlim(-.5, mx+1) + \
ggplot2.aes_string(x='count') + \
ggplot2.ggtitle('TSS [Total = %s]' % n) + \
ggplot2.labs(x='Number of miRNA per TSS (max = %s)' % mx)
pt_den = ggplot2.ggplot(df) + \
ggplot2.aes_string(x='count', y='..density..') + \
ggplot2.geom_density(binwidth=1, alpha=.5, origin=-.5) + \
ggplot2.geom_histogram(binwidth=1, alpha=.33, position='identity', origin=-.5) + \
ggplot2.ggtitle('TSS [Total = %s]' % n) + \
ggplot2.labs(x='Number of miRNA per TSS (max = %s)' % mx)
## NUMBER OF TSS PER MIRNA
df, n, mx = _filt_dat(dat, 'mirna')
df = {'count': robjects.IntVector(df['count']),
'label': robjects.StrVector(df['label']) }
df = robjects.DataFrame(df)
_pm = ggplot2.ggplot(df) + \
ggplot2.geom_histogram(binwidth=1, origin=-.5, alpha=.5, position="identity") + \
ggplot2.xlim(-.5, mx+1) + \
ggplot2.ggtitle('miRNA [Total = %s]' % n)
_pm_den = ggplot2.ggplot(df) + \
ggplot2.geom_density(binwidth=1, alpha=.5, origin=-.5) + \
ggplot2.geom_histogram(binwidth=1, alpha=.33, position='identity', origin=-.5) + \
ggplot2.ggtitle('miRNA [Total = %s]' % n)
## not split by label
pm = _pm + ggplot2.aes_string(x='count')
pm_den = _pm_den + ggplot2.aes_string(x='count', y='..density..')
## split by label
pms = _pm + ggplot2.aes_string(x='count', fill='label')
pm_dens = _pm_den + ggplot2.aes_string(x='count', fill='label', y='..density..')
## add xlabelling (need to be added after aes_string)
_xlab = ggplot2.labs(x='Number of TSS per miRNA (max = %s)' % mx)
pm += _xlab
pm_den += _xlab
pms += _xlab
pm_dens += _xlab
if independentpdf:
grdevices = importr('grDevices')
grdevices.pdf(fname)
pt.plot()
pt_den.plot()
pm.plot()
pm_den.plot()
pms.plot()
pm_dens.plot()
grdevices.dev_off()
else:
pt.plot()
pt_den.plot()
pm.plot()
pm_den.plot()
pms.plot()
pm_dens.plot()
return
def _plt_distr(dat, col, title='', splitBy_pfill=True, pfill='label', independentpdf=False, fname='xdistr.pdf'):
df = dat[dat[pfill] != 'NA'] ## remove invalid pairs
n = len(df)
df = {col: robjects.FloatVector(list(df[col])),
pfill: robjects.StrVector(list(df[pfill]))}
df = robjects.DataFrame(df)
pp = ggplot2.ggplot(df) + \
ggplot2.ggtitle('%s [Total = %s]' % (title, n))
## Plot1: counts
if splitBy_pfill:
p1 = pp + ggplot2.aes_string(x=col, fill=pfill)
else:
p1 = pp + ggplot2.aes_string(x=col)
## Plot2: density
if splitBy_pfill:
p2 = pp + ggplot2.aes_string(x=col, fill=pfill, y='..density..')
else:
p2 = pp + ggplot2.aes_string(x=col, y='..density..')
p2 = p2 + ggplot2.geom_density(alpha=.5, origin=-500)
if col == 'distance':
p1 = p1 + \
ggplot2.geom_histogram(binwidth=1000, alpha=.5, position='identity', origin=-500) + \
ggplot2.xlim(-1000, 51000)
p2 = p2 + \
ggplot2.geom_histogram(binwidth=1000, alpha=.33, position='identity', origin=-500) + \
ggplot2.xlim(-1000, 51000)
else:
p1 = p1 + \
ggplot2.geom_histogram(alpha=.5, position='identity')
p2 = p2 + \
ggplot2.geom_histogram(alpha=.33, position='identity')
if col == 'correlation':
p1 = p1 + ggplot2.xlim(-1.1, 1.1)
p2 = p2 + ggplot2.xlim(-1.1, 1.1)
if independentpdf:
grdevices = importr('grDevices')
grdevices.pdf(file=fname)
p1.plot()
p2.plot()
grdevices.dev_off()
else:
p1.plot()
p2.plot()
return
def _plt_pier(dat, title='', rm_na=False, col='label',
independentpdf=False, fname='xpire.pdf'):
x = dat[col]
if rm_na: x = x[x != 'NA']
x = x.value_counts()
_sum = sum(x)
r.assign('n', len(x))
slices = robjects.FloatVector(x)
pct = ['%0.1f%%' % float(float(i)*100/_sum) for i in x]
lbls = ['%s %s' % i for i in zip(x.index, pct)]
lbls = robjects.StrVector(lbls)
ggcolors = r('''
gg_color_hue <- function(n){
hues = seq(15, 375, length=n+1)
hcl(h=hues, l=65, c=100)[1:n]
}
rev(gg_color_hue(n))
''')
if independentpdf:
grdevices = importr('grDevices')
        grdevices.pdf(fname)
r.pie(slices,
labels=lbls,
init_angle=90,
col=ggcolors,
main=title)
r.text(-1, -1, '[Total = %s]' % _sum)
if independentpdf:
grdevices.dev_off()
return
def main(infile, outdir, config):
outdir = os.path.abspath(outdir)
ensure_dir(outdir, False)
cparser = SafeConfigParser()
cparser.read(config)
labelfile = cparser.get('configs', 'labelfile')
infile = _check_labelling(infile, labelfile)
infile = _verify_valid_distance(infile)
infile = _filterPredictionsByClass_reformat2gff(infile, outdir)
bname = os.path.basename(infile)
pdf_rplots = os.path.join(outdir, bname + '.plots.pdf')
dat = _read_dat(infile)
if all(dat['correlation'] == ''):
ignoreCorr = True
else:
ignoreCorr = False
dat_mirna = _item_findClosestPartner(dat, 'mirna', ignoreCorr)
dat_tss = _item_findClosestPartner(dat, 'tss', ignoreCorr)
print '## Generating plot file...'
grdevices = importr('grDevices')
grdevices.pdf(file=pdf_rplots)
_plt_pier(dat, 'All predicted TSS-miRNA pairs', True)
_plt_distr(dat, 'distance', 'All predicted tss-miRNA pairs', False)
_plt_distr(dat, 'distance', 'All predicted tss-miRNA pairs')
if not ignoreCorr:
_plt_distr(dat, 'correlation', 'All predicted tss-miRNA pairs', False)
_plt_distr(dat, 'correlation', 'All predicted tss-miRNA pairs')
_plt_percountr(dat)
_plt_pier(dat_tss, 'TSS (label from closest miRNA)')
_plt_distr(dat_tss, 'distance', 'TSS to closest miRNA', False)
_plt_distr(dat_tss, 'distance', 'TSS to closest miRNA')
if not ignoreCorr:
_plt_distr(dat_tss, 'correlation', 'TSS to closest miRNA', False)
_plt_distr(dat_tss, 'correlation', 'TSS to closest miRNA')
_plt_pier(dat_mirna, 'miRNA')
_plt_distr(dat_mirna, 'distance', 'miRNA to closest TSS', False)
_plt_distr(dat_mirna, 'distance', 'miRNA to closest TSS')
if not ignoreCorr:
_plt_distr(dat_mirna, 'correlation', 'miRNA to closest TSS', False)
_plt_distr(dat_mirna, 'correlation', 'miRNA to closest TSS')
grdevices.dev_off()
print '## Plot file:'
print pdf_rplots
return pdf_rplots
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=usage,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', '--infile', dest='infile',
required=True,
help='''path to input file;
e.g. output of "label.py"
"mirna_label:" should be contained in the info column (9)''')
parser.add_argument('-o', '--outdir', dest='outdir',
default='../Testout-plot',
help='''specify path to output directory''')
parser.add_argument('-c', '--config', dest='config',
default='config.ini',
help='path to config file; default="config.ini"')
##get at the arguments
args = parser.parse_args()
## do something..
main(args.infile, args.outdir, args.config)
| mit |
ssh0/growing-string | triangular_lattice/growing_string.py | 1 | 19450 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-07-12
from __future__ import print_function
from triangular import LatticeTriangular as LT
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import matplotlib.cm as cm
import matplotlib.animation as animation
from base import Main as base
from strings import String
import numpy as np
import operator
import pprint
pp = pprint.PrettyPrinter(indent=4)
class Main(base):
"""任意に設定したstringの近傍点に点を追加し成長させるモデル
グラフ上で左端と右端に固定されたstringの近傍点を探索,ランダム(後には曲げ
弾性による重み付けの効果を追加)に選択し,stringを成長させていくモデル
"""
def __init__(self, Lx=40, Ly=40,
boundary={'h': 'periodic', 'v': 'periodic'},
size=[5, 4, 10, 12],
plot=True,
plot_surface=True,
save_image=False,
save_video=False,
filename_image="",
filename_video="",
frames=1000,
beta = 2.,
interval=1,
weight_const=0.5,
strings=None,
pre_function=None,
post_function=None):
"""Init function of Main class.
        Lx (int (even)): number of lattice sites in the x direction (y axis in the plot)
        Ly (int (even)): number of lattice sites in the y direction (x axis in the plot)
"""
# Create triangular lattice with given parameters
# self.lattice = LT(np.zeros((Lx, Ly), dtype=np.int),
# scale=float(max(Lx, Ly)), boundary=boundary)
self.lattice = LT(
np.zeros((Lx, Ly), dtype=np.int),
scale=float(max(Lx, Ly)),
boundary=boundary
)
self.lattice_X = self.lattice.coordinates_x.reshape(
self.lattice.Lx,
self.lattice.Ly
)
self.lattice_Y = self.lattice.coordinates_y.reshape(
self.lattice.Lx,
self.lattice.Ly
)
self.occupied = np.zeros((Lx, Ly), dtype=np.bool)
self.number_of_lines = Lx
if strings is None:
# Put the strings to the lattice
self.strings = self.create_random_strings(len(size), size)
else:
self.strings = [String(self.lattice, **st) for st in strings]
for string in self.strings:
self.occupied[string.pos_x, string.pos_y] = True
self.plot = plot
self.plot_surface = plot_surface
self.save_image = save_image
self.save_video = save_video
if self.save_image:
if filename_image == "":
raise AttributeError("`filename_image` is empty.")
else:
self.filename_image = filename_image
if self.save_video:
if self.plot:
raise AttributeError("`save` and `plot` method can't be set both True.")
if filename_video == "":
raise AttributeError("`filename_video` is empty.")
else:
self.filename_video = filename_video
self.interval = interval
self.frames = frames
        # inverse temperature
        self.beta = beta
        # self.beta = 100. # straight (~ low-temperature limit)
        # self.beta = 10. # straight
        # self.beta = 0. # high-temperature limit
        # self.beta = 5. # intermediate
self.weight_const = weight_const
self.bonding_pairs = {i: {} for i in range(len(self.strings))}
for key in self.bonding_pairs.keys():
value = self.get_bonding_pairs(
s=self.strings[key],
# indexes=[[0, len(self.strings[key].pos)]]
index_start=0,
index_stop=len(self.strings[key].pos)
)
            # TODO: terminate the whole simulation when there are no neighbouring sites
# if len(value) == 0:
# return False
self.bonding_pairs[key] = value
# pp.pprint(self.bonding_pairs)
# print(self.strings[0].pos)
# pp.pprint(self.bonding_pairs[0])
# pp.pprint(self.bonding_pairs)
# return None
self.pre_function = pre_function
self.post_function = post_function
self.pre_func_res = []
self.post_func_res = []
# Plot triangular-lattice points, string on it, and so on
if self.plot:
self.plot_all()
self.start_animation()
elif self.save_video:
self.plot_all()
self.start_animation(filename=self.filename_video)
else:
t = 0
while t < self.frames:
try:
self.update()
t += 1
except StopIteration:
break
if self.save_image:
if not self.__dict__.has_key('fig'):
self.plot_all()
self.fig.savefig(self.filename_image)
plt.close()
# print("Image file is successfully saved at '%s'." % filename_image)
def _update_dict(self, dict, key, value):
if dict.has_key(key):
dict[key].append(value)
else:
dict[key] = [value]
def dot(self, v, w):
"""0〜5で表された6つのベクトルの内積を計算する。
v, w (int): ベクトル(0〜5の整数で表す)"""
if (w + 6 - v) % 6 == 0:
return 1.
elif (w + 6 - v) % 6 == 1 or (w + 6 - v) % 6 == 5:
return 0.5
elif (w + 6 - v) % 6 == 2 or (w + 6 - v) % 6 == 4:
return -0.5
elif (w + 6 - v) % 6 == 3:
return -1.
def plot_all(self):
"""軸の設定,三角格子の描画,線分描画要素の用意などを行う
ここからFuncAnimationを使ってアニメーション表示を行うようにする
"""
self.fig, self.ax = plt.subplots(figsize=(8, 8))
lattice_X = self.lattice.coordinates_x
lattice_Y = self.lattice.coordinates_y
X_min, X_max = min(lattice_X) - 0.1, max(lattice_X) + 0.1
Y_min, Y_max = min(lattice_Y) - 0.1, max(lattice_Y) + 0.1
self.ax.set_xlim([X_min, X_max])
self.ax.set_ylim([Y_min, Y_max])
self.ax.set_xticklabels([])
self.ax.set_yticklabels([])
self.ax.set_aspect('equal')
## if the lattice size exceeds 200, don't draw triangular lattice.
if max(self.lattice.Lx, self.lattice.Ly) < 200:
triang = tri.Triangulation(lattice_X, lattice_Y)
self.ax.triplot(triang, color='#d5d5d5', lw=0.5)
self.lines = [self.ax.plot([], [], linestyle='-',
color='black',
markerfacecolor='black',
markeredgecolor='black')[0]
for i in range(self.number_of_lines)]
if self.plot_surface:
# self._num_surface = 1
# self.lines.append(self.ax.plot([], [], '.', color='#ff0000')[0])
self._num_surface = 9
self.lines += [self.ax.plot([], [], '.',
)[0]
for i in range(self._num_surface)]
self.plot_string()
def start_animation(self, filename=""):
if self.__dict__.has_key('frames'):
frames = self.frames
else:
frames = 1000
def init_func(*arg):
return self.lines
ani = animation.FuncAnimation(self.fig, self.update, frames=frames,
init_func=init_func,
interval=self.interval,
blit=True, repeat=False)
if filename != "":
try:
ani.save(filename, codec="libx264", bitrate=-1, fps=30)
except:
print("Can't saved.")
else:
print("Animation is successfully saved at '%s'." % filename)
else:
plt.show()
def plot_string(self):
"""self.strings内に格納されているStringを参照し,グラフ上に図示する
"""
# print self.string.pos, self.string.vec
i = 0 # to count how many line2D object
for s in self.strings:
start = 0
for j, pos1, pos2 in zip(range(len(s.pos) - 1), s.pos[:-1], s.pos[1:]):
dist_x = abs(self.lattice_X[pos1[0], pos1[1]] -
self.lattice_X[pos2[0], pos2[1]])
dist_y = abs(self.lattice_Y[pos1[0], pos1[1]] -
self.lattice_Y[pos2[0], pos2[1]])
# print j, pos1, pos2
# print dist_x, dist_y
# sqrt(2^{2} + (1/2)^{2}) ~ 2.06
if dist_x > 2.1 * self.lattice.dx or dist_y > 2.1 * self.lattice.dx:
x = s.pos_x[start:j + 1]
y = s.pos_y[start:j + 1]
X = [self.lattice_X[_x, _y] for _x, _y in zip(x, y)]
Y = [self.lattice_Y[_x, _y] for _x, _y in zip(x, y)]
self.lines[i].set_data(X, Y)
start = j + 1
i += 1
else:
x = s.pos_x[start:]
y = s.pos_y[start:]
X = [self.lattice_X[_x, _y] for _x, _y in zip(x, y)]
Y = [self.lattice_Y[_x, _y] for _x, _y in zip(x, y)]
self.lines[i].set_data(X, Y)
i += 1
num_plot_surface = 0
if self.plot_surface:
if self._num_surface == 1:
num_plot_surface = 1
neighbors = []
for bonding_pairs in self.bonding_pairs.values():
# print(bonding_pairs)
for pos in bonding_pairs.keys():
neighbors.append(pos)
neighbors = list(np.array(neighbors).T)
# print(neighbors)
X, Y = self.lattice_X[neighbors], self.lattice_Y[neighbors]
# print(X, Y)
self.lines[-1].set_data(X, Y)
# ===
else:
w = {}
for bonding_pairs in self.bonding_pairs.values():
# print(bonding_pairs)
for pos, bps in bonding_pairs.items():
for bp, _w in bps:
if w.has_key(_w):
w[_w].append(pos)
else:
w[_w] = [pos,]
num_plot_surface = len(w)
# W = sorted(w.keys(), reverse=True)
sum_w = np.sum([len(w[_w]) for _w in w.keys()])
W = [(_w, (_w * len(w[_w])) / sum_w) for _w in w.keys()]
ave_W = np.average(W, axis=0)[1]
min_W = np.exp(self.beta * (-2.5)) /sum_w
max_W = np.exp(self.beta * 2.5) /sum_w
for k, (wi, _w) in enumerate(W):
neighbors = list(np.array(w[wi]).T)
X, Y = self.lattice_X[neighbors], self.lattice_Y[neighbors]
self.lines[-(k + 1)].set_data(X, Y)
## color setting
dw = _w - ave_W
if min_W == ave_W:
_c = 0.5
elif dw < 0:
_c = (0.5 / (ave_W - min_W)) * (_w - min_W)
else:
_c = (0.5 / (max_W - ave_W)) * dw + 0.5
self.lines[-(k + 1)].set_color(cm.plasma(_c))
        # In the end, only `i` line objects need to be drawn;
        # any remaining objects are reset below.
if self.plot_surface:
max_obj = len(self.lines) - num_plot_surface
else:
max_obj = len(self.lines)
for j in range(i, max_obj):
self.lines[j].set_data([], [])
return self.lines
def update(self, num=0):
"""FuncAnimationから各フレームごとに呼び出される関数
1時間ステップの間に行う計算はすべてここに含まれる。
"""
# update each string
for i, s in enumerate(self.strings):
if self.pre_function is not None:
self.pre_func_res.append(self.pre_function(self, i, s))
ret = self.update_each_string(i)
if self.post_function is not None:
self.post_func_res.append(self.post_function(self, i, s))
if self.plot or self.save_video:
return self.plot_string()
def update_each_string(self, key):
X = self.get_neighbor_xy(key)
if not X:
raise StopIteration
# print(X)
s = self.strings[key]
# update positions
if len(X) == 4:
i, r_rev, nx, ny = X
s.x, s.y = nx, ny
s.insert(0, r_rev)
x, y = s.pos_x[0], s.pos_y[0]
elif len(X) == 2:
i, r = X
s.insert(i + 1, r)
x, y = s.pos_x[-1], s.pos_y[-1]
else:
i, r, r_rev = X
s.vec[i] = r
s.insert(i + 1, r_rev)
x, y = s.pos_x[i + 1], s.pos_y[i + 1]
self.occupied[x, y] = True
# print("== start == (%d, %d)" % (x, y))
# pp.pprint(self.bonding_pairs[key])
for k, bonding_pairs in self.bonding_pairs.items():
if bonding_pairs.has_key((x, y)):
del self.bonding_pairs[k][(x, y)]
# print("del self.bonding_pairs[%d][(%d, %d)]" % (k, x, y))
# pp.pprint(self.bonding_pairs[key])
index_start = i
index_stop = len(s.pos)
# print(index_start, index_stop)
self.cleanup_bonding_pairs(
key=key,
index_start=index_start,
index_stop=index_stop
)
value = self.get_bonding_pairs(
s=self.strings[key],
index_start=index_start,
index_stop=index_stop
)
# pp.pprint(value)
# pp.pprint(self.bonding_pairs[key])
for k, v in value.items():
if self.bonding_pairs[key].has_key(k):
self.bonding_pairs[key][k] += v
else:
self.bonding_pairs[key][k] = v
# pp.pprint(self.bonding_pairs[key])
# print("== end ==")
# pp.pprint(self.strings[key].pos)
# pp.pprint(self.bonding_pairs[key].keys())
def cleanup_bonding_pairs(self, key, index_start, index_stop):
rang = range(index_start, index_stop)
for pos, l in self.bonding_pairs[key].items():
tmp = [l[i] for i, (bp, w) in enumerate(l) if not bp[0] in rang]
if len(tmp) == 0:
del self.bonding_pairs[key][pos]
else:
self.bonding_pairs[key][pos] = tmp
def get_neighbor_xy(self, key):
"""Stringクラスのインスタンスsの隣接する非占有格子点の座標を取得する
s (String): 対象とするStringクラスのインスタンス
"""
if len(self.bonding_pairs[key]) == 0:
return False
        # evaluate how likely each bonding pair is to be chosen, with appropriate weights
weights = []
bonding_pairs = []
for (pair, w) in reduce(operator.add, self.bonding_pairs[key].values()):
bonding_pairs.append(pair)
weights.append(w)
weights = np.array(weights)
weights = weights / np.sum(weights)
# print(weights)
choiced_index = np.random.choice(range(len(weights)), p=weights)
# print(bonding_pairs[choiced_index])
return bonding_pairs[choiced_index]
def calc_weight(self, s, i, r_i=None, r_rev=None):
"""ベクトルの内積を元に,Boltzmann分布に従って成長点選択の重みを決定
"""
if (i == 1) and (not s.loop):
w = self.dot(r_rev, s.vec[i]) - self.dot(s.vec[0], s.vec[1]) \
- self.weight_const
elif (i == len(s.pos) - 1) and (not s.loop):
w = self.dot(s.vec[i - 2], r_i) - \
self.dot(s.vec[i - 2], s.vec[i - 1]) - self.weight_const
else:
# w = self.dot(s.vec[i - 2], r_i) + self.dot(r_rev, s.vec[i % len(s.vec)])
w = (self.dot(s.vec[i - 2], r_i) + \
self.dot(r_rev, s.vec[i % len(s.vec)])) \
- (self.dot(s.vec[i - 2], s.vec[i - 1]) + \
self.dot(s.vec[i - 1], s.vec[i % len(s.vec)])) \
- self.weight_const
W = np.exp(self.beta * w)
return W
def get_bonding_pairs(self, s, index_start, index_stop):
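        # For every position of string `s` between index_start and index_stop, look
        # at the six neighbouring lattice sites; each unoccupied site (nx, ny) is
        # recorded as {(nx, ny): [[growth_move, boltzmann_weight], ...]}, using the
        # move formats consumed by update_each_string().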
bonding_pairs = {}
neighbors_dict = {}
rang = range(index_start, index_stop)
if s.loop and (0 in rang):
rang.append(0)
for i in rang:
x, y = s.pos[i]
nnx, nny = self.lattice.neighbor_of(x, y)
for r in [0, 1, 2, 3, 4, 5]:
nx, ny = nnx[r], nny[r]
if self.occupied[nx, ny] or nx == -1 or ny == -1:
continue
r_rev = (r + 3) % 6
if not neighbors_dict.has_key((nx, ny)):
if not s.loop:
if i == 0:
w = self.dot(r_rev, s.vec[0])
W = np.exp(self.beta * w)
self._update_dict(bonding_pairs,
(nx, ny),
[[0, r_rev, nx, ny], W])
elif i == len(s.pos) - 1:
w = self.dot(s.vec[i - 1], r)
W = np.exp(self.beta * w)
self._update_dict(bonding_pairs,
(nx, ny),
[[i, r], W])
neighbors_dict[(nx, ny)] = [(i, r),]
else:
if neighbors_dict[(nx, ny)][-1][0] == i - 1:
r_i = neighbors_dict[(nx, ny)][-1][1]
W = self.calc_weight(s, i, r_i, r_rev)
self._update_dict(bonding_pairs,
(nx, ny),
[[i - 1, r_i, r_rev], W])
neighbors_dict[(nx, ny)].append((i, r))
return bonding_pairs
if __name__ == '__main__':
# import timeit
# print(timeit.timeit("Main(Lx=1000, Ly=1000, size=[3,] * 1, \
# strings=[{'id': 1, 'x': 250, 'y': 500, 'vec': [0, 4]}], \
# plot=False)",
# setup="from __main__ import Main",
# number=10
# ))
L = 100
main= Main(Lx=L, Ly=L, size=[3,] * 1, frames=1000,
beta=3.,
plot=True, plot_surface=False,
interval=0,
strings=[{'id': 1, 'x': L/4, 'y': L/2, 'vec': [0, 4]}]
# strings=[{'id': 1, 'x': L/4, 'y': L/2, 'vec': [0, 4, 2]}]
)
| mit |
pradyu1993/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 2 | 7106 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approaches to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d.
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
contrary, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under the curve (AUC) of the
precision-recall.
"""
print __doc__
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD
import pylab as pl
import numpy as np
from scipy import linalg
from sklearn.linear_model import RandomizedLasso, lasso_stability_path, \
LassoLarsCV
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(
np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant))
)
return np.max(np.abs(projector).sum(axis=1))
for conditionning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
    # will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
    # The correlation of our design: variables correlated by blocks of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditionning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y,
random_state=42, eps=0.05)
pl.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = pl.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = pl.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = pl.ylim()
pl.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
pl.ylabel('Stability score: proportion of times selected')
pl.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
pl.axis('tight')
pl.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100, compute_importances=True).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
pl.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
pl.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
pl.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
pl.xlabel("Features")
pl.ylabel("Score")
# Plot only the 100 first coefficients
pl.xlim(0, 100)
pl.legend(loc='best')
pl.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
pl.show()
| bsd-3-clause |
cbrnr/scot | doc/sphinxext/inheritance_diagram.py | 4 | 13650 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
    .. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException as e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
| mit |
ikaee/bfr-attendant | facerecognitionlibrary/jni-build/jni/include/tensorflow/examples/skflow/multioutput_regression.py | 9 | 2552 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example uses the same data as the one here:
http://scikit-learn.org/stable/auto_examples/tree/plot_tree_regression_multioutput.html
Instead of a DecisionTree, a 2-layer Deep Neural Network with tanh activations is used.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from tensorflow.contrib import learn
# Create random dataset.
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
# Fit regression DNN models.
regressors = []
options = [[2], [10, 10], [20, 20]]
for hidden_units in options:
def tanh_dnn(X, y):
features = learn.ops.dnn(X, hidden_units=hidden_units,
activation=learn.tf.tanh)
return learn.models.linear_regression(features, y)
regressor = learn.TensorFlowEstimator(model_fn=tanh_dnn, n_classes=0,
steps=500, learning_rate=0.1, batch_size=100)
regressor.fit(X, y)
score = mean_squared_error(regressor.predict(X), y)
print("Mean Squared Error for {0}: {1:f}".format(str(hidden_units), score))
regressors.append(regressor)
# Predict on new random Xs.
X_test = np.arange(-100.0, 100.0, 0.1)[:, np.newaxis]
y_1 = regressors[0].predict(X_test)
y_2 = regressors[1].predict(X_test)
y_3 = regressors[2].predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g",
label="hidden_units{}".format(str(options[0])))
plt.scatter(y_2[:, 0], y_2[:, 1], c="r",
label="hidden_units{}".format(str(options[1])))
plt.scatter(y_3[:, 0], y_3[:, 1], c="b",
label="hidden_units{}".format(str(options[2])))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output DNN Regression")
plt.legend()
plt.show()
| apache-2.0 |
Titan-C/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 62 | 2295 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, one technique
is to repeat the classification procedure after randomizing (permuting)
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
from sklearn import datasets
# #############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features for make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
# #############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores',
edgecolor='black')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
# plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
# plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
yunque/sms-tools | lectures/08-Sound-transformations/plots-code/stftMorph-frame.py | 21 | 2700 | import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy.signal import hamming, resample
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import math
(fs, x1) = UF.wavread('../../../sounds/orchestra.wav')
(fs, x2) = UF.wavread('../../../sounds/speech-male.wav')
w1 = np.hamming(1024)
N1 = 1024
H1 = 256
w2 = np.hamming(1024)
N2 = 1024
smoothf = .1
balancef = .7
M1 = w1.size # size of analysis window
hM1_1 = int(math.floor((M1+1)/2)) # half analysis window size by rounding
hM1_2 = int(math.floor(M1/2)) # half analysis window size by floor
M2 = w2.size # size of analysis window
hM2_1 = int(math.floor((M2+1)/2)) # half analysis window size by rounding
hM2_2 = int(math.floor(M2/2))                     # half analysis window size by floor
loc1 = 14843
loc2 = 9294
x1 = x1[loc1-hM1_1:loc1+hM1_2]
x2 = x2[loc2-hM2_1:loc2+hM2_2]
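# loc1/loc2 are sample indices in the two input sounds; the slices above keep a
# single analysis frame of M1 (resp. M2) samples centred on each location.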
mX1, pX1 = DFT.dftAnal(x1, w1, N1) # compute dft
mX2, pX2 = DFT.dftAnal(x2, w2, N2) # compute dft
# morph
mX2smooth = resample(np.maximum(-200, mX2), mX2.size*smoothf) # smooth spectrum of second sound
mX2 = resample(mX2smooth, mX2.size)
mY = balancef * mX2 + (1-balancef) * mX1 # generate output spectrum
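# Only the magnitude spectra are interpolated (weighted by balancef); the phase
# spectrum pX1 of the first sound is reused at synthesis below, so the morph
# imposes the smoothed spectral envelope of x2 onto the phase structure of x1.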
#-----synthesis-----
y = DFT.dftSynth(mY, pX1, M1) * sum(w1)                # synthesize the morphed output frame
mY1, pY1 = DFT.dftAnal(y, w1, M1)                      # compute dft of the output frame for plotting
plt.figure(1, figsize=(12, 9))
plt.subplot(321)
plt.plot(np.arange(N1)/float(fs), x1*w1, 'b', lw=1.5)
plt.axis([0, N1/float(fs), min(x1*w1), max(x1*w1)])
plt.title('x1 (orchestra.wav)')
plt.subplot(323)
plt.plot(fs*np.arange(mX1.size)/float(mX1.size), mX1-max(mX1), 'r', lw=1.5, label = 'mX1')
plt.plot(fs*np.arange(mX2.size)/float(mX2.size), mX2-max(mX2), 'k', lw=1.5, label='mX2')
plt.legend(prop={'size':10})
plt.axis([0,fs/4.0,-70,2])
plt.title('mX1 + mX2 (speech-male.wav)')
plt.subplot(325)
plt.plot(fs*np.arange(pX1.size)/float(pX1.size), pX1, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pX1),20])
plt.title('pX1')
plt.subplot(322)
plt.plot(np.arange(N1)/float(fs), y, 'b', lw=1.5)
plt.axis([0, float(N1)/fs, min(y), max(y)])
plt.title('y')
plt.subplot(324)
plt.plot(fs*np.arange(mY1.size)/float(mY1.size), mY1-max(mY1), 'r', lw=1.5)
plt.axis([0,fs/4.0,-70,2])
plt.title('mY')
plt.subplot(326)
plt.plot(fs*np.arange(pY1.size)/float(pY1.size), pY1, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pY1),6])
plt.title('pY')
plt.tight_layout()
plt.savefig('stftMorph-frame.png')
plt.show()
| agpl-3.0 |
probml/pyprobml | scripts/kmeansYeastDemo.py | 1 | 1963 | from scipy.io import loadmat
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from matplotlib import cm
from matplotlib.colors import ListedColormap,LinearSegmentedColormap
data = loadmat('/pyprobml/data/yeastData310.mat') # dictionary containing 'X', 'genes', 'times'
X = data['X']
# Cluster yeast data using Kmeans
kmeans = KMeans(n_clusters=16,random_state=0,algorithm='full').fit(X)
times = data['times']
X = X.transpose()
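# After the transpose, rows of X are the 7 time points and columns are genes,
# so X[:, clusters[l]] below selects the time series of every gene in cluster l.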
labels = kmeans.labels_
clu_cen = kmeans.cluster_centers_
clusters = [[] for i in range(0,16)]
for (i,l) in enumerate(labels):
clusters[l].append(i)
times = times.reshape((7,))
# Visualizing all the time series assigned to each cluster
for l in range(0,16):
plt.subplot(4,4,l+1)
if clusters[l] != []:
plt.plot(times,X[:,clusters[l]])
plt.suptitle("K-Means Clustering of Profiles")
plt.savefig("/pyprobml/figures/yeastKmeans16.pdf", dpi=300)
plt.show()
# Visualizing the 16 cluster centers as prototypical time series.
for l in range(0,16):
plt.subplot(4,4,l+1).axis('off')
plt.plot(times,clu_cen[l,:])
plt.suptitle("K-Means centroids")
plt.savefig("/pyprobml/figures/clusterYeastKmeansCentroids16.pdf", dpi=300)
plt.show()
# yeast gene expression data plotted as a time series
plt.plot(times,X,'o-')
plt.title('yeast microarray data')
plt.xlabel('time')
plt.ylabel('genes')
plt.xlim([0,max(times)])
plt.xticks(ticks=times,labels=times)
plt.savefig("/pyprobml/figures/yeastTimeSeries.pdf", dpi=300)
plt.show()
# yeast gene expression data plotted as a heat map
basic_cols=['#66ff00', '#000000', '#FF0000'] # green-black-red
my_cmap=LinearSegmentedColormap.from_list('mycmap', basic_cols)
plt.xticks(ticks=[i+0.5 for i in range(0,7)],labels=times)
plt.pcolormesh(X.transpose(),cmap=my_cmap)
plt.title('yeast microarray data')
plt.xlabel('time')
plt.ylabel('genes')
plt.colorbar()
plt.savefig("/pyprobml/figures/yeastHeatMap.pdf", dpi=300)
| mit |
endolith/scipy | scipy/stats/_distn_infrastructure.py | 5 | 135895 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
import sys
import keyword
import re
import types
import warnings
import inspect
from itertools import zip_longest
from scipy._lib import doccer
from scipy._lib._util import _lazywhere
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, entr, xlogy, ive)
# for root finding for continuous distribution ppf, and max likelihood
# estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
# for scipy.stats.entropy. Attempts to import just that function or file
# have caused import problems
from scipy import stats
from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data)
Parameter estimates for generic data.
See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the
keyword arguments.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains fraction alpha [0, 1] of the
distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``. Note that shifting the location of a distribution
does not make it a "noncentral" distribution; noncentral generalizations of
some distributions are available in separate classes.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
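# A quick doctest-style sanity check (illustrative only; `data` is a local
# example name, not part of the module):
# >>> data = np.array([1.0, 2.0, 3.0, 4.0])
# >>> _moment(data, 2)       # second central moment == population variance
# 1.25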
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""kurtosis is fourth central moment / variance**2 - 3."""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
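# Doctest-style sketch of the two helpers above on a small symmetric sample
# (illustrative only; `data` is a local example name):
# >>> data = np.array([1.0, 2.0, 3.0, 4.0])
# >>> _skew(data)            # symmetric sample, so the third moment vanishes
# 0.0
# >>> _kurtosis(data)        # m4/m2**2 - 3 = 2.5625/1.5625 - 3, about -1.36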
def _fit_determine_optimizer(optimizer):
if not callable(optimizer) and isinstance(optimizer, str):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError as e:
raise ValueError("%s is not a valid optimizer" % optimizer) from e
return optimizer
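# Illustrative sketch: string optimizer names are mapped onto the matching
# legacy `scipy.optimize` minimizers, e.g.
# >>> _fit_determine_optimizer('powell') is optimize.fmin_powell
# True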
# Frozen RV class
class rv_frozen:
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._updated_ctor_param())
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.a, self.b = self.dist._get_support(*shapes)
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
def support(self):
return self.dist.support(*self.args, **self.kwds)
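# Doctest-style sketch of freezing (uses `scipy.stats.norm`, defined
# elsewhere; `frozen` is a local example name): the frozen object simply
# forwards every call to the parent distribution with the stored
# shape/loc/scale arguments.
# >>> from scipy.stats import norm
# >>> frozen = norm(loc=2.0, scale=3.0)
# >>> frozen.mean(), frozen.std()
# (2.0, 3.0)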
def argsreduce(cond, *args):
"""Clean arguments to:
1. Ensure all arguments are iterable (arrays of dimension at least one
2. If cond != True and size > 1, ravel(args[i]) where ravel(condition) is
True, in 1D.
Return list of processed arguments.
Examples
--------
>>> rng = np.random.default_rng()
>>> A = rng.random((4, 5))
>>> B = 2
>>> C = rng.random((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> A1.shape
(4, 5)
>>> B1.shape
(1,)
>>> C1.shape
(1, 5)
>>> cond[2,:] = 0
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> A1.shape
(15,)
>>> B1.shape
(1,)
>>> C1.shape
(15,)
"""
# some distributions assume arguments are iterable.
newargs = np.atleast_1d(*args)
# np.atleast_1d returns an array if only one argument, or a list of arrays
# if more than one argument.
if not isinstance(newargs, list):
newargs = [newargs, ]
if np.all(cond):
# Nothing to do
return newargs
s = cond.shape
# np.extract returns flattened arrays, which are not broadcastable together
# unless they are either the same size or size == 1.
return [(arg if np.size(arg) == 1
else np.extract(cond, np.broadcast_to(arg, s)))
for arg in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
# factor of exp(-xs*ns) into the ive function to improve numerical
# stability at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
# Return res + np.log(corr) avoiding np.log(0)
return _lazywhere(
corr > 0,
(res, corr),
f=lambda r, c: r + np.log(c),
fillvalue=-np.inf)
def _ncx2_pdf(x, df, nc):
# Copy of _ncx2_log_pdf avoiding np.log(0) when corr = 0
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
return np.exp(res) * corr
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
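# Illustrative cross-check (assumes `scipy.stats.ncx2`; the exact internal
# wiring may vary by version, but the densities should agree numerically):
# >>> x, df, nc = 4.0, 3.0, 1.5
# >>> np.allclose(_ncx2_pdf(x, df, nc), stats.ncx2.pdf(x, df, nc))
# True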
class rv_generic:
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super().__init__()
# figure out if _stats signature has 'moments' keyword
sig = _getfullargspec(self._stats)
self._stats_has_moments = ((sig.varkw is not None) or
('moments' in sig.args) or
('moments' in sig.kwonlyargs))
self._random_state = check_random_state(seed)
        # For historical reasons, `size` was made an attribute that was read
        # inside _rvs(). The code is being changed so that 'size' is an
        # argument to self._rvs(). However, some external (non-SciPy)
        # distributions have not been updated. Maintain backwards
        # compatibility by checking if the self._rvs() signature has the
        # 'size' keyword, or a **kwarg, and if not set self._size inside
        # self.rvs() before calling self._rvs().
argspec = inspect.getfullargspec(self._rvs)
self._rvs_uses_size_attribute = (argspec.varkw is None and
'size' not in argspec.args and
'size' not in argspec.kwonlyargs)
# Warn on first use only
self._rvs_size_warned = False
@property
def random_state(self):
"""Get or set the generator object for generating random variates.
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def __setstate__(self, state):
try:
self.__dict__.update(state)
# attaches the dynamically created methods on each instance.
# if a subclass overrides rv_generic.__setstate__, or implements
# it's own _attach_methods, then it must make sure that
# _attach_argparser_methods is called.
self._attach_methods()
except ValueError:
# reconstitute an old pickle scipy<1.6, that contains
# (_ctor_param, random_state) as state
self._ctor_param = state[0]
self._random_state = state[1]
self.__init__()
def _attach_methods(self):
"""Attaches dynamically created methods to the rv_* instance.
This method must be overridden by subclasses, and must itself call
_attach_argparser_methods. This method is called in __init__ in
subclasses, and in __setstate__
"""
raise NotImplementedError
def _attach_argparser_methods(self):
"""
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Should be called from `_attach_methods`, typically in __init__ and
during unpickling (__setstate__)
"""
ns = {}
exec(self._parse_arg_template, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name, types.MethodType(ns[name], self))
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser string for the shape arguments.
This method should be called in __init__ of a class for each
distribution. It creates the `_parse_arg_template` attribute that is
then used by `_attach_argparser_methods` to dynamically create and
attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs`
methods to the instance.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, str):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getfullargspec(meth) # NB does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.varkw is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.kwonlyargs:
raise TypeError(
'kwonly args are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
# this string is used by _attach_argparser_methods
self._parse_arg_template = parse_arg_template % dct
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
tempdict['shapes_'] = self.shapes or ''
if self.shapes and self.numargs == 1:
tempdict['shapes_'] += ','
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
try:
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
except TypeError as e:
raise Exception("Unable to construct docstring for "
"distribution \"%s\": %s" %
(self.name, repr(e))) from e
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Noncentral moments (also known as the moment about the origin).
# Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
# The primed mu is a widely used notation for the noncentral moment.
def _munp(self, n, *args):
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = self.generic_moment(n, *args)
return vals
def _argcheck_rvs(self, *args, **kwargs):
# Handle broadcasting and size validation of the rvs method.
# Subclasses should not have to override this method.
# The rule is that if `size` is not None, then `size` gives the
# shape of the result (integer values of `size` are treated as
# tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
#
# `args` is expected to contain the shape parameters (if any), the
# location and the scale in a flat tuple (e.g. if there are two
# shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
# The only keyword argument expected is 'size'.
size = kwargs.get('size', None)
all_bcast = np.broadcast_arrays(*args)
def squeeze_left(a):
while a.ndim > 0 and a.shape[0] == 1:
a = a[0]
return a
# Eliminate trivial leading dimensions. In the convention
# used by numpy's random variate generators, trivial leading
# dimensions are effectively ignored. In other words, when `size`
# is given, trivial leading dimensions of the broadcast parameters
# in excess of the number of dimensions in size are ignored, e.g.
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
# array([ 1.00104267, 3.00422496, 4.99799278])
# If `size` is not given, the exact broadcast shape is preserved:
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
# array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
#
all_bcast = [squeeze_left(a) for a in all_bcast]
bcast_shape = all_bcast[0].shape
bcast_ndim = all_bcast[0].ndim
if size is None:
size_ = bcast_shape
else:
size_ = tuple(np.atleast_1d(size))
# Check compatibility of size_ with the broadcast shape of all
# the parameters. This check is intended to be consistent with
# how the numpy random variate generators (e.g. np.random.normal,
# np.random.beta) handle their arguments. The rule is that, if size
# is given, it determines the shape of the output. Broadcasting
# can't change the output size.
# This is the standard broadcasting convention of extending the
# shape with fewer dimensions with enough dimensions of length 1
# so that the two shapes have the same number of dimensions.
ndiff = bcast_ndim - len(size_)
if ndiff < 0:
bcast_shape = (1,)*(-ndiff) + bcast_shape
elif ndiff > 0:
size_ = (1,)*ndiff + size_
# This compatibility test is not standard. In "regular" broadcasting,
# two shapes are compatible if for each dimension, the lengths are the
# same or one of the lengths is 1. Here, the length of a dimension in
# size_ must not be less than the corresponding length in bcast_shape.
ok = all([bcdim == 1 or bcdim == szdim
for (bcdim, szdim) in zip(bcast_shape, size_)])
if not ok:
raise ValueError("size does not match the broadcast shape of "
"the parameters. %s, %s, %s" % (size, size_,
bcast_shape))
param_bcast = all_bcast[:-2]
loc_bcast = all_bcast[-2]
scale_bcast = all_bcast[-1]
return param_bcast, loc_bcast, scale_bcast, size_
# These are the methods you must define (standard form functions)
# NB: generic _pdf, _logpdf, _cdf are different for
# rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
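    # Illustrative sketch (uses `scipy.stats.gamma`, defined elsewhere): a
    # shape parameter failing this default positivity check makes the public
    # methods return ``badvalue`` (nan) for that entry.
    # >>> from scipy.stats import gamma
    # >>> np.isnan(gamma.pdf(1.0, -1.0))
    # True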
def _get_support(self, *args, **kwargs):
"""Return the support of the (unscaled, unshifted) distribution.
*Must* be overridden by distributions which have support dependent
upon the shape parameters of the distribution. Any such override
*must not* set or change any of the class members, as these members
are shared amongst all instances of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support for the specified
shape parameters.
"""
return self.a, self.b
def _support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a <= x) & (x <= b)
def _open_support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a < x) & (x < b)
def _rvs(self, *args, size=None, random_state=None):
# This method must handle size being a tuple, and it must
# properly broadcast *args and size. size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
# Use basic inverse cdf algorithm for RV generation as default.
U = random_state.uniform(size=size)
Y = self._ppf(U, *args)
return Y
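    # The default above is inverse-transform sampling: if U ~ Uniform(0, 1),
    # then ppf(U) follows the distribution. Doctest-style sketch with the
    # standard exponential, whose ppf is -log(1 - q):
    # >>> from scipy.stats import expon
    # >>> expon.ppf(0.5)
    # 0.6931471805599453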
def _logcdf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
random_state = check_random_state(rndm)
else:
random_state = self._random_state
# Maintain backwards compatibility by setting self._size
# for distributions that still need it.
if self._rvs_uses_size_attribute:
if not self._rvs_size_warned:
warnings.warn(
f'The signature of {self._rvs} does not contain '
f'a "size" keyword. Such signatures are deprecated.',
np.VisibleDeprecationWarning)
self._rvs_size_warned = True
self._size = size
self._random_state = random_state
vals = self._rvs(*args)
else:
vals = self._rvs(*args, size=size, random_state=random_state)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if size == ():
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def stats(self, *args, **kwds):
"""Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = np.full(shape(cond), fill_value=self.badvalue)
# Use only entries that are valid in calculation
if np.any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
if g2 is None:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
# if mean is inf then var is also inf
with np.errstate(invalid='ignore'):
mu2 = np.where(np.isfinite(mu), mu2p - mu**2, np.inf)
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
with np.errstate(invalid='ignore'):
mu3 = (-mu*mu - 3*mu2)*mu + mu3p
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
with np.errstate(invalid='ignore'):
mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
with np.errstate(invalid='ignore'):
mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = [default.copy() for _ in moments]
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, scale, *args)
goodscale = goodargs[0]
goodargs = goodargs[1:]
place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
return output
def moment(self, n, *args, **kwds):
"""n-th order non-central moment of distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
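    # Worked instance of the transformation above (sketch): for n = 2,
    # E[(loc + scale*Y)**2] = loc**2 + 2*loc*scale*E[Y] + scale**2*E[Y**2],
    # which is what the k-loop plus the final ``fac**n`` term compute. For a
    # standard normal Y (E[Y] = 0, E[Y**2] = 1):
    # >>> from scipy.stats import norm
    # >>> norm.moment(2, loc=1.0, scale=2.0)     # 1 + 0 + 4
    # 5.0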
def median(self, *args, **kwds):
"""Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if np.any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
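    # Doctest-style sketch: the central 95% interval of the standard normal.
    # >>> from scipy.stats import norm
    # >>> norm.interval(0.95)            # approximately (-1.96, 1.96)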
def support(self, *args, **kwargs):
"""Support of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : array_like
end-points of the distribution's support.
"""
args, loc, scale = self._parse_args(*args, **kwargs)
arrs = np.broadcast_arrays(*args, loc, scale)
args, loc, scale = arrs[:-2], arrs[-2], arrs[-1]
cond = self._argcheck(*args) & (scale > 0)
_a, _b = self._get_support(*args)
if cond.all():
return _a * scale + loc, _b * scale + loc
elif cond.ndim == 0:
return self.badvalue, self.badvalue
# promote bounds to at least float to fill in the badvalue
_a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d')
out_a, out_b = _a * scale + loc, _b * scale + loc
place(out_a, 1-cond, self.badvalue)
place(out_b, 1-cond, self.badvalue)
return out_a, out_b
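    # Illustrative sketch: the standardized endpoints are mapped through
    # loc/scale, e.g. the uniform distribution on [loc, loc + scale]:
    # >>> from scipy.stats import uniform
    # >>> uniform.support(loc=1.0, scale=2.0)
    # (1.0, 3.0)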
def _get_fixed_fit_value(kwds, names):
"""
Given names such as `['f0', 'fa', 'fix_a']`, check that there is
    at most one non-None value in `kwds` associated with those names.
Return that value, or None if none of the names occur in `kwds`.
As a side effect, all occurrences of those names in `kwds` are
removed.
"""
vals = [(name, kwds.pop(name)) for name in names if name in kwds]
if len(vals) > 1:
repeated = [name for name, val in vals]
raise ValueError("fit method got multiple keyword arguments to "
"specify the same fixed parameter: " +
', '.join(repeated))
return vals[0][1] if vals else None
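# Illustrative sketch: for a shape parameter named ``a``, any of ``f0``,
# ``fa`` or ``fix_a`` pins it during fitting; the helper pops whichever
# alias was supplied (``kwds`` here is a local example dict).
# >>> kwds = {'fa': 2.0, 'loc': 0.0}
# >>> _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
# 2.0
# >>> kwds
# {'loc': 0.0}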
# continuous random variables: implement maybe later
#
# hf --- Hazard Function (PDF / SF)
# chf --- Cumulative hazard function (-log(SF))
# psf --- Probability sparsity function (reciprocal of the pdf) in
# units of percent-point-function (as a function of q).
# Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
        The value in result arrays that indicates a value for which some
        argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
support
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of the distribution.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
For most of the scipy.stats distributions, the support interval doesn't
depend on the shape parameters. ``x`` being in the support interval is
equivalent to ``self.a <= x <= self.b``. If either of the endpoints of
the support do depend on the shape parameters, then
i) the distribution must implement the ``_get_support`` method; and
ii) those dependent endpoints must be omitted from the distribution's
call to the ``rv_continuous`` initializer.
    Correct, but potentially slow, defaults exist for the remaining
    methods; for speed and/or accuracy you can override::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
applied to a uniform random variate. In order to generate random variates
efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
    if the inverse cdf can be expressed in an explicit form) or a sampling
method needs to be implemented in a custom ``_rvs`` method.
If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
The main reason would be to improve numerical accuracy: for example,
the survival function ``_sf`` is computed as ``1 - _cdf`` which can
result in loss of precision if ``_cdf(x)`` is close to one.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
_get_support
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in
    all cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
    scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
`rv_frozen` object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super().__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self.extradoc = extradoc
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
self._attach_methods()
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
# _random_state attribute is taken care of by rv_generic
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "vecentropy", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""
Attaches dynamically created methods to the rv_continuous instance.
"""
# _attach_methods is responsible for calling _attach_argparser_methods
self._attach_argparser_methods()
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
if self.moment_type == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
factor = 10.
left, right = self._get_support(*args)
if np.isinf(left):
left = min(-factor, right)
while self._ppf_to_solve(left, q, *args) > 0.:
left, right = left * factor, left
# left is now such that cdf(left) <= q
# if right has changed, then cdf(right) > q
if np.isinf(right):
right = max(factor, left)
while self._ppf_to_solve(right, q, *args) < 0.:
left, right = right, right * factor
# right is now such that cdf(right) >= q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._mom_integ0, _a, _b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
# Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._pdf, _a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
# generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
# in rv_generic
def pdf(self, x, *args, **kwds):
"""Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= np.asarray(_b)) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= _b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
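    # Editor's illustrative sketch (not part of the original source): `ppf`
    # inverts `cdf`, so round-tripping a probability through cdf/ppf recovers
    # the original quantile for any loc/scale, e.g. with the normal family:
    # >>> import numpy as np
    # >>> from scipy import stats
    # >>> q = stats.norm.cdf(1.5, loc=2.0, scale=3.0)
    # >>> np.isclose(stats.norm.ppf(q, loc=2.0, scale=3.0), 1.5)
    # True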
def isf(self, q, *args, **kwds):
"""Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -np.sum(self._logpdf(x, *args), axis=0)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
def nnlf(self, theta, x):
"""Negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x, *args)):
return inf
return self._nnlf(x, *args) + n_log_scale
def _nnlf_and_penalty(self, x, args):
cond0 = ~self._support_mask(x, *args)
n_bad = np.count_nonzero(cond0, axis=0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logpdf = self._logpdf(x, *args)
finite_logpdf = np.isfinite(logpdf)
n_bad += np.sum(~finite_logpdf, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logpdf[finite_logpdf], axis=0) + penalty
return -np.sum(logpdf, axis=0)
def _penalized_nnlf(self, theta, x):
"""Penalized negative loglikelihood function.
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
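    # Editor's illustrative sketch (not part of the original source): for a
    # distribution with no shape parameters, ``nnlf((loc, scale), x)`` equals
    # ``-sum(logpdf((x - loc)/scale)) + len(x)*log(scale)``; with scale=1 the
    # log-scale term vanishes:
    # >>> import numpy as np
    # >>> from scipy import stats
    # >>> x = np.array([0.5, 1.0, 2.0])
    # >>> np.isclose(stats.norm.nnlf((0.0, 1.0), x),
    # ...            -np.sum(stats.norm.logpdf(x)))
    # True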
def _fitstart(self, data, args=None):
"""Starting point for fit (shape arguments + loc + scale)."""
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
def _reduce_func(self, args, kwds, data=None):
"""
Return the (possibly reduced) function to optimize in order to find MLE
estimates for the .fit method.
"""
# Convert fixed shape parameters to the standard numeric form: e.g. for
# stats.beta, shapes='a, b'. To fix `a`, the caller can give a value
# for `f0`, `fa` or 'fix_a'. The following converts the latter two
# into the first (numeric) form.
shapes = []
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
key = 'f' + str(j)
names = [key, 'f' + s, 'fix_' + s]
val = _get_fixed_fit_value(kwds, names)
if val is not None:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
methods = {"mle", "mm"}
method = kwds.pop('method', "mle").lower()
if method == "mm":
n_params = len(shapes) + 2 - len(fixedn)
exponents = (np.arange(1, n_params+1))[:, np.newaxis]
data_moments = np.sum(data[None, :]**exponents/len(data), axis=1)
def objective(theta, x):
return self._moment_error(theta, x, data_moments)
elif method == "mle":
objective = self._penalized_nnlf
else:
raise ValueError("Method '{0}' not available; must be one of {1}"
.format(method, methods))
if len(fixedn) == 0:
func = objective
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return objective(newtheta, x)
return x0, func, restore, args
def _moment_error(self, theta, x, data_moments):
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale)
for i in range(len(data_moments))])
if np.any(np.isnan(dist_moments)):
raise ValueError("Method of moments encountered a non-finite "
"distribution moment and cannot continue. "
"Consider trying method='MLE'.")
return (((data_moments - dist_moments) /
np.maximum(np.abs(data_moments), 1e-8))**2).sum()
def fit(self, data, *args, **kwds):
"""
Return estimates of shape (if applicable), location, and scale
parameters from data. The default estimation method is Maximum
Likelihood Estimation (MLE), but Method of Moments (MM)
is also available.
Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in estimating the distribution parameters.
arg1, arg2, arg3,... : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
- `loc`: initial guess of the distribution's location parameter.
- `scale`: initial guess of the distribution's scale parameter.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use.
The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
- method : The method to use. The default is "MLE" (Maximum
Likelihood Estimate); "MM" (Method of Moments)
is also available.
Returns
-------
parameter_tuple : tuple of floats
Estimates for any shape parameters (if applicable),
followed by those for location and scale.
For most random variables, shape statistics
will be returned, but there are exceptions (e.g. ``norm``).
Notes
-----
With ``method="MLE"`` (default), the fit is computed by minimizing
the negative log-likelihood function. A large, finite penalty
(rather than infinite negative log-likelihood) is applied for
observations beyond the support of the distribution.
With ``method="MM"``, the fit is computed by minimizing the L2 norm
of the relative errors between the first *k* raw (about zero) data
moments and the corresponding distribution moments, where *k* is the
number of non-fixed parameters.
More precisely, the objective function is::
(((data_moments - dist_moments)
/ np.maximum(np.abs(data_moments), 1e-8))**2).sum()
where the constant ``1e-8`` avoids division by zero in case of
vanishing data moments. Typically, this error norm can be reduced to
zero.
Note that the standard method of moments can produce parameters for
which some data are outside the support of the fitted distribution;
this implementation does nothing to prevent this.
For either method,
the returned answer is not guaranteed to be globally optimal; it
may only be locally optimal, or the optimization may fail altogether.
If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``,
the `fit` method will raise a ``RuntimeError``.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc``
and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
Not all distributions return estimates for the shape parameters.
``norm`` for example just returns estimates for location and scale:
>>> from scipy.stats import norm
>>> x = norm.rvs(a, b, size=1000, random_state=123)
>>> loc1, scale1 = norm.fit(x)
>>> loc1, scale1
(0.92087172783841631, 2.0015750750324668)
"""
data = np.asarray(data)
method = kwds.get('method', "mle").lower()
# memory for method of moments
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds, data=data)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
optimizer = _fit_determine_optimizer(optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
# In some cases, method of moments can be done with fsolve/root
# instead of an optimizer, but sometimes no solution exists,
# especially when the user fixes parameters. Minimizing the sum
# of squares of the error generalizes to these cases.
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
obj = func(vals, data)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
loc, scale, shapes = self._unpack_loc_scale(vals)
if not (np.all(self._argcheck(*shapes)) and scale > 0):
raise Exception("Optimization converged to parameters that are "
"outside the range allowed by the distribution.")
if method == 'mm':
if not np.isfinite(obj):
raise Exception("Optimization failed: either a data moment "
"or fitted distribution moment is "
"non-finite.")
return vals
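    # Editor's illustrative sketch (not part of the original source): fitting
    # with the default MLE objective versus the method-of-moments objective,
    # holding the location fixed at zero in both cases:
    # >>> from scipy import stats
    # >>> data = stats.gamma.rvs(2.0, scale=1.5, size=500, random_state=0)
    # >>> stats.gamma.fit(data, floc=0)               # doctest: +SKIP
    # >>> stats.gamma.fit(data, floc=0, method='MM')  # doctest: +SKIP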
def _fit_loc_scale_support(self, data, *args):
"""Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
_a, _b = self._get_support(*args)
a, b = _a, _b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale
# estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
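    # Editor's illustrative sketch (not part of the original source): the
    # moment matching above gives ``Shat = sqrt(var(data)/mu2)`` and
    # ``Lhat = mean(data) - Shat*mu``, where ``mu``/``mu2`` are the mean and
    # variance of the standardized distribution. For an exponential sample
    # drawn with loc=3, scale=2 the estimates land near (3, 2):
    # >>> from scipy import stats
    # >>> x = stats.expon.rvs(loc=3.0, scale=2.0, size=10000, random_state=1)
    # >>> stats.expon.fit_loc_scale(x)                # doctest: +SKIP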
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
_a, _b = self._get_support(*args)
with np.errstate(over='ignore'):
h = integrate.quad(integ, _a, _b)[0]
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(_b):
upper = upp
else:
upper = _b
if np.isinf(_a):
lower = low
else:
lower = _a
return integrate.quad(integ, lower, upper)[0]
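    # Editor's illustrative sketch (not part of the original source): the
    # quadrature above reproduces known closed forms (specific distributions
    # may override ``_entropy`` analytically), e.g. the differential entropy
    # of the standard normal is 0.5*log(2*pi*e):
    # >>> import numpy as np
    # >>> from scipy import stats
    # >>> np.isclose(stats.norm.entropy(), 0.5 * np.log(2 * np.pi * np.e))
    # True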
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution by numerical integration.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
ub
E[f(x)] = Integral(f(x) * dist.pdf(x)),
lb
where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
distribution. If the bounds ``lb`` and ``ub`` correspond to the
support of the distribution, e.g. ``[-inf, inf]`` in the default
case, then the integral is the unrestricted expectation of ``f(x)``.
Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
outside a finite interval in which case the expectation is
calculated within the finite range ``[lb, ub]``.
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`scipy.integrate.quad`. Neither this function nor
`scipy.integrate.quad` can verify whether the integral exists or is
finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
``cauchy(0).expect()`` returns ``0.0``.
The function is not vectorized.
Examples
--------
To understand the effect of the bounds of integration consider
>>> from scipy.stats import expon
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
0.6321205588285578
This is close to
>>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
0.6321205588285577
If ``conditional=True``
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
1.0000000000000002
The slight deviation from 1 is due to numerical integration.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
_a, _b = self._get_support(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + _a * scale
if ub is None:
ub = loc + _b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
return vals
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
_a, _b = self._get_support(*args)
return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
_a, _b = self._get_support(*args)
b = _b
a = _a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= _b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= _a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``
and ``pk`` must have the same shape.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
        The value in the result arrays that indicates a value for which
        some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
        that takes two integers as the two shape arguments for all its methods.
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
support
Notes
-----
This class is similar to `rv_continuous`. Whether a shape parameter is
valid is decided by an ``_argcheck`` method (which defaults to checking
that its arguments are strictly positive.)
The main differences are:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
if values is not None:
# dispatch to a subclass
return super(rv_discrete, cls).__new__(rv_sample)
else:
# business as usual
return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super().__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
if values is not None:
raise ValueError("rv_discrete.__init__(..., values != None, ...)")
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._attach_methods()
self._construct_docstrings(name, longname, extradoc)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""Attaches dynamically created methods to the rv_discrete instance."""
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.vecentropy = vectorize(self._entropy)
# _attach_methods is responsible for calling _attach_argparser_methods
self._attach_argparser_methods()
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = types.MethodType(_vec_generic_moment, self)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2
self._ppfvec = types.MethodType(_vppf, self)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
# generate docstring for subclass instances
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
_a, _b = self._get_support(*args)
m = arange(int(_a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (Default is 1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super().rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, cond2*(cond0 == cond0), 1.0)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), _a-1 + loc)
place(output, cond2, _b + loc)
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
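    # Editor's illustrative sketch (not part of the original source): the
    # boundary handling above maps q == 0 to ``_a - 1 + loc`` and q == 1 to
    # ``_b + loc``, e.g. for a Poisson variable:
    # >>> from scipy import stats
    # >>> stats.poisson.ppf(0.0, 3.0)    # -> -1.0  (i.e. _a - 1)
    # >>> stats.poisson.ppf(1.0, 3.0)    # -> inf   (i.e. _b)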
def isf(self, q, *args, **kwds):
"""Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond3 = (q == 0) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nan and inf
lower_bound = _a - 1 + loc
upper_bound = _b + loc
place(output, cond2*(cond == cond), lower_bound)
place(output, cond3*(cond == cond), upper_bound)
# call place only if at least 1 valid argument
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return stats.entropy(self.pk)
else:
_a, _b = self._get_support(*args)
return _expect(lambda x: entr(self.pmf(x, *args)),
_a, _b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution by numerical summation.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for the summation, default is set to the
support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
the given interval (k such that ``lb <= k <= ub``).
Default is False.
maxcount : int, optional
Maximal number of terms to evaluate (to avoid an endless loop for
an infinite sum). Default is 1000.
tolerance : float, optional
Absolute tolerance for the summation. Default is 1e-10.
chunksize : int, optional
Iterate over the support of a distributions in chunks of this size.
Default is 32.
Returns
-------
expect : float
Expected value.
Notes
-----
        For heavy-tailed distributions, the expected value may or may not
        exist, depending on the function, `func`. If it does exist, but the
        sum converges slowly, the accuracy of the result may be rather low.
        For instance, for ``zipf(4)``, the accuracy of the mean and variance
        in the example is only 1e-5. Increasing `maxcount` and/or `chunksize`
        may improve the result, but may also make zipf very slow.
The function is not vectorized.
"""
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
        # Note: `pmf` was used here historically because `_pmf` does not check
        # the support in `randint`; with the current support handling, the
        # summation works correctly with `_pmf`.
_a, _b = self._get_support(*args)
if lb is None:
lb = _a
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = _b
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
if isinstance(self, rv_sample):
res = self._expect(fun, lb, ub)
return res / invfac
# iterate over the support, starting from the median
x0 = self.ppf(0.5, *args)
res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
return res / invfac
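    # Editor's illustrative sketch (not part of the original source): summing
    # ``k * pmf(k)`` over the support recovers the mean, e.g. for Poisson(3.0):
    # >>> from scipy import stats
    # >>> stats.poisson.expect(lambda k: k, args=(3.0,))  # approximately 3.0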
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is
infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
The ctor ignores most of the arguments, only needs the `values` argument.
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if np.shape(xk) != np.shape(pk):
raise ValueError("xk and pk must have the same shape.")
if np.less(pk, 0.0).any():
raise ValueError("All elements of pk must be non-negative.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._attach_methods()
self._construct_docstrings(name, longname, extradoc)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in rv_generic.__setstate__,
# which calls rv_generic._attach_methods
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""Attaches dynamically created argparser methods."""
self._attach_argparser_methods()
def _get_support(self, *args):
"""Return the support of the (unscaled, unshifted) distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support.
"""
return self.a, self.b
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
def _rvs(self, size=None, random_state=None):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = random_state.uniform(size=size)
if size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return stats.entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _expect(self, fun, lb, ub, *args, **kwds):
# ignore all args, just do a brute force summation
supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
vals = fun(supp)
return np.sum(vals)
def _check_shape(argshape, size):
"""
This is a utility function used by `_rvs()` in the class geninvgauss_gen.
It compares the tuple argshape to the tuple size.
Parameters
----------
argshape : tuple of integers
Shape of the arguments.
size : tuple of integers or integer
Size argument of rvs().
Returns
-------
The function returns two tuples, scalar_shape and bc.
scalar_shape : tuple
Shape to which the 1-d array of random variates returned by
_rvs_scalar() is converted when it is copied into the
output array of _rvs().
bc : tuple of booleans
        bc is a tuple of the same length as size. bc[j] is True if the data
associated with that index is generated in one call of _rvs_scalar().
"""
scalar_shape = []
bc = []
for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
fillvalue=1):
if sizedim > argdim or (argdim == sizedim == 1):
scalar_shape.append(sizedim)
bc.append(True)
else:
bc.append(False)
return tuple(scalar_shape[::-1]), tuple(bc[::-1])
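# Editor's illustrative sketch (not part of the original source): with a
# scalar (broadcastable) argument shape and a requested size, every output
# dimension can be filled by a single _rvs_scalar() call:
# >>> _check_shape((1,), (5,))
# ((5,), (True,))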
def get_distribution_names(namespace_pairs, rv_base_class):
"""Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
| bsd-3-clause |
Sentient07/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 63 | 6459 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
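# Editor's illustrative sketch (not part of the original source): the toy
# data is a noisy rank-`n_components` product Y = U V, so the shapes are
# (n_samples, n_features), (n_samples, n_components), (n_components, n_features):
# >>> Y, U, V = generate_toy_data(3, 10, (8, 8), random_state=0)
# >>> Y.shape, U.shape, V.shape
# ((10, 64), (10, 3), (3, 64))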
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
# Test that deprecated ridge_alpha parameter throws warning
warning_msg = "The ridge_alpha parameter on transform()"
assert_warns_message(DeprecationWarning, warning_msg, spca_lars.transform,
Y, ridge_alpha=0.01)
assert_warns_message(DeprecationWarning, warning_msg, spca_lars.transform,
Y, ridge_alpha=None)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
    # Test that SparsePCA won't return NaN when a feature is zero in all
    # samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
bigdataelephants/scikit-learn | sklearn/datasets/species_distributions.py | 24 | 7871 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
        M[M == nodata] = -9999  # mark missing-data cells with the sentinel value
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
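# Editor's illustrative usage sketch (not part of the original source),
# assuming a Bunch carrying the grid attributes documented above:
# >>> from sklearn.datasets.base import Bunch
# >>> batch = Bunch(x_left_lower_corner=-94.8, y_left_lower_corner=-56.05,
# ...               Nx=1212, Ny=1592, grid_size=0.05)
# >>> xgrid, ygrid = construct_grids(batch)          # doctest: +SKIP
# xgrid and ygrid are 1-D arrays of cell longitudes/latitudes (about Nx and
# Ny points, spaced by grid_size).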
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
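if __name__ == "__main__":
    # Hedged smoke test, not part of the library API: fetches (and caches)
    # the data under get_data_home(), then reports the shapes documented
    # in the docstring above.
    data = fetch_species_distributions()
    print("coverages:", data.coverages.shape)  # (14, 1592, 1212)
    print("train:", data.train.shape, "test:", data.test.shape)
    xgrid, ygrid = construct_grids(data)
    print("grid:", xgrid.shape, ygrid.shape)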
| bsd-3-clause |
RyanCarey/abm-platform | plot.py | 1 | 3482 | #from __future__ import print_function
from numpy import *
import matplotlib.pyplot as plt
exec(open('unpickle.py').read())
### old ###
def load_turn(filenames,turn, length):
filenames = list(copy(filenames))
positions = []
while filenames:
data = load(filenames.pop())
if len(data)==length: # check that data has reached full length without error
cells = array(data[turn]['alive_cells'])
positions.append(cells[:,[1,2,-1]])
return positions
def load_all_turns(filenames, length):
# prevent mutation of filenames list
filenames = list(copy(filenames))
b = []
for i in range(length):
b.append(load_turn(filenames,i, length))
return b
def load_all(filenames):
data = []
for name in filenames:
data.append(load(name))
out = [i[1:] for i in data]
#extract cell information
for i in range(len(out)):
for j in range(len(out[i])):
out[i][j] = out[i][j]['alive_cells']
for i in range(len(out)):
for j in range(len(out[i])):
for k in range(len(out[i][j])):
# extract positions
out[i][j][k] = array(out[i][j][k][1:3])
out[i][j] = vstack(out[i][j])
return out
def remove_duds(data):
for i in range(len(data)):
for j in range(len(data[i])):
for k in range(len(data[i][j])):
try:
out = data[i][j][k][1:3]
except:
print(i,j,k)
break
return out
def boxplot(data, title, labels):
fig = plt.figure()
ax = fig.add_subplot(111)
bp = ax.boxplot([i[:,0] for i in data])
ax.set_title(title)
ax.set_xlabel('Iterations')
ax.set_ylabel('Horizontal location')
ax.set_xticklabels(labels)
plt.show()
return
def stacking(data):
for i in range(len(data)):
for j in range(len(data[i])):
data[i][j] = vstack(data[i][j])
return data
def without_duds(data):
return data[array([len(i)==10 for i in data])]
def combine(data):
data = array(data)
data = vstack((i for i in data)).astype(float)
return data
def histogram(stems, progs, title):
#plots histogram of cell locations
bins = linspace(0,30,30)
# the histogram of the data
plt.hist(stems, bins, alpha=0.5, label='stem cells')
plt.hist(progs, bins, alpha=0.5, label='progenitor cells')
plt.legend(loc='upper right')
plt.xlabel('X-ordinate')
plt.ylabel('Cell count')
plt.title(title)
plt.axis([0, 30, 0, 60])
plt.grid(True)
plt.show()
def autolabel(rects, ax):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
ha='center', va='bottom')
def barchart(data, errors=None):
    # data gives the bar heights; errors optionally gives error-bar lengths
    ind = arange(len(data))  # the x locations for the groups
    width = 0.35             # the width of the bars
    fig, ax = plt.subplots()
    rects1 = ax.bar(ind, data, width, color='r', yerr=errors)
# add some text for labels, title and axes ticks
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('0', '500', '1000', '1500', '2000','2500') )
autolabel(rects1,ax)
plt.show()
def linegraph(stems,progs, title):
xaxis = arange(0,5250,250)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(xaxis,progs,linewidth=2)
plt.plot(xaxis,stems,linewidth=2.0)
ax.set_title(title)
ax.set_xlabel('Iterations')
ax.set_ylabel('Population Size')
plt.legend(["Stem cells","Progenitor cells"],loc='upper left')
plt.show()
| mit |
SvichkarevAnatoly/Course-Python-Bioinformatics | semester2/task10/exercise1.py | 1 | 3895 | import numpy
import matplotlib.pyplot as plot
from sklearn import ensemble, tree
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
data = open('glass.data.csv')
# arrange data into list for labels and list of lists for attributes
xList = []
for line in data:
# split on comma
row = line.strip().split(",")
xList.append(row)
glassNames = numpy.array(['RI', 'Na', 'Mg', 'Al', 'Si', 'K', 'Ca', 'Ba', 'Fe', 'Type'])
# Separate attributes and labels
xNum = []
labels = []
for row in xList:
labels.append(row.pop())
l = len(row)
# eliminate ID
attrRow = [float(row[i]) for i in range(1, l)]
xNum.append(attrRow)
# number of rows and columns in x matrix
nrows = len(xNum)
ncols = len(xNum[1])
# Labels are integers from 1 to 7 with no examples of 4.
# gb requires consecutive integers starting at 0
newLabels = []
labelSet = set(labels)
labelList = list(labelSet)
labelList.sort()
nlabels = len(labelList)
for l in labels:
index = labelList.index(l)
newLabels.append(index)
# Drawing 30% test sample may not preserve population proportions
# stratified sampling by labels.
xTemp = [xNum[i] for i in range(nrows) if newLabels[i] == 0]
yTemp = [newLabels[i] for i in range(nrows) if newLabels[i] == 0]
xTrain, xTest, yTrain, yTest = train_test_split(xTemp, yTemp, test_size=0.30, random_state=531)
for iLabel in range(1, len(labelList)):
# segregate x and y according to labels
xTemp = [xNum[i] for i in range(nrows) if newLabels[i] == iLabel]
yTemp = [newLabels[i] for i in range(nrows) if \
newLabels[i] == iLabel]
# form train and test sets on segregated subset of examples
xTrainTemp, xTestTemp, yTrainTemp, yTestTemp = train_test_split(
xTemp, yTemp, test_size=0.30, random_state=531)
# accumulate
xTrain = numpy.append(xTrain, xTrainTemp, axis=0)
xTest = numpy.append(xTest, xTestTemp, axis=0)
yTrain = numpy.append(yTrain, yTrainTemp, axis=0)
yTest = numpy.append(yTest, yTestTemp, axis=0)
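# Hedged alternative (not used here): newer scikit-learn releases can perform
# the per-label stratification above in a single call, e.g.
#
#   xTrain, xTest, yTrain, yTest = train_test_split(
#       xNum, newLabels, test_size=0.30, random_state=531, stratify=newLabels)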
missCLassError = []
nTreeList = range(50, 2000, 50)
depth = 1
for iTrees in nTreeList:
maxFeat = 4 # try tweaking
glassRFModel = ensemble.RandomForestClassifier(n_estimators=iTrees,
max_depth=depth, max_features=maxFeat,
oob_score=False, random_state=531)
glassRFModel.fit(xTrain, yTrain)
# Accumulate auc on test set
prediction = glassRFModel.predict(xTest)
correct = accuracy_score(yTest, prediction)
missCLassError.append(1.0 - correct)
    print("Misclassification Error for max_depth = " + str(depth))
print(missCLassError[-1])
# generate confusion matrix
pList = prediction.tolist()
confusionMat = confusion_matrix(yTest, pList)
print('')
print("Confusion Matrix")
print(confusionMat)
# plot training and test errors vs number of trees in ensemble
plot.plot(nTreeList, missCLassError)
plot.xlabel('Number of Trees in Ensemble')
plot.ylabel('Misclassification Error Rate')
# plot.show()
plot.savefig("mer" + str(depth) + "depth.png")
plot.close()
# Plot feature importance
featureImportance = glassRFModel.feature_importances_
# normalize by max importance
featureImportance = featureImportance / featureImportance.max()
# plot variable importance
idxSorted = numpy.argsort(featureImportance)
barPos = numpy.arange(idxSorted.shape[0]) + .5
plot.barh(barPos, featureImportance[idxSorted], align='center')
plot.yticks(barPos, glassNames[idxSorted])
plot.xlabel('Variable Importance')
# plot.show()
plot.savefig("varImp" + str(depth) + "depth.png")
# save first 2 tree
with open("tree1Ex1.dot", 'w') as f1:
f1 = tree.export_graphviz(glassRFModel.estimators_[0], out_file=f1)
with open("tree2Ex1.dot", 'w') as f2:
f2 = tree.export_graphviz(glassRFModel.estimators_[1], out_file=f2)
| gpl-2.0 |
krez13/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
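    # With the mock installed, doctests can call, for example,
    #   fetch_mldata('iris', data_home=custom_data_home)
    # and receive the synthetic (150, 4) array defined above instead of
    # contacting mldata.org (illustrative sketch; names depend on the doctests).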
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
rishikksh20/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 39 | 5425 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large amount of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
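# A hedged sketch of the nested cross-validation mentioned in the description
# above: an inner LassoCV chooses alpha on each training split while an outer
# loop scores the fitted model on held-out data (illustrative, not executed):
#
#   from sklearn.model_selection import cross_val_score
#   nested_scores = cross_val_score(LassoCV(cv=20), X, y, cv=5)
#   print("Nested CV R^2: %0.3f +/- %0.3f"
#         % (nested_scores.mean(), nested_scores.std()))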
| bsd-3-clause |
VariationalResearch/Polaron | finitemom.py | 1 | 11668 | from polrabi.staticfm import *
import matplotlib
import matplotlib.pyplot as plt
from scipy import interpolate
import os
# from timeit import default_timer as timer
# # INITIALIZATION
dirpath = os.path.dirname(os.path.realpath(__file__))
matplotlib.rcParams.update({'font.size': 12, 'text.usetex': True})
gBB = 0.05
mI = 1
mB = 1
n0 = 1
nuV = nu(gBB)
DPMax = mI * nuV
res = aSi0(gBB, mI, mB, n0)
# # ZERO MOMENTUM
# aIBi_vals = np.linspace(-1, 1, 1000)
# aSi_v = aSi0(gBB, mI, mB, n0)
# E_vals = Eup(0, 0, aIBi_vals, aSi_v, mI, mB, n0)
# # compare with MATLAB values -- note: biggest discrepancy is near resonance where values are off by < 0.1 (understandable)
# Edat = np.genfromtxt('zm.dat', delimiter=',')
# mask = np.abs(Edat - E_vals) > 1e-2
# print(aIBi_vals[mask])
# fig, ax = plt.subplots()
# ax.plot(aIBi_vals, E_vals, 'k-')
# ax.set_ylim([-50, 50])
# ax.set_xlabel('Inverse Scattering Length ($a_{IB}^{-1}$)')
# ax.set_ylabel('Energy')
# ax.set_title('Polaron Energy at Zero Momentum (static case - no rabi drive)')
# plt.show()
# # INTERPOLATION
# Nsteps = 1e3
# createSpline(Nsteps, gBB, mI, mB, n0)
aSi_tck = np.load('aSi_spline.npy')
PBint_tck = np.load('PBint_spline.npy')
# DP_max = mI * nuV
# DPv = np.linspace(0, DP_max, 100)
# fig, ax = plt.subplots()
# ax.plot(DPv, aSi_interp(DPv, aSi_tck), 'k-')
# ax.plot(DPv, PB_interp(DPv, -4, gBB, mI, mB, n0, aSi_tck, PBint_tck), 'b-')
# plt.show()
# # DP GRID
# aIBiVals = np.linspace(-10, 10, 100)
# PcVals = PCrit(aIBiVals, gBB, mI, mB, n0)
# grid = []
# for ind, aIBi in enumerate(aIBiVals):
# step = 0.1 * mI * nuV
# PVals = np.arange(0.1 * mI * nuV, 0.95 * PcVals[ind], step)
# grid.append((aIBi, PVals))
# points = []
# DPg = 0
# for ind, vertsec in enumerate(grid):
# (aIBi, PVals) = vertsec
# for P in PVals:
# DP_stable = DP_interp(DPg, P, aIBi, gBB, mI, mB, n0, aSi_tck, PBint_tck)
# if (DP_stable == -1 or DP_stable > DPMax):
# print([aIBi, P, DP_stable])
# break
# points.append([aIBi, P, DP_stable])
# pointsarray = np.array(points)
# np.savetxt("aIB_P_DP_points.csv", pointsarray)
# # DATA PROCESSING
# points = np.genfromtxt('aIB_P_DP_points.csv', delimiter=' ')
# aIBip = points[:, 0]
# Pp = points[:, 1]
# DPp = points[:, 2]
# aSip = aSi_interp(DPp, aSi_tck)
# PBp = PB_interp(DPp, aIBip, gBB, mI, mB, n0, aSi_tck, PBint_tck)
# Ep = Eup(Pp, PBp, aIBip, aSip, mI, mB, n0)
# rMp = rMass(Pp, PBp, mI)
# # aIBi, P, DP, aSi, PB, E, rM
# dat = np.concatenate((points, aSip[:, np.newaxis], PBp[:, np.newaxis], Ep[:, np.newaxis], rMp[:, np.newaxis]), axis=1)
# np.savetxt("fmdat.csv", dat)
# # IMPURITY MOMENTUM VS. INTERACTIONS
# aIBiVals = np.linspace(-10, 10, 100)
# Pc_min = PCrit(np.amin(aIBiVals), gBB, mI, mB, n0)
# PVals = np.linspace(0.1 * mI * nuV, 0.95 * Pc_min, 4)
# fig, ax = plt.subplots()
# colortyp = np.array(['r', 'g', 'b', 'y', 'c', 'm', 'y', 'k'])
# ax.plot(aIBiVals, np.zeros(aIBiVals.size), 'k', label=r'$\frac{P}{P_{crit}(a_{IB}^{-1}=-10)}=%d$' % 0)
# DPg = 0
# for indp, P in enumerate(PVals):
# DPVals = np.zeros(aIBiVals.size)
# for inda, aIBi in enumerate(aIBiVals):
# DP_stable = DP_interp(DPg, P, aIBi, gBB, mI, mB, n0, aSi_tck, PBint_tck)
# if (DP_stable == -1 or DP_stable > DPMax):
# print([aIBi, P, DP_stable])
# DPVals[inda] = float('nan')
# else:
# DPVals[inda] = DP_stable
# mask = ~np.isnan(DPVals)
# DPValsC = DPVals[mask]
# aIBiValsC = aIBiVals[mask]
# DP_tck = interpolate.splrep(aIBiValsC, DPValsC, s=0)
# DP_int = interpolate.splev(aIBiVals, DP_tck, der=0)
# Pnorm = P / Pc_min
# ax.plot(aIBiVals, DP_int / DPMax, colortyp[indp], label=r'$\frac{P}{P_{crit}(a_{IB}^{-1}=-10)}=%.2f$' % Pnorm)
# ax.legend()
# ax.set_xlabel(r'Scattering Length ($a_{IB}^{-1}$)')
# ax.set_ylabel(r'Impurity Momentum ($\frac{\Delta P}{m_{I}\nu_{s}}$)')
# ax.set_title('Impurity Momentum vs Interactions')
# plt.show()
# # fig.savefig('impuritymom.pdf')
# # EFFECTIVE MASS VS. INTERACTIONS
# aIBiVals = np.linspace(-10, 10, 100)
# P = 0.1 * mI * nuV
# fig, ax = plt.subplots()
# DPg = 0
# PBVals = np.zeros(aIBiVals.size)
# for inda, aIBi in enumerate(aIBiVals):
# DP_stable = DP_interp(DPg, P, aIBi, gBB, mI, mB, n0, aSi_tck, PBint_tck)
# if (DP_stable == -1 or DP_stable > DPMax):
# print([aIBi, P, DP_stable])
# PBVals[inda] = float('nan')
# else:
# PBVals[inda] = PB_interp(DP_stable, aIBi, gBB, mI, mB, n0, aSi_tck, PBint_tck)
# mask = ~np.isnan(PBVals)
# PBValsC = PBVals[mask]
# aIBiValsC = aIBiVals[mask]
# ax.plot(aIBiVals, rMass(P, PBVals, mI) / mI, 'b', label=r'$P=0.1 m_{I}\nu_{s}$')
# ax.legend()
# ax.set_xlabel(r'Scattering Length ($a_{IB}^{-1}$)')
# ax.set_ylabel(r'Mass ($\frac{M_{pol}}{m_{I}}=\frac{P}{P-P_{B}}$)')
# ax.set_title('Effective Mass vs Interactions')
# plt.show()
# # NUMBER OF EXCITATIONS, Z-FACTOR, ENERGY VS INTERACTION STRENGTH
# aIBiVals = np.linspace(-10, 10, 100)
# Pc_min = PCrit(np.amin(aIBiVals), gBB, mI, mB, n0)
# PVals = np.linspace(0.1 * mI * nuV, 0.95 * Pc_min, 4)
# fig, ax = plt.subplots()
# fig2, ax2 = plt.subplots()
# fig3, ax3 = plt.subplots()
# colortyp = np.array(['r', 'g', 'b', 'y', 'c', 'm', 'y', 'k'])
# # ax.plot(aIBiVals, np.zeros(aIBiVals.size), 'k', label=r'$\frac{P}{P_{crit}(a_{IB}^{-1}=-10)}=%d$' % 0)
# DPg = 0
# for indp, P in enumerate(PVals):
# NBVals = np.zeros(aIBiVals.size)
# qpVals = np.zeros(aIBiVals.size)
# EVals = np.zeros(aIBiVals.size)
# for inda, aIBi in enumerate(aIBiVals):
# DP_stable = DP_interp(DPg, P, aIBi, gBB, mI, mB, n0, aSi_tck, PBint_tck)
# if (DP_stable == -1 or DP_stable > DPMax):
# print([aIBi, P, DP_stable])
# qpVals[inda] = float('nan')
# NBVals[inda] = float('nan')
# EVals[inda] = float('nan')
# else:
# aSi = aSi_interp(DP_stable, aSi_tck)
# PB = PB_interp(DP_stable, aIBi, gBB, mI, mB, n0, aSi_tck, PBint_tck)
# NBVals[inda] = num_phonons(aIBi, aSi, gBB, mI, mB, n0)
# qpVals[inda] = qp_residue(aIBi, aSi, gBB, mI, mB, n0)
# EVals[inda] = Eup(P, PB, aIBi, aSi, mI, mB, n0)
# mask = ~np.isnan(NBVals)
# NBValsC = NBVals[mask]
# qpValsC = qpVals[mask]
# aIBiValsC = aIBiVals[mask]
# EValsC = EVals[mask]
# qp_tck = interpolate.splrep(aIBiValsC, qpValsC, s=0)
# qp_int = interpolate.splev(aIBiVals, qp_tck, der=0)
# Pnorm = P / Pc_min
# ax.plot(aIBiVals, NBVals, colortyp[indp], label=r'$\frac{P}{P_{crit}(a_{IB}^{-1}=-10)}=%.2f$' % Pnorm)
# ax2.plot(aIBiVals, qp_int, colortyp[indp], label=r'$\frac{P}{P_{crit}(a_{IB}^{-1}=-10)}=%.2f$' % Pnorm)
# ax3.plot(aIBiVals, EVals, colortyp[indp], label=r'$\frac{P}{P_{crit}(a_{IB}^{-1}=-10)}=%.2f$' % Pnorm)
# ax.legend()
# ax.set_xlabel(r'Scattering Length ($a_{IB}^{-1}$)')
# ax.set_ylabel(r'Number of Phonons ($N_{ph}$)')
# ax.set_title('Number of Phonons vs Interactions')
# ax2.legend()
# ax2.set_xlabel(r'Scattering Length ($a_{IB}^{-1}$)')
# ax2.set_ylabel(r'Quasiparticle Residue ($e^{-\frac{1}{2}N_{ph}}$)')
# ax2.set_title('Quasiparticle Residue vs Interactions')
# ax3.legend()
# ax3.set_xlabel(r'Scattering Length ($a_{IB}^{-1}$)')
# ax3.set_ylabel(r'Energy)')
# ax3.set_title('Energy vs Interactions')
# plt.show()
# fig.savefig('impuritymom.pdf')
# # ENERGY VS MOMENTUM
# aIBi, P, DP, aSi, PB, E, rM
# aIBiVals = -1 * np.array([0.25, 1, 3, 5, 10])
aIBiVals = -1 * np.array([2])
# aIBiVals = np.array([-5, res - 0.3, res + 0.3, 5])
# fig, ax = plt.subplots()
# fign, axn = plt.subplots()
colortyp = np.array(['r', 'g', 'b', 'y', 'c', 'm', 'y', 'k'])
E1 = []
E2 = []
DPg = 0
for inda, aIBi in enumerate(aIBiVals):
Pc = PCrit(aIBi, gBB, mI, mB, n0)
PVals = np.linspace(0.1 * mI * nuV, 0.95 * Pc, 100)
EVals = np.zeros(PVals.size)
for indp, P in enumerate(PVals):
DP_stable = DP_interp(DPg, P, aIBi, gBB, mI, mB, n0, aSi_tck, PBint_tck)
if (DP_stable == -1 or DP_stable > DPMax):
print([aIBi, P, DP_stable])
EVals[indp] = float('nan')
else:
aSi = aSi_interp(DP_stable, aSi_tck)
PB = PB_interp(DP_stable, aIBi, gBB, mI, mB, n0, aSi_tck, PBint_tck)
EVals[indp] = Eup(P, PB, aIBi, aSi, mI, mB, n0)
mask = ~np.isnan(EVals)
EValsC = EVals[mask]
PValsC = PVals[mask]
a0 = aSi0(gBB, mI, mB, n0)
E0 = Eup(0, 0, aIBi, a0, mI, mB, n0)
E_tck = interpolate.splrep(PValsC, EValsC - E0, s=0)
E_int = interpolate.splev(PVals, E_tck, der=0)
E1_int = interpolate.splev(PVals, E_tck, der=1)
# E1_tck = interpolate.splrep(PVals, E1_int, s=0)
E2_int = interpolate.splev(PVals, E_tck, der=2)
E1.append((PVals / Pc, E1_int))
E2.append((PVals / Pc, E2_int))
# ax.plot(PValsC, EValsC - E0, colortyp[inda], label=r'$a_{IB}^{-1}=%.2f$' % aIBi)
# axn.plot(PValsC / Pc, EValsC - E0, colortyp[inda], label=r'$a_{IB}^{-1}=%.2f$' % aIBi)
# create data to be saved (giant file to be saved later)
# PdVec = np.concatenate((aIBi * np.ones(1), PValsC))
# EdVec = np.concatenate((aIBi * np.ones(1), EValsC))
# if(inda == 0):
# Edat = np.concatenate((PdVec[:, np.newaxis], EdVec[:, np.newaxis]), axis=1)
# else:
# Edat = np.concatenate((Edat, PdVec[:, np.newaxis], EdVec[:, np.newaxis]), axis=1)
# save data to file each time instead of one giant file
Edat = np.concatenate((PValsC[:, np.newaxis], EValsC[:, np.newaxis]), axis=1)
np.savetxt(dirpath + '/fmEdata/fmEP_aIBi_%.2f.dat' % (aIBi), Edat)
# np.savetxt("Edat.csv", Edat)
# ax.legend()
# ax.set_xlabel('Momentum ($P$)')
# ax.set_ylabel('Energy ($E-E(P=0)$)')
# ax.set_title('Energy vs Momentum')
# axn.legend()
# axn.set_xlabel(r'Momentum ($\frac{P}{P_{crit}(a_{IB})}$)')
# axn.set_ylabel('Energy ($E-E(P=0)$)')
# axn.set_title('Energy vs Momentum')
# fig2, ax2 = plt.subplots()
# (Pnorm, E1Vals) = E1[0]
# (Pnorm, E2Vals) = E2[0]
# ax2.plot(Pnorm, E1Vals, colortyp[0], label=r'$\frac{\partial E}{\partial P}$')
# ax2.plot(Pnorm, E2Vals, colortyp[1], label=r'$\frac{\partial^{2} E}{\partial P^{2}}$')
# ax2.legend()
# ax2.set_xlabel(r'Momentum ($\frac{P}{P_{crit}(a_{IB})}$)')
# ax2.set_ylabel('Energy Derivatives')
# ax2.set_title(r'Energy Behavior for $a_{IB}^{-1}=%.2f$' % aIBiVals[0])
# fig3, ax3 = plt.subplots()
# (Pnorm, E1Vals) = E1[-2]
# (Pnorm, E2Vals) = E2[-2]
# ax3.plot(Pnorm, E1Vals, colortyp[0], label=r'$\frac{\partial E}{\partial P}$')
# ax3.plot(Pnorm, E2Vals, colortyp[1], label=r'$\frac{\partial^{2} E}{\partial P^{2}}$')
# ax3.legend()
# ax3.set_xlabel(r'Momentum ($\frac{P}{P_{crit}(a_{IB})}$)')
# ax3.set_ylabel('Energy Derivatives')
# ax3.set_title(r'Energy Behavior for $a_{IB}^{-1}=%.2f$' % aIBiVals[-2])
# fig4, ax4 = plt.subplots()
# fig5, ax5 = plt.subplots()
# for ind, aIBi in enumerate(aIBiVals):
# (Pnorm, E1Vals) = E1[ind]
# (Pnorm, E2Vals) = E2[ind]
# ax4.plot(Pnorm, E1Vals, colortyp[ind], label='$a_{IB}^{-1}=%.2f$' % aIBi)
# ax5.plot(Pnorm, E2Vals / E2Vals[0], colortyp[ind], label='$a_{IB}^{-1}=%.2f$' % aIBi)
# ax4.legend()
# ax4.set_xlabel(r'Momentum ($\frac{P}{P_{crit}(a_{IB})}$)')
# ax4.set_ylabel(r'$\frac{\partial E}{\partial P}$')
# ax4.set_title(r'First Derivative of Energy')
# ax5.legend()
# ax5.set_xlabel(r'Momentum ($\frac{P}{P_{crit}(a_{IB})}$)')
# ax5.set_ylabel(r'$M_{P}\frac{\partial^{2} E}{\partial P^{2}}$')
# ax5.set_title(r'Second Derivative of Energy')
# ax5.set_xlim([0, 1])
# ax5.set_ylim([0, 1])
# plt.show()
# fig.savefig('impuritymom.pdf')
| mit |
sumspr/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
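# Quick sanity check (a sketch, not part of the original example): each row of
# the dictionary is one atom with unit l2 norm, e.g.
#
#   D = ricker_matrix(width=10, resolution=64, n_components=8)
#   D.shape                                    # (8, 64)
#   np.allclose(np.sum(D ** 2, axis=1), 1.0)   # True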
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
tensorflow/probability | tensorflow_probability/python/sts/internal/seasonality_util.py | 1 | 9383 | # Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for inferring and representing seasonality."""
import collections
import enum
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.internal import prefer_static as ps
class SeasonTypes(enum.Enum):
SECOND_OF_MINUTE = 0,
MINUTE_OF_HOUR = 1,
HOUR_OF_DAY = 2,
DAY_OF_WEEK = 3
MONTH_OF_YEAR = 4
SeasonConversion = collections.namedtuple(
'SeasonConversion', ['num', 'duration'])
_SEASONAL_PROTOTYPES = collections.OrderedDict({
SeasonTypes.SECOND_OF_MINUTE: SeasonConversion(num=60, duration=1),
SeasonTypes.MINUTE_OF_HOUR: SeasonConversion(num=60, duration=60),
SeasonTypes.HOUR_OF_DAY: SeasonConversion(num=24, duration=3600),
SeasonTypes.DAY_OF_WEEK: SeasonConversion(num=7, duration=86400)
})
def create_seasonal_structure(frequency, num_steps, min_cycles=2):
"""Creates a set of suitable seasonal structures for a time series.
Args:
frequency: a Pandas `pd.DateOffset` instance.
num_steps: Python `int` number of steps at the given frequency.
min_cycles: Python `int` minimum number of cycles to include an effect.
Returns:
A dictionary of SeasonConversion instances representing different
seasonal components.
Example 1: For data.index.freq: pd.DateOffset(hours=1)
Seasonal components:
{
SeasonTypes.HOUR_OF_DAY: SeasonConversion(num=24, duration=1),
SeasonTypes.DAY_OF_WEEK: SeasonConversion(num=7, duration=24)
}
Example 2: For data.index.freq: pd.DateOffset(seconds=30)
Seasonal components:
{
SeasonTypes.SECOND_OF_MINUTE: SeasonConversion(num=2, duration=1),
SeasonTypes.MINUTE_OF_HOUR: SeasonConversion(num=60, duration=2),
SeasonTypes.HOUR_OF_DAY: SeasonConversion(num=24, duration=120),
SeasonTypes.DAY_OF_WEEK: SeasonConversion(num=7, duration=2880)
}
If the frequency is N times per year, for integer 2 <= N <= 12 (e.g.,
12 for monthly or 4 for quarterly), then a fixed structure of (N, 1)
will be created.
"""
num_periods = periods_per_year(frequency)
if num_periods is not None:
# Fixed structure for monthly or quarterly data.
return {
SeasonTypes.MONTH_OF_YEAR: SeasonConversion(num=num_periods, duration=1)
}
# Compute seasonal components by cycling through _SEASONAL_PROTOTYPES and
# filter out None components.
components = { # pylint: disable=g-complex-comprehension
k: make_component(v,
frequency=frequency,
num_steps=num_steps,
min_cycles=min_cycles)
for k, v in _SEASONAL_PROTOTYPES.items()}
return {k: v for (k, v) in components.items() if v is not None}
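# A minimal sketch (assumes `import pandas as pd`; values follow Example 1 in
# the docstring above):
#
#   create_seasonal_structure(pd.DateOffset(hours=1), num_steps=1000)
#   # => {SeasonTypes.HOUR_OF_DAY: SeasonConversion(num=24, duration=1),
#   #     SeasonTypes.DAY_OF_WEEK: SeasonConversion(num=7, duration=24)}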
def make_component(season_tuple, frequency, num_steps, min_cycles=2):
"""Make a seasonal component from a template component.
This is a helper function to the _create_seasonal_structure() method. It
takes a SeasonConversion instance from _SEASONAL_PROTOTYPES and
creates a seasonal component based on the number of observations
`num_steps` in the data and the time series frequency `freq_sec`. A
custom seasonal component is created if it fulfills 4 conditions:
Condition 1: time series must cover at least _MIN_CYCLES full cycles.
Condition 2: full cycle must be a multiple of the granularity.
Condition 3: if the season is longer than the granularity, it must be a
multiple of the granularity.
Condition 4: number of seasons must be greater than 1.
Args:
    season_tuple: a `SeasonConversion` instance giving the number of
      seasons and the season duration for a template seasonal component, e.g.
(60, 1) for seconds-of-minute or (60, 60) for minute-of-hour.
See _SEASONAL_PROTOTYPES for more details.
frequency: a `pd.DateOffset` instance.
num_steps: Python `int` number of steps at the given frequency.
min_cycles: Python `int` minimum number of cycles to include an effect.
Returns:
    A `SeasonConversion` instance, where num and duration give
    the inferred structure for the seasonal component. If a seasonal
    component cannot be created, None is returned for that component.
"""
freq_sec = freq_to_seconds(frequency)
if not freq_sec:
return None
num_seasons = season_tuple.num
duration_seconds = season_tuple.duration
# None component returned if no component can be created below.
component = None
# Condition 1: time series must cover at least _MIN_CYCLES full cycles.
minimum_observations = ((num_seasons * duration_seconds * min_cycles) /
freq_sec)
cond1 = num_steps >= minimum_observations
# Condition 2: full cycle must be a multiple of the granularity.
cond2 = (num_seasons * duration_seconds) % freq_sec == 0
# Condition 3: if the season is longer than the granularity, it must be a
# multiple of the granularity.
cond3 = ((duration_seconds <= freq_sec) or
(duration_seconds % freq_sec == 0))
if cond1 and cond2 and cond3:
nseasons = min(num_seasons * duration_seconds /
freq_sec, num_seasons)
season_duration = max(duration_seconds / freq_sec, 1)
# Condition 4: number of seasons must be greater than 1.
cond4 = ((nseasons > 1) and (nseasons <= num_seasons))
if cond4:
component = SeasonConversion(
num=int(nseasons),
duration=int(season_duration))
return component
def _design_matrix_for_one_seasonal_effect(num_steps, duration, period, dtype):
current_period = np.int32(np.arange(num_steps) / duration) % period
return np.transpose([
ps.where(current_period == p, # pylint: disable=g-complex-comprehension
ps.ones([], dtype=dtype),
ps.zeros([], dtype=dtype))
for p in range(period)])
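# Sketch: with duration=1 and period=3 each row one-hot encodes the current
# season, repeating every `period` steps, e.g. for num_steps=4:
#
#   _design_matrix_for_one_seasonal_effect(4, 1, 3, tf.float32)
#   # [[1, 0, 0],
#   #  [0, 1, 0],
#   #  [0, 0, 1],
#   #  [1, 0, 0]]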
def build_fixed_effects(num_steps,
seasonal_structure=None,
covariates=None,
dtype=tf.float32):
"""Builds a design matrix treating seasonality as fixed-effects regression."""
if seasonal_structure is None:
seasonal_structure = {}
if seasonal_structure:
design_matrix = ps.concat(
[
_design_matrix_for_one_seasonal_effect(
num_steps, seasonal_effect.duration, seasonal_effect.num, dtype)
for seasonal_effect in seasonal_structure.values()
], axis=-1)
else:
design_matrix = ps.ones([num_steps, 1], dtype=dtype)
if covariates:
design_matrix = ps.concat(
[design_matrix] +
[tf.convert_to_tensor(x)[..., :num_steps, :] for x in covariates],
axis=-1)
return design_matrix
def freq_to_seconds(freq):
"""Converts time series DateOffset frequency to seconds."""
if not freq:
return None
if not is_fixed_duration(freq):
return None
freq_secs = 0.
for kwds_unit, kwds_value in freq.kwds.items():
switch_to_seconds = {
'weeks': kwds_value * 60 * 60 * 24 * 7,
'days': kwds_value * 60 * 60 * 24,
'hours': kwds_value * 60 * 60,
'minutes': kwds_value * 60,
'seconds': kwds_value
}
freq_secs += switch_to_seconds[kwds_unit]
return freq_secs
def periods_per_year(frequency):
"""Counts number of steps that equal a year, if defined and 2 <= N <= 12."""
# pylint: disable=unused-import,g-import-not-at-top
import pandas as pd # Defer import to avoid a package-level Pandas dep.
# pylint: enable=unused-import,g-import-not-at-top
if is_fixed_duration(frequency):
return None # No fixed duration divides both leap and non-leap years.
start = pd.Timestamp('1900-01-01')
# Align the start date with any constraints imposed by the frequency, e.g.,
# `pd.offsets.MonthEnd()`.
start = (start + frequency) - frequency
end = start + pd.DateOffset(years=1)
for num_steps in range(2, 13):
if start + num_steps * frequency == end:
return num_steps
return None
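# Sketch (assumes `import pandas as pd`): monthly and quarterly offsets map to
# 12 and 4 steps per year, while fixed-duration offsets yield None:
#
#   periods_per_year(pd.DateOffset(months=1))   # 12
#   periods_per_year(pd.DateOffset(months=3))   # 4
#   periods_per_year(pd.DateOffset(hours=1))    # None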
def is_fixed_duration(frequency):
"""Determines if a `pd.DateOffset` represents a fixed number of seconds."""
# pylint: disable=unused-import,g-import-not-at-top
import pandas as pd # Defer import to avoid a package-level Pandas dep.
# pylint: enable=unused-import,g-import-not-at-top
# Most Pandas offsets define `self.nanos` if and only if they are
# fixed-duration (this is checked below), but `pd.DateOffset` doesn't do
# this for some reason, so handle this case explicitly.
if type(frequency) == pd.DateOffset: # pylint: disable=unidiomatic-typecheck
if frequency.kwds.get('months', 0) != 0:
return False
if frequency.kwds.get('years', 0) != 0:
return False
return True
# Handle custom frequencies like `pd.offsets.MonthsEnd()`.
try:
frequency.nanos
except ValueError:
return False
return True
| apache-2.0 |
yousrabk/mne-python | examples/visualization/plot_topo_customized.py | 17 | 1914 | """
========================================
Plot custom topographies for MEG sensors
========================================
This example exposes the `iter_topography` function that makes it
very easy to generate custom sensor topography plots.
Here we will plot the power spectrum of each channel on a topographic
layout.
"""
# Author: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.viz import iter_topography
from mne import io
from mne.time_frequency import compute_raw_psd
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = io.Raw(raw_fname, preload=True)
raw.filter(1, 20)
picks = mne.pick_types(raw.info, meg=True, exclude=[])
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 2, 20 # look at frequencies between 2 and 20Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
psds, freqs = compute_raw_psd(raw, picks=picks, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds = 20 * np.log10(psds) # scale to dB
def my_callback(ax, ch_idx):
"""
This block of code is executed once you click on one of the channel axes
in the plot. To work with the viz internals, this function should only take
two parameters, the axis and the channel or data index.
"""
ax.plot(freqs, psds[ch_idx], color='red')
ax.set_xlabel = 'Frequency (Hz)'
ax.set_ylabel = 'Power (dB)'
for ax, idx in iter_topography(raw.info,
fig_facecolor='white',
axis_facecolor='white',
axis_spinecolor='white',
on_pick=my_callback):
ax.plot(psds[idx], color='red')
plt.gcf().suptitle('Power spectral densities')
plt.show()
| bsd-3-clause |
mugizico/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with a multi-output estimators
==============================================
This example shows the use of multi-output estimator to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
Reslix/AutoArxiv | autoarxiv/warden/scripts/article_sorting.py | 1 | 3550 | """
For the time being, the most important heuristic for determining article priority will be based on the presence of
authors
"""
from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from warden.models import AuthorRating, ArticleRating
class AuthorRank:
"""
Given a member and a set of articles, return articles rated on author ratings.
The more authors per article that have a good rating, the higher the article rating.
"""
@classmethod
def rank(cls, member, articles):
author_ratings = AuthorRating.objects.filter(member=member)
authors = {x.author: x.rating for x in author_ratings}
ranks = {}
for article in articles:
a = article.authors.all()
ranks[article] = sum([authors.get(author, 0) for author in a])
return ranks
class CompositeRank:
"""
Given a member and a set of articles, return articles rated on a set of article criteria:
Basically everything, including a composite author rating. The weights will be learned.
For now, only the abstracts of articles will be used to reduce the dimensionality of the problem space.
TODO:
1. autoencoder feature learning using pytorch into collaborative filtering
2. bayesian something something
3. category rating learning
"""
def rank(self, member, articles):
ranks = {}
labeled = list(ArticleRating.objects.filter(member=member))
if len(labeled) > 0:
labeled_text = [rating.article.abstract for rating in labeled]
labeled_authors = [rating.article.authors.all() for rating in labeled]
labels = [rating.rating for rating in labeled]
unlabeled_text = [article.abstract for article in articles]
model, count_vect, tfidf_transformer = self.train_model(labeled_text, labels)
predictions = self.predict(model, count_vect, tfidf_transformer, unlabeled_text)
author_rating = {}
for label, l_authors in zip(labels, labeled_authors):
for author in l_authors:
if author in author_rating:
author_rating[author] += label
else:
author_rating[author] = label
author_pred = [sum([author_rating.get(author, 0) for author in article.authors.all()]) for article in
articles]
for article, author_pred, prediction in zip(articles, author_pred, predictions):
ranks[article] = (author_pred, prediction)
else:
ranks = {article: (0, 0) for article in articles}
return ranks
def predict(self, model, count_vect, tfidf_transformer, text):
counts = count_vect.transform(text)
tfidf = tfidf_transformer.transform(counts)
return model.predict(tfidf)
def train_model(self, text, labels):
"""
        This is an SVM that uses tf-idf vectors as features. In the future, we want to use a more sophisticated
model for recommendation, but this should suffice on naive examples (there's no basis for this assumption).
:param text:
:return:
"""
clf = svm.SVR()
count_vect = CountVectorizer()
tfidf_transformer = TfidfTransformer()
counts = count_vect.fit_transform(text)
tfidf = tfidf_transformer.fit_transform(counts)
clf.fit(tfidf, labels)
return clf, count_vect, tfidf_transformer
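# A hedged usage sketch (toy data; the strings below are illustrative only):
#
#   ranker = CompositeRank()
#   model, vect, tfidf = ranker.train_model(
#       ["dark matter halos", "graph neural networks"], [1, 5])
#   ranker.predict(model, vect, tfidf, ["halo mass functions"])  # ~ratings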
| mit |
ddebrunner/streamsx.topology | samples/python/topology/scikit-learn/digit_predict.py | 2 | 1105 | # Licensed Materials - Property of IBM
# Copyright IBM Corp. 2018
import os
from sklearn.externals import joblib
import streamsx.ec
class DigitPredictor(object):
"""
Callable class that loads the model from a file in
its context manager methods.
"""
def __init__(self, model_path):
# Note this method is only called when the topology is
        # declared to create an instance to use in the map function.
self.model_path = model_path
self.clf = None
def __call__(self, image):
"""Predict the digit from the image.
"""
return {'image':image, 'digit':self.clf.predict(image.reshape(1,-1))[0]}
def __enter__(self):
"""Load the model from a file.
"""
# Called at runtime in the IBM Streams job before
# this instance starts processing tuples.
self.clf = joblib.load(
os.path.join(streamsx.ec.get_application_directory(), self.model_path))
def __exit__(self, exc_type, exc_value, traceback):
# __enter__ and __exit__ must both be defined.
pass
| apache-2.0 |
sinhrks/numpy | numpy/lib/npyio.py | 42 | 71218 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute look-up can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
    arr : array_like
        Array data to be saved.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for disallowing
        pickles include security (loading pickled data can execute arbitrary
        code) and portability (pickled objects may not be loadable on different
        Python installations, for example if the stored objects require libraries
        that are not available, and not all pickled data is compatible between
        Python 2 and Python 3).
        Default: True
    fix_imports : bool, optional
        Only useful in forcing objects in object arrays on Python 3 to be
        pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
        will try to map the new Python 3 names to the old module names used in
        Python 2, so that the pickle data stream is readable with Python 2.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
    file : str
        File name of the ``.npz`` archive. The ``.npz`` extension will be
        appended to the file name if it is not already there.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know their names outside `savez_compressed`, they will be saved with
        names "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Each array will be saved in the file
        under its keyword name.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
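    Examples
    --------
    A minimal round-trip sketch (it mirrors the `savez` example above; the
    temporary file is used only for illustration):
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> a = np.arange(5)
    >>> np.savez_compressed(outfile, a=a)
    >>> outfile.seek(0)  # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)['a']
    array([0, 1, 2, 3, 4])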
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
    def floatconv(x):
        # Lower-case the input so hex-float prefixes like '0X' are recognized.
        x = x.lower()
        if b'0x' in x:
            return float.fromhex(asstr(x))
        return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
    comments : str or sequence, optional
        The characters or list of characters used to indicate the start of a
        comment; default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
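    # Illustrative note (example values, not from the original source): for
    # np.dtype([('a', 'i4'), ('b', 'f8', (2,))]) the helper above yields
    #   types   == [dtype('int32'), dtype('float64'), dtype('float64')]
    #   packing == [(1, None), (2, list)]
    # and pack_items([1, 2.0, 3.0], packing) regroups the flat converted
    # values into (1, [2.0, 3.0]) to match the structured dtype.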
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
---------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, or generator. "
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
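    # Illustrative note (example values, not from the original source): after
    # this block ``missing_values`` holds one list of byte strings per column,
    # e.g. with nbcols == 2 and missing_values="N/A" it becomes
    # [[b'', b'N/A'], [b'', b'N/A']].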
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
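    Examples
    --------
    A minimal sketch (the data values are illustrative):
    >>> from io import StringIO
    >>> s = StringIO("1 2\\n3 4")
    >>> np.ndfromtxt(s, dtype=int)
    array([[1, 2],
           [3, 4]])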
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
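    Examples
    --------
    A minimal sketch (the data values are illustrative); a masked array is
    returned even when no values are actually masked:
    >>> from io import StringIO
    >>> s = StringIO("1 2\\n3 4")
    >>> m = np.mafromtxt(s, dtype=float)
    >>> m.mask.any()
    False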
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
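    Examples
    --------
    A minimal sketch (the data values are illustrative); fields are accessible
    as attributes of the returned record array:
    >>> from io import StringIO
    >>> s = StringIO("1 2.5\\n3 4.5")
    >>> r = np.recfromtxt(s, names=['a', 'b'])
    >>> r.a
    array([1, 3])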
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
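    Examples
    --------
    A minimal sketch (the data values are illustrative); by default the first
    line is read as the field names and the names are lower-cased:
    >>> from io import StringIO
    >>> s = StringIO("A,B\\n1,2.5\\n3,4.5")
    >>> r = np.recfromcsv(s)
    >>> r.a
    array([1, 3])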
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| bsd-3-clause |
XiaoxiaoLiu/morphology_analysis | utilities/global_feature_calculation.py | 1 | 4097 | import os
import scipy.stats
import numpy
import matplotlib.pylab as pl
import pandas as pd
# program path on this machine
#===================================================================
#blastneuron_DIR = "/home/xiaoxiaol/work/src/blastneuron"
blastneuron_DIR = "/Users/xiaoxiaoliu/work/src/blastneuron"
PRUNE_SHORT_BRANCH = blastneuron_DIR + "/bin/prune_short_branch"
PRE_PROCESSING = blastneuron_DIR + "/bin/pre_processing"
NEURON_RETRIEVE = blastneuron_DIR + "/bin/neuron_retrieve"
BATCH_COMPUTE = blastneuron_DIR + "/bin/batch_compute" # compute features
V3D="/Users/xiaoxiaoliu/work/v3d/v3d_external/bin/vaa3d64.app/Contents/MacOS/vaa3d64"
#V3D="/local1/xiaoxiaol/work/v3d/v3d_external/bin/vaa3d"
data_DIR = '/Volumes/mat/xiaoxiaol/data/lims2/nr_june_25_filter_aligned/apical'
original_data_linker_file = data_DIR+'/original/mylinker.ano' # will be generated
preprocessed_data_linker_file = data_DIR+'/preprocessed/mylinker.ano'
feature_file = data_DIR + '/preprocessed/prep_features.nfb'
#===================================================================
def prune(inputswc_fn, outputswc_fn):
cmd = 'cd '+data_DIR+'/pruned/'
os.system(cmd)
cmd = PRUNE_SHORT_BRANCH + " -i "+inputswc_fn + " -o "+outputswc_fn
os.system(cmd)
print cmd
return
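# Illustrative usage (the paths are assumptions for demonstration only):
#   prune(data_DIR + '/original/cell.swc', data_DIR + '/pruned/cell.swc')
# builds and runs a command of the form
#   <blastneuron_DIR>/bin/prune_short_branch -i .../original/cell.swc -o .../pruned/cell.swc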
def preprocessing(inputswc_fn, outputswc_fn):
cmd = 'cd '+data_DIR+'/preprocessed/'
os.system(cmd)
cmd = PRE_PROCESSING+ " -i "+inputswc_fn + " -o "+outputswc_fn
os.system(cmd)
return
def neuronretrive(inputswc_fn, feature_fn, result_fn, retrieve_number, logfile):
cmd = NEURON_RETRIEVE + " -d " + feature_fn + " -q " +inputswc_fn + " -n "+ \
str(retrieve_number) +" -o "+result_fn+" -m 1,3" + " >" + logfile
print cmd
os.system(cmd)
return
def featurecomputing(input_linker_fn, feature_fn):
cmd = 'cd '+data_DIR+'/preprocessed/'
os.system(cmd)
cmd = BATCH_COMPUTE + " -i "+input_linker_fn + " -o " + feature_fn
os.system(cmd)
print cmd
return
#def genLinkerFile(swcDir, linker_file):
# cmd = V3D + " -x linker_file_gen -f linker -i "+ swcDir +" -o "+ linker_file +" -p 1"
# print cmd
# os.system(cmd)
# return
def removeLinkerFilePath(inputLinkFile, outputLinkFile):
with open(outputLinkFile, 'w') as out_f:
with open (inputLinkFile,'r') as in_f:
for inline in in_f:
outline = 'SWCFILE=' + inline.split('/')[-1]
out_f.write(outline)
in_f.close()
out_f.close()
return
def genLinkerFileFromList(listCSVFile, linkFile):
df = pd.read_csv(listCSVFile, sep=',',header=0)
fns = df.orca_path
with open(linkFile, 'w') as f:
for i in range(len(fns)):
line = "SWCFILE="+fns[i]+'\n'
f.write(line)
f.close()
return
def pullListFromDB(outputFolder):
#outputListCSVFile = outputFolder +'/list.csv'
# copy data to local disk?
return
#==================================================================================================
def main():
#TODO: pullListFromDB() update from lims2 to grab all neuron reconstructions into list.csv
#genLinkerFileFromList(data_DIR+'/list.csv', original_data_linker_file)
if not os.path.exists(data_DIR+'/pruned'):
os.mkdir(data_DIR+'/pruned')
if not os.path.exists(data_DIR+'/preprocessed'):
os.mkdir(data_DIR+'/preprocessed')
with open(original_data_linker_file,'r') as f:
for line in f:
input_swc_path = data_DIR+'/original/'+ (line.strip()).split('=')[1] #SWCFILE=*
swc_fn = input_swc_path.split('/')[-1]
print swc_fn
pruned_swc_fn = data_DIR+'/pruned/'+ swc_fn
prune(input_swc_path, pruned_swc_fn)
preprocessed_swc_fn = data_DIR+'/preprocessed/'+ swc_fn
preprocessing(pruned_swc_fn, preprocessed_swc_fn)
#removeLinkerFilePath(original_data_linker_file, preprocessed_data_linker_file)
##batch computing
featurecomputing(preprocessed_data_linker_file,feature_file)
if __name__ == "__main__":
main()
| gpl-3.0 |
JonasWallin/BayesFlow | src/HMres.py | 1 | 12303 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 18 18:35:14 2014
@author: johnsson
"""
from __future__ import division
import os
from mpi4py import MPI
import numpy as np
import json
import matplotlib.pyplot as plt
from .HMplot import HMplot
from .HMlog import HMlogB, HMElog
from .PurePython.GMM import mixture
from .utils.dat_util import load_fcdata, sampnames_scattered
from .utils.results_mem_efficient import Mres, Traces, MimicSample, Components, MetaData
from .utils.initialization.distributed_data import DataMPI
from .utils.initialization.EM import EMD_to_generated_from_model
from .exceptions import BadQualityError
class HMres(Mres):
"""
Class for processing results from MCMC simulation, e.g. for merging and computing dip test
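    A minimal usage sketch (the directory path, data keywords and threshold
    are illustrative assumptions, not part of the original code):
        res = HMres.load('/path/to/run', data_kws)
        res.merge('bhat', thr=0.5)
        res.check_quality(savedir='/path/to/run')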
"""
def __init__(self, hmlog, hmlog_burn, data, meta_data,
comm=MPI.COMM_WORLD, maxnbrsucocol=7):
self.comm = comm
self.rank = comm.Get_rank()
if self.rank == 0:
self.noise_class = hmlog.noise_class
if self.noise_class:
self.p_noise = hmlog.prob_sim_mean[:, hmlog.K]
self.noise_mu = hmlog.noise_mu
self.noise_sigma = hmlog.noise_sigma
else:
self.p_noise = None
self.meta_data = MetaData(meta_data)
self.meta_data.sort(hmlog.names)
self.names = self.meta_data.samp['names']
self.data = [data[j] for j in self.meta_data.order]
super(HMres, self).__init__(hmlog.d, hmlog.K, hmlog.prob_sim_mean[:, :hmlog.K],
hmlog.classif_freq, self.p_noise, hmlog.sim,
maxnbrsucocol)
self.sim = hmlog.sim
self.active_komp = hmlog.active_komp
self.traces = Traces(hmlog_burn, hmlog)
self.mimics = {}
#print("hmlog.savesampnames = {}".format(hmlog.savesampnames))
for i, name in enumerate(hmlog.savesampnames):
j = hmlog.names.index(name)
self.mimics[name] = MimicSample(self.data[j], name, hmlog.Y_sim[i], 'BHM_MCMC')
datapooled = np.vstack(data)[np.random.choice(range(sum([dat.shape[0] for dat in data])),
hmlog.Y_pooled_sim.shape[0], replace=False), :]
self.mimics['pooled'] = MimicSample(datapooled, 'pooled', hmlog.Y_pooled_sim, 'BHM_MCMC')
self.components = Components(hmlog, self.p)
self.plot = HMplot(self)
self.quality = {}
self.mergeind = self.mergeind # make all attributes have same mergeind
@classmethod
def load(cls, savedir, data_kws, comm=MPI.COMM_SELF, no_postproc=False,
verbose=True):
hmlog_burn = HMlogB.load(savedir, comm=comm)
hmlog = HMElog.load(savedir, comm=comm)
hmlog.prob_sim_mean *= hmlog.active_komp # compensate for reweighting in HMlog.postproc
metadata = data_kws.copy()
marker_lab = metadata.pop('marker_lab')
ext = metadata.pop('ext')
loadfilef = metadata.pop('loadfilef')
if 'datadir' in metadata:
datadir = metadata.pop('datadir')
data = load_fcdata([datadir], ext, loadfilef, sampnames=hmlog.names,
comm=comm, **metadata)
else:
datadirs = metadata.pop('datadirs')
data = load_fcdata(datadirs, ext, loadfilef, sampnames=hmlog.names,
comm=comm, **metadata)
names = hmlog.names
metadata.update(marker_lab=marker_lab, samp={'names': names})
if verbose:
print("metadata = {}".format(metadata))
res = cls(hmlog, hmlog_burn, data, metadata, comm=comm)
if not no_postproc:
try:
if verbose:
print("savedir = {}".format(savedir))
with open(os.path.join(savedir, 'postproc_results.json'), 'r') as f:
postproc_res = json.load(f)
except IOError:
pass
else:
for attr in ['_earth_movers_distance_to_generated', 'pdiplist']:
try:
setattr(res, attr, np.load(os.path.join(savedir, attr+'.npy')))
except IOError:
pass
for attr in postproc_res:
setattr(res, attr, postproc_res[attr])
return res
def save(self, savedir):
if self.rank == 0:
savedict = {}
for attr in ['mergeind', 'postproc_par', '_emd_dims',
'quality', 'merged']:
try:
savedict[attr] = getattr(self, attr)
except AttributeError:
pass
with open(os.path.join(savedir, 'postproc_results.json'), 'w') as f:
json.dump(savedict, f)
for attr in ['_earth_movers_distance_to_generated', 'pdiplist']:
try:
np.save(os.path.join(savedir, attr+'.npy'), getattr(self, attr))
except AttributeError:
pass
@Mres.mergeind.setter
def mergeind(self, mergeind):
Mres.mergeind.fset(self, mergeind)
if hasattr(self, 'components'):
self.components.mergeind = self._mergeind
self.components.suco_ord = self.suco_ord
self.components.comp_ord = self.comp_ord
self.components.suco_colors = self.suco_colors
self.components.comp_colors = self.comp_colors
if hasattr(self, 'traces'):
self.traces.comp_ord = self.comp_ord
# def check_active_komp(self):
# if ((self.active_komp > 0.05)*(self.active_komp < 0.95)).any():
# self.quality['ok_active_komp'] = False
# raise BadQualityError('Active components not in ok range')
# else:
# self.quality['ok_active_komp'] = True
def check_convergence(self):
if 'convergence' in self.quality:
if self.quality['convergence'] == 'no':
raise BadQualityError('No convergence')
else:
return
self.traces.plot.all(fig=plt.figure(figsize=(18, 4)), yscale=True)
self.traces.plot.nu()
self.traces.plot.nu_sigma()
plt.show()
print("Are trace plots ok? (y/n)")
while 1:
ans = raw_input()
if ans.lower() == 'y':
self.quality['convergence'] = 'yes'
break
if ans.lower() == 'n':
self.quality['convergence'] = 'no'
raise BadQualityError('Trace plots not ok')
print("Bad answer. Are trace plots ok? (y/n)")
def check_noise(self, noise_lim=0.01):
if self.noise_class:
self.quality['max_p_noise'] = np.max(self.p_noise)
if (self.p_noise > noise_lim).any():
raise BadQualityError('Too high noise level')
def check_outliers(self):
bh_out = np.sum(self.components.get_latent_bhattacharyya_overlap_quotient() < 1)
eu_out = np.sum(self.components.get_center_distance_quotient() < 1)
self.quality['outliers'] = {'bhat': bh_out, 'eucl_loc': eu_out}
if bh_out+eu_out > 0:
raise BadQualityError('Not closest to own latent component, bhat: {}, eu: {}'.format(
bh_out, eu_out))
def check_dip(self, savedir=None, tol=20):
self.get_pdip()
fig_dip = self.plot.pdip()
fig_dip_summary = self.plot.pdip_summary()
        if savedir is not None:
fig_dip.savefig(os.path.join(savedir, 'dip.pdf'), type='pdf',
transparent=False, bbox_inches='tight')
fig_dip_summary.savefig(os.path.join(savedir, 'dip_summary.pdf'),
type='pdf', transparent=False, bbox_inches='tight')
else:
plt.show()
if (np.sum(np.array(self.get_pdip(suco=True)) < 0.28)) > tol:
raise BadQualityError('Too many pdip not ok')
def check_emd(self, N=5, savedir=None):
emd, emd_dim = self.earth_movers_distance_to_generated()
self.quality['emd'] = {'min': np.min(emd), 'max': np.max(emd), 'median': np.median(emd)}
fig, ax = plt.subplots(figsize=(15, 4))
im = ax.imshow(emd.T, interpolation='None')
plt.colorbar(im, orientation='horizontal')
top_N = zip(*np.unravel_index(np.argpartition(-emd.ravel(), N)[:N], emd.shape))
fig_fit, axs = plt.subplots(N, 4, figsize=(15, 15))
for i, (j, i_dim) in enumerate(top_N):
self.plot.component_fit([emd_dim[i_dim]], name=self.names[j], axs=axs[i, :].reshape(1, -1))
        if savedir is not None:
fig.savefig(os.path.join(savedir, 'emd.pdf'), type='pdf',
transparent=False, bbox_inches='tight')
fig_fit.savefig(os.path.join(savedir, 'fit_max_emd.pdf'), type='pdf',
transparent=False, bbox_inches='tight')
else:
plt.show()
def check_quality(self, savedir=None, N_emd=5, noise_lim=0.01):
#self.check_active_komp()
self.check_noise(noise_lim)
#self.check_convergence()
self.check_outliers()
self.check_dip(savedir)
self.check_emd(N_emd, savedir)
def merge(self, method, thr, **mmfArgs):
if self.rank == 0:
if method == 'bhat':
super(HMres, self).greedy_merge(self.components.get_median_bh_overlap,
thr, **mmfArgs)
                super(HMres, self).gclean()
else:
super(HMres, self).merge(method, thr, **mmfArgs)
#self.components.mergeind = self.mergeind
#self.plot = HMplot(self, self.meta_data.marker_lab)
for i in range(self.comm.Get_size()):
self.comm.send(self.mergeind, dest=i, tag=2)
else:
self.mergeind = self.comm.recv(source=0, tag=2)
self.postproc_par = {'method': method, 'thr': thr, 'mmfArgs': mmfArgs}
def get_bh_distance_to_own_latent(self):
return self.components.get_bh_distance_to_own_latent()
def get_center_dist(self):
return self.components.get_center_dist()
def get_mix(self, j, debug=False):
active = self.active_komp[j, :] > 0.05
mus = [self.components.mupers[j, k, :] for k in range(self.K) if active[k]]
Sigmas = [self.components.Sigmapers[j, k, :, :] for k in range(self.K) if active[k]]
ps = [self.components.p[j, k] for k in range(self.K) if active[k]]
if self.noise_class:
mus.append(self.noise_mu)
Sigmas.append(self.noise_sigma)
ps.append(self.p_noise[j])
if debug:
print("np.sum(ps) = {}".format(np.sum(ps)))
ps /= np.sum(ps) # renormalizing
return mus, Sigmas, np.array(ps)
def generate_from_mix(self, j, N):
mus, Sigmas, ps = self.get_mix(j)
return mixture.simulate_mixture(mus, Sigmas, ps, N)
@property
def K_active(self):
return np.sum(np.sum(self.active_komp > 0.05, axis=0) > 0)
def earth_movers_distance_to_generated(self, gamma=1):
if hasattr(self, '_earth_movers_distance_to_generated'):
return self._earth_movers_distance_to_generated, self._emd_dims
emds = []
dims = [(i, j) for i in range(self.d) for j in range(i+1, self.d)]
for j, dat in enumerate(self.data):
mus, Sigmas, ps = self.get_mix(j)
N_synsamp = int(dat.shape[0]//10)
emds.append(
np.array(EMD_to_generated_from_model(
DataMPI(MPI.COMM_SELF, [dat]), mus, Sigmas, ps, N_synsamp,
gamma=gamma, nbins=50, dims=dims))
* (1./N_synsamp))
print("\r EMD computed for {} samples".format(j+1),)
print("\r ",)
print("")
self._earth_movers_distance_to_generated = np.vstack(emds)
self._emd_dims = dims
print("dims = {}".format(dims))
return self._earth_movers_distance_to_generated, self._emd_dims
| gpl-2.0 |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/shapes_and_collections/artist_reference.py | 6 | 2838 | """
Reference for matplotlib artists
This example displays several of matplotlib's graphics primitives (artists)
drawn using matplotlib API. A full list of artists and the documentation is
available at http://matplotlib.org/api/artist_api.html.
Copyright (c) 2010, Bartosz Telenczuk
BSD License
"""
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
def label(xy, text):
y = xy[1] - 0.15 # shift y-value for label so that it's below the artist
plt.text(xy[0], y, text, ha="center", family='sans-serif', size=14)
fig, ax = plt.subplots()
# create 3x3 grid to plot the artists
grid = np.mgrid[0.2:0.8:3j, 0.2:0.8:3j].reshape(2, -1).T
patches = []
# add a circle
circle = mpatches.Circle(grid[0], 0.1,ec="none")
patches.append(circle)
label(grid[0], "Circle")
# add a rectangle
rect = mpatches.Rectangle(grid[1] - [0.025, 0.05], 0.05, 0.1, ec="none")
patches.append(rect)
label(grid[1], "Rectangle")
# add a wedge
wedge = mpatches.Wedge(grid[2], 0.1, 30, 270, ec="none")
patches.append(wedge)
label(grid[2], "Wedge")
# add a Polygon
polygon = mpatches.RegularPolygon(grid[3], 5, 0.1)
patches.append(polygon)
label(grid[3], "Polygon")
#add an ellipse
ellipse = mpatches.Ellipse(grid[4], 0.2, 0.1)
patches.append(ellipse)
label(grid[4], "Ellipse")
#add an arrow
arrow = mpatches.Arrow(grid[5, 0]-0.05, grid[5, 1]-0.05, 0.1, 0.1, width=0.1)
patches.append(arrow)
label(grid[5], "Arrow")
# add a path patch
Path = mpath.Path
path_data = [
(Path.MOVETO, [ 0.018, -0.11 ]),
(Path.CURVE4, [-0.031, -0.051]),
(Path.CURVE4, [-0.115, 0.073]),
(Path.CURVE4, [-0.03 , 0.073]),
(Path.LINETO, [-0.011, 0.039]),
(Path.CURVE4, [ 0.043, 0.121]),
(Path.CURVE4, [ 0.075, -0.005]),
(Path.CURVE4, [ 0.035, -0.027]),
(Path.CLOSEPOLY, [0.018, -0.11])
]
codes, verts = zip(*path_data)
path = mpath.Path(verts + grid[6], codes)
patch = mpatches.PathPatch(path)
patches.append(patch)
label(grid[6], "PathPatch")
# add a fancy box
fancybox = mpatches.FancyBboxPatch(
grid[7] - [0.025, 0.05], 0.05, 0.1,
boxstyle=mpatches.BoxStyle("Round", pad=0.02))
patches.append(fancybox)
label(grid[7], "FancyBoxPatch")
# add a line
x,y = np.array([[-0.06, 0.0, 0.1], [0.05, -0.05, 0.05]])
line = mlines.Line2D(x + grid[8, 0], y + grid[8, 1], lw=5., alpha=0.3)
label(grid[8], "Line2D")
colors = np.linspace(0, 1, len(patches))
collection = PatchCollection(patches, cmap=plt.cm.hsv, alpha=0.3)
collection.set_array(np.array(colors))
ax.add_collection(collection)
ax.add_line(line)
plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
plt.axis('equal')
plt.axis('off')
plt.show()
| mit |
lenovor/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
flowmatters/veneer-py | setup.py | 2 | 1672 | RELEASE = True
import codecs
from setuptools import setup, find_packages
import sys, os
classifiers = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: ISC License (ISCL)
Operating System :: OS Independent
Programming Language :: Python
Topic :: Scientific/Engineering
Topic :: Software Development :: Libraries :: Python Modules
"""
version = '0.1'
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Build an absolute path from *parts* and and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
setup(
name='veneer-py',
version=version,
description="Support for scripting eWater Source models through the Veneer (RESTful HTTP) plugin",
packages=["veneer"],
long_description=read("README.md"),
      classifiers=list(filter(None, classifiers.split("\n"))),
keywords='hydrology ewater veneer scripting http rest',
author='Joel Rahman',
author_email='[email protected]',
url='https://github.com/flowmatters/veneer-py',
#download_url = "http://cheeseshop.python.org/packages/source/p/Puppy/Puppy-%s.tar.gz" % version,
license='ISC',
py_modules=['veneer'],
include_package_data=True,
zip_safe=True,
test_suite = 'nose.collector',
install_requires=[
'numpy',
'pandas'
],
extras_require={
'test': ['nose'],
},
)
| isc |
dimkal/mne-python | tutorials/plot_cluster_stats_time_frequency.py | 16 | 5437 | """
.. _tut_stats_cluster_sensor_2samp_tfr:
=========================================================================
Non-parametric between conditions cluster statistic on single trial power
=========================================================================
This script shows how to compare clusters in time-frequency
power estimates between conditions. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists in:
- extracting epochs for 2 conditions
- compute single trial power estimates
  - baseline correct the power estimates (power ratios)
- compute stats to see if the power estimates are significantly different
between conditions.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.time_frequency import single_trial_power
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = raw.info['ch_names'][picks[0]]
# Load condition 1
reject = dict(grad=4000e-13, eog=150e-6)
event_id = 1
epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject)
data_condition_1 = epochs_condition_1.get_data() # as 3D matrix
data_condition_1 *= 1e13 # change unit to fT / cm
# Load condition 2
event_id = 2
epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject)
data_condition_2 = epochs_condition_2.get_data() # as 3D matrix
data_condition_2 *= 1e13 # change unit to fT / cm
# Take only one channel
data_condition_1 = data_condition_1[:, 97:98, :]
data_condition_2 = data_condition_2[:, 97:98, :]
# Time vector
times = 1e3 * epochs_condition_1.times # change unit to ms
# Factor to downsample the temporal dimension of the PSD computed by
# single_trial_power. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 2
frequencies = np.arange(7, 30, 3) # define frequencies of interest
sfreq = raw.info['sfreq'] # sampling in Hz
n_cycles = 1.5
epochs_power_1 = single_trial_power(data_condition_1, sfreq=sfreq,
frequencies=frequencies,
n_cycles=n_cycles, decim=decim)
epochs_power_2 = single_trial_power(data_condition_2, sfreq=sfreq,
frequencies=frequencies,
n_cycles=n_cycles, decim=decim)
epochs_power_1 = epochs_power_1[:, 0, :, :] # only 1 channel to get 3D matrix
epochs_power_2 = epochs_power_2[:, 0, :, :] # only 1 channel to get 3D matrix
# Compute ratio with baseline power (be sure to correct time vector with
# decimation factor)
baseline_mask = times[::decim] < 0
epochs_baseline_1 = np.mean(epochs_power_1[:, :, baseline_mask], axis=2)
epochs_power_1 /= epochs_baseline_1[..., np.newaxis]
epochs_baseline_2 = np.mean(epochs_power_2[:, :, baseline_mask], axis=2)
epochs_power_2 /= epochs_baseline_2[..., np.newaxis]
###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([epochs_power_1, epochs_power_2],
n_permutations=100, threshold=threshold, tail=0)
###############################################################################
# View time-frequency plots
plt.clf()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
evoked_contrast = np.mean(data_condition_1, 0) - np.mean(data_condition_2, 0)
plt.plot(times, evoked_contrast.T)
plt.title('Contrast of evoked response (%s)' % ch_name)
plt.xlabel('time (ms)')
plt.ylabel('Magnetic Field (fT/cm)')
plt.xlim(times[0], times[-1])
plt.ylim(-100, 200)
plt.subplot(2, 1, 2)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
plt.imshow(T_obs,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', cmap='RdBu_r')
plt.imshow(T_obs_plot,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', cmap='RdBu_r')
plt.xlabel('time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
plt.show()
| bsd-3-clause |
jreback/pandas | pandas/io/excel/_openpyxl.py | 1 | 16191 | from typing import TYPE_CHECKING, Dict, List, Optional
import numpy as np
from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions
from pandas.compat._optional import import_optional_dependency
from pandas.io.excel._base import BaseExcelReader, ExcelWriter
from pandas.io.excel._util import validate_freeze_panes
if TYPE_CHECKING:
from openpyxl.descriptors.serialisable import Serialisable
class OpenpyxlWriter(ExcelWriter):
engine = "openpyxl"
supported_extensions = (".xlsx", ".xlsm")
def __init__(
self,
path,
engine=None,
mode: str = "w",
storage_options: StorageOptions = None,
**engine_kwargs,
):
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super().__init__(
path, mode=mode, storage_options=storage_options, **engine_kwargs
)
# ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from
# the file and later write to it
if "r+" in self.mode: # Load from existing workbook
from openpyxl import load_workbook
self.book = load_workbook(self.handles.handle)
else:
# Create workbook object with default optimized_write=True.
self.book = Workbook()
if self.book.worksheets:
self.book.remove(self.book.worksheets[0])
def save(self):
"""
Save workbook to disk.
"""
self.book.save(self.handles.handle)
@classmethod
def _convert_to_style_kwargs(cls, style_dict: dict) -> Dict[str, "Serialisable"]:
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object.
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {"borders": "border"}
style_kwargs: Dict[str, Serialisable] = {}
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
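    # Illustrative example (not from the original pandas source): a hypothetical
    # style_dict such as
    #     {"font": {"bold": True}, "borders": {"left": {"style": "thin"}}}
    # would be returned by _convert_to_style_kwargs as
    #     {"font": Font(bold=True), "border": Border(left=Side(style="thin"))}
    # i.e. the synonym 'borders' is normalized to 'border' and each value is turned
    # into the corresponding openpyxl style object by the _convert_to_* helpers below.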
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object.
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object.
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
"sz": "size",
"b": "bold",
"i": "italic",
"u": "underline",
"strike": "strikethrough",
"vertalign": "vertAlign",
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == "color":
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict):
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object.
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import GradientFill, PatternFill
_pattern_fill_key_map = {
"patternType": "fill_type",
"patterntype": "fill_type",
"fgColor": "start_color",
"fgcolor": "start_color",
"bgColor": "end_color",
"bgcolor": "end_color",
}
_gradient_fill_key_map = {"fill_type": "type"}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = gk = None
if k in _pattern_fill_key_map:
pk = _pattern_fill_key_map[k]
if k in _gradient_fill_key_map:
gk = _gradient_fill_key_map[k]
if pk in ["start_color", "end_color"]:
v = cls._convert_to_color(v)
if gk == "stop":
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object.
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {"border_style": "style"}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == "color":
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object.
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"}
border_kwargs = {}
for k, v in border_dict.items():
if k in _border_key_map:
k = _border_key_map[k]
if k == "color":
v = cls._convert_to_color(v)
if k in ["left", "right", "top", "bottom", "diagonal"]:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object.
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
return number_format_dict["format_code"]
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
Returns
-------
"""
from openpyxl.styles import Protection
return Protection(**protection_dict)
def write_cells(
self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None
):
# Write the frame cells using openpyxl.
sheet_name = self._get_sheet_name(sheet_name)
_style_cache: Dict[str, Dict[str, Serialisable]] = {}
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
if validate_freeze_panes(freeze_panes):
wks.freeze_panes = wks.cell(
row=freeze_panes[0] + 1, column=freeze_panes[1] + 1
)
for cell in cells:
xcell = wks.cell(
row=startrow + cell.row + 1, column=startcol + cell.col + 1
)
xcell.value, fmt = self._value_with_fmt(cell.val)
if fmt:
xcell.number_format = fmt
style_kwargs: Optional[Dict[str, Serialisable]] = {}
if cell.style:
key = str(cell.style)
style_kwargs = _style_cache.get(key)
if style_kwargs is None:
style_kwargs = self._convert_to_style_kwargs(cell.style)
_style_cache[key] = style_kwargs
if style_kwargs:
for k, v in style_kwargs.items():
setattr(xcell, k, v)
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_cells(
start_row=startrow + cell.row + 1,
start_column=startcol + cell.col + 1,
end_column=startcol + cell.mergeend + 1,
end_row=startrow + cell.mergestart + 1,
)
# When cells are merged only the top-left cell is preserved
# The behaviour of the other cells in a merged range is
# undefined
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
xcell = wks.cell(column=col, row=row)
for k, v in style_kwargs.items():
setattr(xcell, k, v)
class OpenpyxlReader(BaseExcelReader):
def __init__(
self,
filepath_or_buffer: FilePathOrBuffer,
storage_options: StorageOptions = None,
) -> None:
"""
Reader using openpyxl engine.
Parameters
----------
filepath_or_buffer : string, path object or Workbook
Object to be parsed.
storage_options : dict, optional
passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``)
"""
import_optional_dependency("openpyxl")
super().__init__(filepath_or_buffer, storage_options=storage_options)
@property
def _workbook_class(self):
from openpyxl import Workbook
return Workbook
def load_workbook(self, filepath_or_buffer: FilePathOrBuffer):
from openpyxl import load_workbook
return load_workbook(
filepath_or_buffer, read_only=True, data_only=True, keep_links=False
)
def close(self):
# https://stackoverflow.com/questions/31416842/
# openpyxl-does-not-close-excel-workbook-in-read-only-mode
self.book.close()
super().close()
@property
def sheet_names(self) -> List[str]:
return self.book.sheetnames
def get_sheet_by_name(self, name: str):
return self.book[name]
def get_sheet_by_index(self, index: int):
return self.book.worksheets[index]
def _convert_cell(self, cell, convert_float: bool) -> Scalar:
from openpyxl.cell.cell import TYPE_BOOL, TYPE_ERROR, TYPE_NUMERIC
if cell.is_date:
return cell.value
elif cell.data_type == TYPE_ERROR:
return np.nan
elif cell.data_type == TYPE_BOOL:
return bool(cell.value)
elif cell.value is None:
return "" # compat with xlrd
elif cell.data_type == TYPE_NUMERIC:
# GH5394
if convert_float:
val = int(cell.value)
if val == cell.value:
return val
else:
return float(cell.value)
return cell.value
def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
data: List[List[Scalar]] = []
for row in sheet.rows:
data.append([self._convert_cell(cell, convert_float) for cell in row])
return data
| bsd-3-clause |
lshain-android-source/external-chromium_org | ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 154 | 8545 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
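# For the sample lines quoted in the comment above, ReadDumpTxtFile would return a
# dict along the lines of {'plat': 'Win32', 'prod': 'Chromium', 'ptype': 'nacl-loader',
# 'rept': 'crash svc'} (illustrative only; the actual keys depend on what
# crash_service.exe wrote into the .txt file).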
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe,
skip_if_missing=False):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
crash_service_path = os.path.join(browser_dir, crash_service_exe)
if skip_if_missing and not os.path.exists(crash_service_path):
return
proc = subprocess.Popen([crash_service_path,
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
# Skip if missing, since in win64 builds crash_service.exe is 64-bit
# and crash_service64.exe does not exist.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe',
skip_if_missing=True)
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
| bsd-3-clause |
rmhyman/DataScience | Lesson3/gradient_descent_subway_data.py | 1 | 4610 | import numpy as np
import pandas
from sklearn.linear_model import SGDRegressor
"""
In this question, you need to:
1) Implement the linear_regression() procedure using gradient descent.
You can use the SGDRegressor class from sklearn, since this class uses gradient descent.
2) Select features (in the predictions procedure) and make predictions.
"""
def normalize_features(features):
'''
Returns the means and standard deviations of the given features, along with a normalized feature
matrix.
'''
means = np.mean(features, axis=0)
std_devs = np.std(features, axis=0)
normalized_features = (features - means) / std_devs
return means, std_devs, normalized_features
def recover_params(means, std_devs, norm_intercept, norm_params):
'''
Recovers the weights for a linear model given parameters that were fitted using
normalized features. Takes the means and standard deviations of the original
features, along with the intercept and parameters computed using the normalized
features, and returns the intercept and parameters that correspond to the original
features.
'''
intercept = norm_intercept - np.sum(means * norm_params / std_devs)
params = norm_params / std_devs
return intercept, params
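# --- Illustrative sketch (not part of the original exercise): a quick self-check that
# recover_params() undoes the normalization done by normalize_features(). All names
# and numbers below are made up for demonstration, and the function is never called.
def _check_recover_params():
    rng = np.random.RandomState(0)
    features = rng.rand(20, 3) * 10 + 5  # arbitrary, un-normalized features
    means, std_devs, normalized_features = normalize_features(features)
    # Pretend these were fitted on the normalized features.
    norm_intercept, norm_params = 2.0, np.array([0.5, -1.0, 3.0])
    intercept, params = recover_params(means, std_devs, norm_intercept, norm_params)
    # Predictions computed from normalized or recovered parameters should agree.
    pred_norm = norm_intercept + np.dot(normalized_features, norm_params)
    pred_orig = intercept + np.dot(features, params)
    assert np.allclose(pred_norm, pred_orig)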
def linear_regression(features, values):
"""
Perform linear regression given a data set with an arbitrary number of features.
"""
#features = sm.add_constant(features)
model = SGDRegressor(n_iter=30)
#normalize_res = normalize_features(features)
model.fit(features,values)
###########################
### YOUR CODE GOES HERE ###
###########################
intercept = model.intercept_
params = model.coef_
return intercept, params
def predictions(dataframe):
'''
The NYC turnstile data is stored in a pandas dataframe called weather_turnstile.
Using the information stored in the dataframe, let's predict the ridership of
the NYC subway using linear regression with gradient descent.
You can download the complete turnstile weather dataframe here:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
Your prediction should have a R^2 value of 0.40 or better.
You need to experiment using various input features contained in the dataframe.
We recommend that you don't use the EXITSn_hourly feature as an input to the
linear model because we cannot use it as a predictor: we cannot use exits
counts as a way to predict entry counts.
Note: Due to the memory and CPU limitation of our Amazon EC2 instance, we will
give you a random subset (~50%) of the data contained in
turnstile_data_master_with_weather.csv. You are encouraged to experiment with
this exercise on your own computer, locally.
If you receive a "server has encountered an error" message, that means you are
hitting the 30-second limit that's placed on running your program. Try using a
smaller number of features or fewer iterations.
'''
################################ MODIFY THIS SECTION #####################################
# Select features. You should modify this section to try different features! #
# We've selected rain, precipi, Hour, meantempi, and UNIT (as a dummy) to start you off. #
# See this page for more info about dummy variables: #
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html #
##########################################################################################
features = dataframe[['rain', 'precipi', 'Hour', 'meantempi']]
dummy_units = pandas.get_dummies(dataframe['UNIT'], prefix='unit')
features = features.join(dummy_units)
# Values
values = dataframe['ENTRIESn_hourly']
# Get numpy arrays
features_array = features.values
values_array = values.values
means, std_devs, normalized_features_array = normalize_features(features_array)
# Perform gradient descent
norm_intercept, norm_params = linear_regression(normalized_features_array, values_array)
intercept, params = recover_params(means, std_devs, norm_intercept, norm_params)
predictions = intercept + np.dot(features_array, params)
# The following line would be equivalent:
# predictions = norm_intercept + np.dot(normalized_features_array, norm_params)
return predictions | mit |
maximus009/kaggle-galaxies | check_label_constraints.py | 7 | 2424 | """
This file evaluates all constraints on the training labels as stipulated on the 'decision tree' page, and reports when they are violated.
It uses only the source CSV file for the sake of reproducibility.
"""
import numpy as np
import pandas as pd
TOLERANCE = 0.00001 # 0.01 # only absolute errors greater than this are reported.
# TRAIN_LABELS_PATH = "data/raw/solutions_training.csv"
TRAIN_LABELS_PATH = "data/raw/training_solutions_rev1.csv"
d = pd.read_csv(TRAIN_LABELS_PATH)
targets = d.as_matrix()[:, 1:].astype('float32')
ids = d.as_matrix()[:, 0].astype('int32')
# separate out the questions for convenience
questions = [
targets[:, 0:3], # 1.1 - 1.3,
targets[:, 3:5], # 2.1 - 2.2
targets[:, 5:7], # 3.1 - 3.2
targets[:, 7:9], # 4.1 - 4.2
targets[:, 9:13], # 5.1 - 5.4
targets[:, 13:15], # 6.1 - 6.2
targets[:, 15:18], # 7.1 - 7.3
targets[:, 18:25], # 8.1 - 8.7
targets[:, 25:28], # 9.1 - 9.3
targets[:, 28:31], # 10.1 - 10.3
targets[:, 31:37], # 11.1 - 11.6
]
# there is one constraint for each question.
# the sums of all answers for each of the questions should be equal to these numbers.
sums = [
np.ones(targets.shape[0]), # 1, # Q1
questions[0][:, 1], # Q2
questions[1][:, 1], # Q3
questions[1][:, 1], # Q4
questions[1][:, 1], # Q5
np.ones(targets.shape[0]), # 1, # Q6
questions[0][:, 0], # Q7
questions[5][:, 0], # Q8
questions[1][:, 0], # Q9
questions[3][:, 0], # Q10
questions[3][:, 0], # Q11
]
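# Reading one entry as an example: sums[6] is questions[0][:, 0], i.e. the three
# Q7 answers must add up to the fraction that chose answer 1.1 for each galaxy
# (an illustrative restatement of the constraint encoded above, not extra logic).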
num_total_violations = 0
affected_ids = set()
for k, desired_sums in enumerate(sums):
print "QUESTION %d" % (k + 1)
actual_sums = questions[k].sum(1)
difference = abs(desired_sums - actual_sums)
indices_violated = difference > TOLERANCE
ids_violated = ids[indices_violated]
num_violations = len(ids_violated)
if num_violations > 0:
print "%d constraint violations." % num_violations
num_total_violations += num_violations
for id_violated, d_s, a_s in zip(ids_violated, desired_sums[indices_violated], actual_sums[indices_violated]):
print "violated by %d, sum should be %.6f but it is %.6f" % (id_violated, d_s, a_s)
affected_ids.add(id_violated)
else:
print "No constraint violations."
print
print
print "%d violations in total." % num_total_violations
print "%d data points violate constraints." % len(affected_ids) | bsd-3-clause |
airanmehr/bio | Scripts/HLI/Kyrgyz/poly.py | 1 | 1397 | import numpy as np;
import seaborn as sns
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
from matplotlib.backends.backend_pdf import PdfPages
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import pylab as plt;
import os;
home = os.path.expanduser('~') + '/'
import Utils.Util as utl
import Utils.Estimate as est
import Utils.Plots as pplt
import matplotlib as mpl
mpl.rcParams['text.usetex']=False
path='/Users/airanmehr/Downloads/PolygenicAdaptationCode-master/Example/Genome_Data/'
def genome():
a=pd.read_csv(path+'genome.file.txt',sep='\t');col=a.columns[0].split();a=a.reset_index();a.columns=col;a=a.drop_duplicates().rename(columns={'CHR':'CHROM'})
b=a[['CHROM','POS','BVAL']].drop_duplicates().set_index(['CHROM','POS']).BVAL.astype(int)
pplt.Manhattan(b)
return b
b=genome()
a=pd.read_csv(path+'match.file.txt',sep='\t',comment='#');col=a.columns[0].split();a=a.reset_index();a.columns=col;a=a.drop_duplicates().rename(columns={'CHR':'CHROM'})
(a.sort_values('SNP').values!=b[b.CLST=='French'].sort_values('SNP').values).sum()
b.BVAL.value_counts().size
c=pd.concat([a[a.IMP==0].BVAL.value_counts(),a[a.IMP==1].BVAL.value_counts()],1)
c[c.isnull().sum(1)>0]
c
path='/Users/airanmehr/Downloads/PolygenicAdaptationCode-master/Example/Trait_Data/'
a=pd.read_csv(path+'height.txt',sep='\t',comment='#')
a | mit |
pravsripad/jumeg | jumeg/base/jumeg_badchannel_table.py | 2 | 14572 | #!/usr/bin/env python3
# -+-coding: utf-8 -+-
"""
"""
#--------------------------------------------
# Authors: Frank Boers <[email protected]>
#
#--------------------------------------------
# Date: 15.05.19
#--------------------------------------------
# License: BSD (3-clause)
#--------------------------------------------
# Updates
#--------------------------------------------
import os,logging
import pandas as pd
from jumeg.base.jumeg_base import jumeg_base
from jumeg.base import jumeg_logger
from jumeg.base.jumeg_logger import get_logger
logger = get_logger()
__version__="2020.04.20.001"
class JuMEG_BadChannelTable(object):
"""
CLS to store noisy/bad channel information for all files within an experiment
HDF5format
id,scan,filename,file path, bads
Example:
--------
from jumeg.base.jumeg_badchannel_table import JuMEG_BadChannelTable
BCT = JuMEG_BadChannelTable(verbose=True)
BCT.open(path=path,fname=fname,overwrite=False)
id = "123456"
scan = "FREEVIEW01"
fname = "$JUMEG_TEST_DATA/mne/211747/FREEVIEW01/180109_0955/1/211747_FREEVIEW01_180109_0955_1_c,rfDC,meeg-raw.fif"
bads = "MEG 007,MEG 142,MEG 156,RFM 011"
BCT.set_bads(id=id,scan=scan,fname=fname,bads=bads)
BCT.show()
fname = "$JUMEG_TEST_DATA/mne/211747/FREEVIEW01/180109_0955/1/211747_FREEVIEW01_180109_0955_2_c,rfDC,meeg-raw.fif"
bads = "MEG 107,MEG 242,MEG 256,RFM 012"
BCT.set_bads(id=id,scan=scan,fname=fname,bads=bads)
BCT.show()
BCT.set_bads(id=id,scan=scan,fname=fname,bads="NIX")
BCT.show()
print("bads: {}".format( BCT.get_badslist(fname=fname) ))
"""
def __init__(self,**kwargs):
self._HDF = None
self._fname = None
self.verbose = False
self.postfix = 'bads'
self.extention = ".hdf5"
self._update_from_kwargs(**kwargs)
def _update_from_kwargs(self,**kwargs):
#self._fname = kwargs.get("filename",self._fname)
self.postfix = kwargs.get("postfix",self.postfix)
self.extention = kwargs.get("extention",self.extention)
self.verbose = kwargs.get("verbose",self.verbose)
def update(self,**kwargs):
self._update_from_kwargs(**kwargs)
#--- HDFobj
@property
def HDF(self):
return self._HDF
@property
def is_open(self):
try:
if self.HDF.is_open:
return True
except:
logger.error(" HDFobj is not open !!!\n")
return False
#--- HDFobj filename
@property
def filename(self):
return self._fname
#---
def update_filename(self,path=None,fname=None,postfix=None,extention=None):
"""
make filename from fname and path
remove file extention add postfix and extention
mkdir if not exist
:param path:
:param fname:
:param postfix:
:param extention:
:return:
hdf fullfilename
"""
if fname:
            self._fname = os.path.expandvars(os.path.expanduser(fname))
            self._fname = os.path.splitext(self._fname)[0] + "_" + self.postfix + self.extention
if path:
try:
path = os.path.expandvars(os.path.expanduser(path))
os.makedirs(path,exist_ok=True)
self._fname = os.path.join(path,self._fname)
except:
logger.exception(" can not create HDF directory path: {}".format(path) +
" HDF file: {}".format(self._fname))
return None
if self.verbose:
logger.info(" -> setting HDF file name to: {}".format(self.filename))
return self._fname
#--- init output pandas HDF obj
def init(self,fhdf=None,path=None,fname=None,overwrite=False):
"""create pandas HDF5-Obj and file
Parameters
----------
fhdf : hdf5 filename <None>
fname : fif-filename <None>
hdf_path : path to hdf file <None> if None use fif-file path
overwrite: will overwrite existing output file <False>
Returns
---------
pandas.HDFobj
"""
if not fhdf:
fhdf = self.update_filename(fname=fname,path=path)
if (os.path.exists(fhdf) and overwrite):
if self.verbose:
logger.info("---> HDF file overwriting : {}".format(fhdf))
self.close()
os.remove(fhdf)
# return pd.HDFStore( fhdf,format='table' ) not usefull with float16 !! ????
try:
self._HDF = None
self._HDF = pd.HDFStore(fhdf)
if self.verbose:
logger.info("---> HDF file created: {}".format(fhdf))
except:
logger.exception(" error open or creating HDFobj; e.g.: release lock?" +
" -> HDF filename: {}".format(fhdf))
if self.verbose:
logger.info("---> Open HDF file: {}".format(self.HDF.filename))
return self._HDF
def open(self,fhdf=None,path=None,fname=None,overwrite=False):
"""
open HDF file; pandas HDF5-Obj
if exists and not overwrite
if fhdf use fhdf as fullfilename
else construct filename and mkdir from path and fname
Parameters
----------
fhdf : hdf5 filename or,
fname : fif-filename or
overwrite: <False>
Returns
---------
pandas.HDFStore obj
"""
return self.init(fhdf=fhdf,path=path,fname=fname,overwrite=overwrite)
def close(self):
if self.is_open:
if self.verbose:
logging.debug(" -> HDF closing: {}".format(self.HDF.filename))
self._HDF.close()
def reset_key(self,k):
        if k in self.HDF.keys():
self.HDF.remove(k)
return k
def list_keys_from_node(self,node):
""" get key list from HDF node
Parameters
----------
node: HDFobj node
e.g: for node in HDFobj.keys():
HDFobj["/epcher/M100"]
Returns
---------
key list from node
Example:
---------
self.hdf_obj_list_keys_from_node("/epocher")
[condition1,condition2 ... conditionN ]
"""
return self.HDF.get_node(node)._v_groups.keys()
def get_dataframe(self,key):
""" get pandas dataframe from HDFobj
Parameters
----------
key: full dataframe key </node + /key ... + /keyN>
Returns
----------
pandas dataframe
Example
----------
df = self.hdf_obj_get_dataframe("/epocher/M100")
"""
return self.HDF.get(self.key2hdfkey(key))
def set_dataframe(self,data=None,key=None):
"""set dataframe in HDFobj for key
Parameters
----------
data: pandas dataframe
key : full dataframe key </node + /key ... + /keyN>
Returns
----------
None
Example
----------
self.hdf_obj_set_dataframe(data=<M100-dataframe>,key="/epocher/M100")
"""
self.HDF[key] = data
def obj_get_attributes(self,HStorer=None,key=None,attr=None):
"""
Parameters
----------
HStorer: Hdf Storer Obj, to get information from attribute dict <None>
if None : use self.HDFobj
key : full dataframe key </node + /key ... + /keyN> <None>
attr : name of attribute dictionary
Returns
----------
attribute parameter as dictionary
Example
----------
my_attribut_dict = self.hdf_obj_get_attributes(key="/epocher/M100",attr="epocher_parameter")
epocher_parameter = self.hdf_obj_get_attributes(key=ep_key,attr=self.hdf_obj_attribute_epocher)
"""
if HStorer:
try:
if HStorer.is_exists:
return HStorer.get_storer(key).attrs[attr]
except:
logger.exception("ERROR in hdf_obj_get_attributes => can not store key attributes no such Storer-Obj" +
"HDF : {}\n".format(self.HDF.filename) +
"key : {}\n".format(key) +
"Attr: {}\n".format(attr))
return
elif self.is_open:
return self.HDF.get_storer(key).attrs[attr]
def update_dataframe(self,df,key=None,reset=True,**storer_attrs):
"""store & update data[frame] and user attributes to HDFobj
call <hdf_obj_store_attributes> to update user attributes in HDFobj
Parameters
----------
df : pandas DataFrame or Panel
key : node name <None
attrs: user atributes <**storer_attr>
reset: will reset/clear dataframe first <True>
"""
#-- avoid error in python3 pandas HDF e.g: STI 13 => STI-13
if not self.is_open:
return None
#--- fist clean HDF df & parameter
if reset:
self.reset_key(key)
#--- copy dataframe to HDF
self.HDF[key] = df
#--- update attributes e.g. save dicts like parameter,info ...
# return self.store_attributes(key=key,**storer_attrs)
def bads2list(self,bads,sep=","):
"""
:param bads:
:param sep:
:return:
"""
if not bads:
return []
elif isinstance(bads,str):
return bads.split(sep)
return bads
def get_key(self,id=None,scan=None,fname=None):
"""
:param id:
:param scan:
:param fname:
:return:
key e.g: id + "/" + scan + "/" + run_name
Example:
--------
input : fname = 123456_FREEVIEW01_200108_0955_1_c,rfDC,meeg-raw.fif
output: 123456/FREEVIEW01/123456_FREEVIEW01_200108_0955_1
"""
pdf_name = "_".join(os.path.basename(fname).split("_")[0:-1])
id = id if id else pdf_name.split("_")[0]
scan = scan if scan else pdf_name.split("_")[1]
return id + "/" + scan + "/" + pdf_name
def get_bads(self,id=None,scan=None,fname=None,key=None):
"""
:param id:
:param scan:
:param fname:
:return:
"""
if not self.is_open: return
if not key:
if not fname: return
key = self.get_key(id=id,scan=scan,fname=fname)
try:
return self.HDF[key]
except:
            logger.exception("\n".join(["---> ERROR can not get bads from HDF:",
                                        " -> filename: {}".format(fname),
                                        " -> HDF key : {}".format(key),
                                        " -> HDF file: {}".format(self.HDF.filename)]))
def get_badslist(self,**kwargs):
"""
:param id:
:param scan:
:param fname:
:return:
"""
s = self.get_bads(**kwargs)
if isinstance(s,pd.Series):
return s.tolist()
def set_bads(self,id=None,scan=None,fname=None,bads=None):
"""
set list of bads in HDF as pd.Series
build HDF key : id + "/" + scan + "/" + pdf_name
:param id:
:param scan:
:param fname: (full)filename
:param bads: string or list of strings
:return:
Example:
--------
id = "123456"
scan = "FREEVIEW01"
fname = "$JUMEG_TEST_DATA/mne/123456/FREEVIEW01/180109_0955/1/123456_FREEVIEW01_180109_0955_1_c,rfDC,meeg-raw.fif
bads = "MEG 007,MEG 142,MEG 156,RFM 011"
BCT.set_bads(fname=fname,bads=bads)
BCT.set_bads(id=id,scan=scan,fname=fname,bads=bads)
"""
if not self.is_open: return
if not fname : return
key = self.get_key(id=id,scan=scan,fname=fname)
try:
self.HDF[key] = pd.Series( self.bads2list(bads) ) # ["MEG 007","MEG 142","MEG 156","RFM 011"]
except:
            logger.exception("\n".join(["---> ERROR can not set bads in HDF:",
                                        " -> filename: {}".format(fname),
                                        " -> bads    : {}".format(bads),
                                        " -> HDF key : {}".format(key),
                                        " -> HDF file: {}".format(self.HDF.filename)]))
self.HDF.flush()
def show(self):
if self.is_open:
msg=[]
for key in self.HDF.keys():
msg.append(" -> {} : {}".format(key,self.get_badslist(key=key)))
logger.info( "\n".join(msg))
def update_bads_in_hdf(fhdf=None,bads=None,path=None,fname=None,overwrite=False,verbose=False):
"""
:param fhdf
:param path:
:param fname:
:param overwrite:
:param verbose:
:return:
"""
BCT = JuMEG_BadChannelTable(verbose=True)
BCT.open(fhdf=fhdf,path=path,fname=fname,overwrite=overwrite)
BCT.set_bads(fname=fname,bads=bads)
BCT.close()
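# Hypothetical usage sketch (file name and bads mirror the values used in _test below):
#   update_bads_in_hdf(path="$JUMEG_TEST_DATA/mne",
#                      fname="211747_FREEVIEW01_180109_0955_1_c,rfDC,meeg-raw.fif",
#                      bads="MEG 007,MEG 142,MEG 156,RFM 011", verbose=True)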
#--- test
def _test():
path = "$JUMEG_TEST_DATA/mne"
fname = "FREEVIEW01"
BCT = JuMEG_BadChannelTable(verbose=True)
BCT.open(path=path,fname=fname,overwrite=True)
id = "123456"
scan = "FREEVIEW01"
fname = "$JUMEG_TEST_DATA/mne/211747/FREEVIEW01/180109_0955/1/211747_FREEVIEW01_180109_0955_1_c,rfDC,meeg-raw.fif"
bads = "MEG 007,MEG 142,MEG 156,RFM 011"
BCT.set_bads(id=id,scan=scan,fname=fname,bads=bads)
BCT.show()
fname = "$JUMEG_TEST_DATA/mne/211747/FREEVIEW01/180109_0955/1/211747_FREEVIEW01_180109_0955_2_c,rfDC,meeg-raw.fif"
BCT.set_bads(id=id,scan=scan,fname=fname,bads=bads)
BCT.show()
BCT.set_bads(id=id,scan=scan,fname=fname,bads="NIX")
BCT.show()
print("bads: {}".format( BCT.get_badslist(fname=fname) ))
BCT.close()
if __name__ == "__main__":
#--- init/update logger
logger=jumeg_logger.setup_script_logging(logger=logger,level=logging.DEBUG)
_test()
| bsd-3-clause |
chadnetzer/numpy-gaurdro | doc/example.py | 11 | 3503 | """This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi') :
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
describe : type
Explanation
output : type
Explanation
tuple : type
Explanation
items : type
even more explaining
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
pass
| bsd-3-clause |